code | docstring | source
---|---|---|
def __init__(self, name=None, eid=None):
if None not in (name, eid):
raise TypeError("Provide only a `name` or an `eid`.")
self._eid = eid or _get_enum(name)
self._comments = EnumComments(self._eid)
|
Get an existing enum.
Only provide one of `name` and `eid`.
Args:
name: Name of the enum
eid: Enum ID
|
juraj-google-style
|
def tables_insert(self, table_name, schema=None, query=None, friendly_name=None, description=None):
url = (Api._ENDPOINT + (Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', '')))
data = {'kind': 'bigquery#table', 'tableReference': {'projectId': table_name.project_id, 'datasetId': table_name.dataset_id, 'tableId': table_name.table_id}}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials)
|
Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def offTagDel(self, name, func):
if ('*' in name):
self.ontagdelglobs.rem(name, func)
return
cblist = self.ontagdels.get(name)
if (cblist is None):
return
try:
cblist.remove(func)
except ValueError:
pass
|
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
|
codesearchnet
|
def __init__(self, line: Optional[Text] = None):
self.line = line or self.default_separator
super(Separator, self).__init__(self.line, None, "-")
|
Create a separator in a list.
Args:
line: Text to be displayed in the list, by default uses `---`.
|
juraj-google-style
|
def netmiko_file_transfer(task: Task, source_file: str, dest_file: str, **kwargs: Any) -> Result:
net_connect = task.host.get_connection('netmiko', task.nornir.config)
kwargs.setdefault('direction', 'put')
scp_result = file_transfer(net_connect, source_file=source_file, dest_file=dest_file, **kwargs)
if (kwargs.get('disable_md5') is True):
file_valid = scp_result['file_exists']
else:
file_valid = (scp_result['file_exists'] and scp_result['file_verified'])
return Result(host=task.host, result=file_valid, changed=scp_result['file_transferred'])
|
Execute Netmiko file_transfer method
Arguments:
source_file: Source file.
dest_file: Destination file.
kwargs: Additional arguments to pass to file_transfer
Returns:
Result object with the following attributes set:
* result (``bool``): file exists and MD5 is valid
* changed (``bool``): the destination file was changed
|
codesearchnet
|
def _ExtractJQuery(self, jquery_raw):
data_part = ''
if not jquery_raw:
return {}
if '[' in jquery_raw:
_, _, first_part = jquery_raw.partition('[')
data_part, _, _ = first_part.partition(']')
elif jquery_raw.startswith('//'):
_, _, first_part = jquery_raw.partition('{')
data_part = '{{{0:s}'.format(first_part)
elif '({' in jquery_raw:
_, _, first_part = jquery_raw.partition('(')
data_part, _, _ = first_part.rpartition(')')
if not data_part:
return {}
try:
data_dict = json.loads(data_part)
except ValueError:
return {}
return data_dict
|
Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
|
juraj-google-style
|
def build_tab_completion_table(alias_table):
alias_commands = [t[1] for t in filter_aliases(alias_table)]
tab_completion_table = defaultdict(list)
for alias_command in alias_commands:
for reserved_command in azext_alias.cached_reserved_commands:
if reserved_command == alias_command or reserved_command.startswith(alias_command + ' ') \
and '' not in tab_completion_table[alias_command]:
tab_completion_table[alias_command].append('')
elif ' {} '.format(alias_command) in reserved_command or reserved_command.endswith(' ' + alias_command):
index = reserved_command.index(alias_command)
parent_command = reserved_command[:index - 1]
if parent_command not in tab_completion_table[alias_command]:
tab_completion_table[alias_command].append(parent_command)
with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'w') as f:
f.write(json.dumps(tab_completion_table))
return tab_completion_table
|
Build a dictionary where the keys are all the alias commands (without positional argument placeholders)
and the values are all the parent commands of the keys. After that, write the table into a file.
The purpose of the dictionary is to validate the alias tab completion state.
For example:
{
"group": ["", "ad"],
"dns": ["network"]
}
Args:
alias_table: The alias table.
Returns:
The tab completion table.
|
juraj-google-style
|
def iter(self, max_value: int) -> Iterator[int]:
return chain.from_iterable(
(self._get_range(elem, max_value) for elem in self.sequences))
|
Iterates through the sequence numbers contained in the set, bounded
by the given maximum value (in place of any ``*``).
Args:
max_value: The maximum value of the set.
|
juraj-google-style
|
def isClientCert(self, name):
crtpath = self._getPathJoin('users', '%s.p12' % name)
return os.path.isfile(crtpath)
|
Checks if a user client certificate (PKCS12) exists.
Args:
name (str): The name of the user keypair.
Examples:
Check if the client certificate "myuser" exists:
exists = cdir.isClientCert('myuser')
Returns:
bool: True if the certificate is present, False otherwise.
|
juraj-google-style
|
def Send(self, command_id, data=b'', size=0):
if data:
if not isinstance(data, bytes):
data = data.encode('utf8')
size = len(data)
if not self._CanAddToSendBuffer(len(data)):
self._Flush()
buf = struct.pack(b'<2I', self.id_to_wire[command_id], size) + data
self.send_buffer[self.send_idx:self.send_idx + len(buf)] = buf
self.send_idx += len(buf)
|
Send/buffer FileSync packets.
Packets are buffered and only flushed when this connection is read from. All
messages have a response from the device, so this will always get flushed.
Args:
command_id: Command to send.
data: Optional data to send, must set data or size.
size: Optionally override size from len(data).
|
juraj-google-style
|
def filename(self, fname, timestep=None, suffix='', force_legacy=False):
if timestep is not None:
fname += '{:05d}'.format(timestep)
fname += suffix
if not force_legacy and self.hdf5:
fpath = self.hdf5 / fname
else:
fpath = self.par['ioin']['output_file_stem'] + '_' + fname
fpath = self.path / fpath
return fpath
|
Return name of StagYY output file.
Args:
fname (str): name stem.
timestep (int): snapshot number, set to None if this is not
relevant.
suffix (str): optional suffix of file name.
force_legacy (bool): force returning the legacy output path.
Returns:
:class:`pathlib.Path`: the path of the output file constructed
with the provided segments.
|
juraj-google-style
|
def resolves_for(self, node):
self.node = node
self.actual_styles = node.style(*self.expected_styles.keys())
return all(
toregex(value).search(self.actual_styles[style])
for style, value in iter(self.expected_styles.items()))
|
Resolves this query relative to the given node.
Args:
node (node.Base): The node to be evaluated.
Returns:
bool: True if all expected styles match the node's actual styles.
|
juraj-google-style
|
def DeletePendingNotification(self, timestamp):
shown_notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS)
if not shown_notifications:
shown_notifications = self.Schema.SHOWN_NOTIFICATIONS()
pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)
if not pending:
return
delete_count = 0
for idx in reversed(range(0, len(pending))):
if pending[idx].timestamp == timestamp:
shown_notifications.Append(pending[idx])
pending.Pop(idx)
delete_count += 1
if delete_count > 1:
raise UniqueKeyError("Multiple notifications at %s" % timestamp)
self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)
self.Set(self.Schema.SHOWN_NOTIFICATIONS, shown_notifications)
|
Deletes the pending notification with the given timestamp.
Args:
timestamp: The timestamp of the notification. Assumed to be unique.
Raises:
UniqueKeyError: Raised if multiple notifications have the timestamp.
|
juraj-google-style
|
def remove_volume(self, name, force=False):
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion('force removal was introduced in API 1.25')
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
|
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
|
codesearchnet
|
def avg_grads(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
|
juraj-google-style
|
def broadcast_dynamic_shape(shape_x, shape_y):
if not isinstance(shape_x, RaggedTensorDynamicShape):
raise TypeError('shape_x must be a RaggedTensorDynamicShape')
if not isinstance(shape_y, RaggedTensorDynamicShape):
raise TypeError('shape_y must be a RaggedTensorDynamicShape')
if shape_x.rank is None or shape_y.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
broadcast_rank = max(shape_x.rank, shape_y.rank)
shape_x = shape_x.broadcast_to_rank(broadcast_rank)
shape_y = shape_y.broadcast_to_rank(broadcast_rank)
for axis in range(broadcast_rank):
shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))
shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))
return shape_x
|
Returns the shape formed by broadcasting two shapes to be compatible.
Args:
shape_x: A `RaggedTensorDynamicShape`
shape_y: A `RaggedTensorDynamicShape`
Returns:
A `RaggedTensorDynamicShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
|
github-repos
|
def remove_chain(self, name):
if name in self.chains:
delattr(self.chains, name)
else:
raise ValueError("Chain with this name not found")
|
Remove chain from current shelve file
Args:
name: chain name
|
juraj-google-style
|
def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
completions = []
for r in replace_list:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1
log.debug(f"Replacement: {replacement}")
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
)
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions
|
Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
|
juraj-google-style
|
def to_b58check(self, testnet=False):
version = (self.TESTNET_VERSION if testnet else self.MAINNET_VERSION)
return base58.b58encode_check((bytes([version]) + bytes(self)))
|
Generates a Base58Check encoding of this private key.
Returns:
str: A Base58Check encoded string representing the key.
|
codesearchnet
|
def get_package_hashes(filename):
log.debug('Getting package hashes')
filename = os.path.abspath(filename)
with open(filename, 'rb') as f:
data = f.read()
_hash = hashlib.sha256(data).hexdigest()
log.debug('Hash for file %s: %s', filename, _hash)
return _hash
|
Provides hash of given filename.
Args:
filename (str): Name of file to hash
Returns:
(str): sha256 hash
|
juraj-google-style
|
def core_name(self):
buf_size = self.MAX_BUF_SIZE
buf = (ctypes.c_char * buf_size)()
self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)
return ctypes.string_at(buf).decode()
|
Returns the name of the target ARM core.
Args:
self (JLink): the ``JLink`` instance
Returns:
The target core's name.
|
juraj-google-style
|
def bind_sockets(address, port):
ss = netutil.bind_sockets(port=(port or 0), address=address)
assert len(ss)
ports = {s.getsockname()[1] for s in ss}
assert (len(ports) == 1), 'Multiple ports assigned??'
actual_port = ports.pop()
if port:
assert (actual_port == port)
return (ss, actual_port)
|
Bind a socket to a port on an address.
Args:
address (str) :
An address to bind a port on, e.g. ``"localhost"``
port (int) :
A port number to bind.
Pass 0 to have the OS automatically choose a free port.
This function returns a 2-tuple with the new socket as the first element,
and the port that was bound as the second. (Useful when passing 0 as a port
number to bind any free port.)
Returns:
(socket, port)
|
codesearchnet
|
def add_key_value(self, key, value):
key = self._metadata_map.get(key, key)
if (key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']):
self._group_data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')
elif (key == 'file_content'):
pass
else:
self._group_data[key] = value
|
Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
|
codesearchnet
|
def copy_docstring(source_class):
def decorator(method):
'Decorator implementation.\n\n Args:\n method (Callable): The method to copy the docstring to.\n\n Returns:\n Callable: the same method passed in with an updated docstring.\n\n Raises:\n ValueError: if the method already has a docstring.\n '
if method.__doc__:
raise ValueError('Method already has a docstring.')
source_method = getattr(source_class, method.__name__)
method.__doc__ = source_method.__doc__
return method
return decorator
|
Decorator that copies a method's docstring from another class.
Args:
source_class (type): The class that has the documented method.
Returns:
Callable: A decorator that will copy the docstring of the same
named method in the source class to the decorated method.
|
codesearchnet
|
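A minimal usage sketch for the `copy_docstring` decorator above (illustrative only; the `Documented` and `Wrapper` classes are hypothetical and the decorator is assumed to be importable as `copy_docstring`):

class Documented(object):
    def run(self):
        'Run the task and return its result.'

class Wrapper(object):
    @copy_docstring(Documented)
    def run(self):
        # No docstring here, so copy_docstring will not raise ValueError.
        return 42

assert Wrapper.run.__doc__ == 'Run the task and return its result.'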
def permut2expr(self, P):
if (len(P) > (1 << self.nbits)):
raise ValueError(('P must not contain more than %d elements' % (1 << self.nbits)))
X = self.var('X')
ret = super(MBA, self).permut2expr(P, X.vec)
return (self.from_vec(ret), X)
|
Convert a substitution table into an arybo application
Args:
P: list of integers. The list must not contain more than 2**nbits elements.
Returns:
A tuple containing an :class:`MBAVariable` object with the result
and the symbolic input variable used in this object. A typical use
case is to feed these into vectorial_decomp.
Example:
>>> mba = MBA(4)
>>> P = [i^7 for i in range(16)]
>>> E,X = mba.permut2expr(P)
>>> E.vectorial_decomp([X])
App NL = Vec([
0,
0,
0,
0
])
AffApp matrix = Mat([
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
])
AffApp cst = Vec([
1,
1,
1,
0
])
|
codesearchnet
|
def object_table(self, object_id=None):
self._check_connected()
if object_id is not None:
return self._object_table(object_id)
else:
object_keys = self._keys(ray.gcs_utils.TablePrefix_OBJECT_string +
"*")
object_ids_binary = {
key[len(ray.gcs_utils.TablePrefix_OBJECT_string):]
for key in object_keys
}
results = {}
for object_id_binary in object_ids_binary:
results[binary_to_object_id(object_id_binary)] = (
self._object_table(binary_to_object_id(object_id_binary)))
return results
|
Fetch and parse the object table info for one or more object IDs.
Args:
object_id: An object ID to fetch information about. If this is
None, then the entire object table is fetched.
Returns:
Information from the object table.
|
juraj-google-style
|
def depth_october_average_ground_temperature(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_october_average_ground_temperature`'.format(value))
self._depth_october_average_ground_temperature = value
|
Corresponds to IDD Field `depth_october_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_october_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def addFileHandler(self,filename='', dr='',lvl=1):
fname = self.name
if filename != '':
fname = filename
if '.' not in fname:
fname+='.log'
fh = logging.FileHandler(os.path.join(dr,fname))
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
self.addHandler(fh)
|
This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
|
juraj-google-style
|
def model_spec(ops, matrix):
return api.ModelSpec(matrix=np.array(matrix), ops=[INPUT] + ops + [OUTPUT])
|
NASBench model spec that is parameterized by ops and their connections.
Args:
ops: a list of allowed ops except the INPUT and OUTPUT layer.
matrix: the adjacency matrix for the connectivity of each layers, which
should be an upper triangle matrix.
Returns:
A NASBench spec.
|
github-repos
|
def normalize(self, image, mean, std, rescale=False):
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = self.to_numpy_array(image, rescale=True)
elif rescale:
if isinstance(image, np.ndarray):
image = self.rescale(image.astype(np.float32), 1 / 255.0)
elif is_torch_tensor(image):
image = self.rescale(image.float(), 1 / 255.0)
if isinstance(image, np.ndarray):
if not isinstance(mean, np.ndarray):
mean = np.array(mean).astype(image.dtype)
if not isinstance(std, np.ndarray):
std = np.array(std).astype(image.dtype)
elif is_torch_tensor(image):
import torch
if not isinstance(mean, torch.Tensor):
if isinstance(mean, np.ndarray):
mean = torch.from_numpy(mean)
else:
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
if isinstance(std, np.ndarray):
std = torch.from_numpy(std)
else:
std = torch.tensor(std)
if image.ndim == 3 and image.shape[0] in [1, 3]:
return (image - mean[:, None, None]) / std[:, None, None]
else:
return (image - mean) / std
|
Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
if it's a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to normalize.
mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
The mean (per channel) to use for normalization.
std (`List[float]` or `np.ndarray` or `torch.Tensor`):
The standard deviation (per channel) to use for normalization.
rescale (`bool`, *optional*, defaults to `False`):
Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
happen automatically.
|
github-repos
|
def cancelPnL(self, account, modelCode: str = ''):
key = (account, modelCode)
reqId = self.wrapper.pnlKey2ReqId.pop(key, None)
if reqId:
self.client.cancelPnL(reqId)
self.wrapper.pnls.pop(reqId, None)
else:
self._logger.error(
'cancelPnL: No subscription for '
f'account {account}, modelCode {modelCode}')
|
Cancel PnL subscription.
Args:
account: Cancel for this account.
modelCode: If specified, cancel for this account model.
|
juraj-google-style
|
def ParseOptions(cls, options, output_module):
if not isinstance(output_module, timesketch_out.TimesketchOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of TimesketchOutputModule')
document_type = cls._ParseStringOption(
options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
output_module.SetDocumentType(document_type)
flush_interval = cls._ParseNumericOption(
options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
output_module.SetFlushInterval(flush_interval)
index = cls._ParseStringOption(
options, 'index', default_value=cls._DEFAULT_UUID)
output_module.SetIndexName(index)
name = cls._ParseStringOption(
options, 'timeline_name', default_value=cls._DEFAULT_NAME)
output_module.SetTimelineName(name)
username = cls._ParseStringOption(
options, 'username', default_value=cls._DEFAULT_USERNAME)
output_module.SetTimelineOwner(username)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (TimesketchOutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
|
juraj-google-style
|
def load_wav_file(filename):
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
return sess.run(wav_decoder, feed_dict={wav_filename_placeholder: filename}).audio.flatten()
|
Loads an audio file and returns a float PCM-encoded array of samples.
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
|
github-repos
|
def loss(logits, labels, batch_size=None):
if (not batch_size):
batch_size = FLAGS.batch_size
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
concated = tf.concat(axis=1, values=[indices, sparse_labels])
num_classes = logits[0].get_shape()[(- 1)].value
dense_labels = tf.sparse_to_dense(concated, [batch_size, num_classes], 1.0, 0.0)
slim.losses.cross_entropy_loss(logits[0], dense_labels, label_smoothing=0.1, weight=1.0)
slim.losses.cross_entropy_loss(logits[1], dense_labels, label_smoothing=0.1, weight=0.4, scope='aux_loss')
|
Adds all losses for the model.
Note the final loss is not returned. Instead, the list of losses are collected
by slim.losses. The losses are accumulated in tower_loss() and summed to
calculate the total loss.
Args:
logits: List of logits from inference(). Each entry is a 2-D float Tensor.
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
batch_size: integer
|
codesearchnet
|
def get_query_info(sql, con, partition_column):
engine = create_engine(con)
if is_table(engine, sql):
table_metadata = get_table_metadata(engine, sql)
query = build_query_from_table(sql)
cols = get_table_columns(table_metadata)
else:
check_query(sql)
query = sql.replace(";", "")
cols = get_query_columns(engine, query)
cols_names = list(cols.keys())
return cols_names, query
|
Return a columns name list and the query string
Args:
sql: SQL query or table name
con: database connection or url string
partition_column: column used to share the data between the workers
Returns:
Columns name list and query string
|
juraj-google-style
|
def every_match(self, callback, **kwargs):
if (len(kwargs) == 0):
raise ArgumentError('You must specify at least one message field to wait on')
spec = MessageSpec(**kwargs)
responder = self._add_waiter(spec, callback)
return (spec, responder)
|
Invoke callback every time a matching message is received.
The callback will be invoked directly inside process_message so that
you can guarantee that it has been called by the time process_message
has returned.
The callback can be removed by a call to remove_waiter(), passing the
handle object returned by this call to identify it.
Args:
callback (callable): A callable function that will be called as
callback(message) whenever a matching message is received.
Returns:
object: An opaque handle that can be passed to remove_waiter().
This handle is the only way to remove this callback if you no
longer want it to be called.
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
second_residual = hidden_states
cross_attn_weights = None
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = second_residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def attach(self, engine, start=Events.STARTED, pause=Events.COMPLETED, resume=None, step=None):
engine.add_event_handler(start, self.reset)
engine.add_event_handler(pause, self.pause)
if (resume is not None):
engine.add_event_handler(resume, self.resume)
if (step is not None):
engine.add_event_handler(step, self.step)
return self
|
Register callbacks to control the timer.
Args:
engine (Engine):
Engine that this timer will be attached to.
start (Events):
Event which should start (reset) the timer.
pause (Events):
Event which should pause the timer.
resume (Events, optional):
Event which should resume the timer.
step (Events, optional):
Event which should call the `step` method of the counter.
Returns:
self (Timer)
|
codesearchnet
|
def parse_iso8601_str(string):
datetime_obj = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
return int(calendar.timegm(datetime_obj.utctimetuple()))
|
Parse a fixed ISO8601 datetime string.
.. Note:: This function only parses dates in the format
``%Y-%m-%dT%H:%M:%SZ``. You must use a library like ``dateutils``
to properly parse dates and times.
Returns:
int: A UNIX timestamp.
|
codesearchnet
|
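A small self-contained sketch of the same fixed-format parse shown above, with a value that is easy to verify against any epoch converter (the 2024-01-01 example is an assumption added for illustration):

import calendar
import datetime

def parse_iso8601_str(string):
    # Only the exact '%Y-%m-%dT%H:%M:%SZ' layout is accepted.
    datetime_obj = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
    return int(calendar.timegm(datetime_obj.utctimetuple()))

assert parse_iso8601_str('2024-01-01T00:00:00Z') == 1704067200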
def _any_overlap_or_contiguous(self, test_overlap: bool) -> bool:
for i in range(len(self.intervals)):
for j in range((i + 1), len(self.intervals)):
first = self.intervals[i]
second = self.intervals[j]
if test_overlap:
test = first.overlaps(second)
else:
test = first.contiguous(second)
if test:
return True
return False
|
Do any of the intervals overlap?
Args:
test_overlap: if ``True``, test for overlapping intervals; if
``False``, test for contiguous intervals.
|
codesearchnet
|
def _bigbird_block_rand_mask(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1):
if from_seq_length // from_block_size != to_seq_length // to_block_size:
raise ValueError('Error the number of blocks needs to be same!')
rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
if not self.training:
return rand_attn
middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
last = to_seq_length // to_block_size - 1
if last_idx > 2 * to_block_size:
last = last_idx // to_block_size - 1
r = num_rand_blocks
for i in range(1, from_seq_length // from_block_size - 1):
start = i - 2
end = i
if i == 1:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
elif i == 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
elif i == from_seq_length // from_block_size - 3:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
elif i == from_seq_length // from_block_size - 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
elif start > last:
start = last
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
elif end + 1 == last:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
else:
rand_attn[i - 1, :] = np.random.permutation(np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r]
return rand_attn
|
Create adjacency list of random attention.
Args:
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
num_rand_blocks: int. Number of random chunks per row.
last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
if positive then num_rand_blocks blocks chosen only up to last_idx.
Returns:
adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
|
github-repos
|
def _verify_docker_image_size(self, image_name):
shell_call(['docker', 'pull', image_name])
try:
image_size = subprocess.check_output(
['docker', 'inspect', '--format={{.Size}}', image_name]).strip()
image_size = int(image_size)
except (ValueError, subprocess.CalledProcessError) as e:
logging.error('Failed to determine docker image size: %s', e)
return False
logging.info('Size of docker image %s is %d', image_name, image_size)
if image_size > MAX_DOCKER_IMAGE_SIZE:
logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)
return image_size <= MAX_DOCKER_IMAGE_SIZE
|
Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise.
|
juraj-google-style
|
def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
return LayerStack([cls() for cls in layers] * num_layers)
|
Configurable layer stack.
Args:
layers: a list of subclasses of TransformerLayer
num_layers: an integer
Returns:
a LayerStack
|
juraj-google-style
|
def GetPresetsInformation(cls):
parser_presets_information = []
for preset_definition in ParsersManager.GetPresets():
preset_information_tuple = (preset_definition.name, ', '.join(preset_definition.parsers))
parser_presets_information.append(preset_information_tuple)
return parser_presets_information
|
Retrieves the presets information.
Returns:
list[tuple]: containing:
str: preset name
str: comma separated parser names that are defined by the preset
|
codesearchnet
|
class UnivNetModelOutput(ModelOutput):
waveforms: Optional[torch.FloatTensor] = None
waveform_lengths: Optional[torch.FloatTensor] = None
|
Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded
lengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]).
Args:
waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Batched 1D (mono-channel) output audio waveforms.
waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`):
The batched length in samples of each unpadded waveform in `waveforms`.
|
github-repos
|
def EnsureGdbPosition(self, pid, tid, frame_depth):
position = [pid, tid, frame_depth]
if not pid:
return
if not self.IsAttached():
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if gdb.selected_inferior().pid != pid:
self.Detach()
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if tid:
tstate_head = GdbCache.INTERP_HEAD['tstate_head']
for tstate in self._IterateChainedList(tstate_head, 'next'):
if tid == tstate['thread_id']:
self.selected_tstate = tstate
break
else:
raise PositionUnavailableException('Thread %s does not exist.' %
str(tid))
stack_head = self.selected_tstate['frame']
if frame_depth is not None:
frames = list(self._IterateChainedList(stack_head, 'f_back'))
frames.reverse()
try:
self.selected_frame = frames[frame_depth]
except IndexError:
raise PositionUnavailableException('Stack is not %s frames deep' %
str(frame_depth + 1))
|
Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
|
juraj-google-style
|
def _kernel(kernel_spec):
if isinstance(kernel_spec, tf.compat.integral_types):
return [kernel_spec, kernel_spec]
elif (len(kernel_spec) == 1):
return [kernel_spec[0], kernel_spec[0]]
else:
assert (len(kernel_spec) == 2)
return kernel_spec
|
Expands the kernel spec into a length 2 list.
Args:
kernel_spec: An integer or a length 1 or 2 sequence that is expanded to a
list.
Returns:
A length 2 list.
|
codesearchnet
|
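The same expansion logic as the `_kernel` helper above, shown without the TensorFlow type check so it can be run directly; the `isinstance(..., int)` test stands in for `tf.compat.integral_types` and the function name is hypothetical:

def expand_kernel_spec(kernel_spec):
    # Integers expand to [k, k]; length-1 sequences to [k0, k0]; length-2 pass through.
    if isinstance(kernel_spec, int):
        return [kernel_spec, kernel_spec]
    if len(kernel_spec) == 1:
        return [kernel_spec[0], kernel_spec[0]]
    assert len(kernel_spec) == 2
    return list(kernel_spec)

assert expand_kernel_spec(3) == [3, 3]
assert expand_kernel_spec([5]) == [5, 5]
assert expand_kernel_spec((2, 4)) == [2, 4]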
def connect_to(self, vertex, weight=1):
for edge in self.edges_out:
if vertex == edge.vertex_in:
return edge
return Edge(self, vertex, weight)
|
Connect this vertex to another one.
Args:
vertex (Vertex): vertex to connect to.
weight (int): weight of the edge.
Returns:
Edge: the newly created edge.
|
juraj-google-style
|
def from_storage(source, source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0, compressed=False, schema=None):
result = FederatedTable()
if (source_format == 'csv'):
result._bq_source_format = 'CSV'
if (csv_options is None):
csv_options = _csv_options.CSVOptions()
elif (source_format == 'json'):
if csv_options:
raise Exception('CSV options are not support for JSON tables')
result._bq_source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception(('Invalid source format %s' % source_format))
result._source = (source if isinstance(source, list) else [source])
result._source_format = source_format
result._csv_options = csv_options
result._ignore_unknown_values = ignore_unknown_values
result._max_bad_records = max_bad_records
result._compressed = compressed
result._schema = schema
return result
|
Create an external table for a GCS object.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: For CSV files, the options such as quote character and delimiter.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
compressed: whether the data is GZ compressed or not (default False). Note that compressed
data can be used as a federated table but cannot be loaded into a BQ Table.
schema: the schema of the data. This is required for this table to be used as a federated
table or to be loaded using a Table object that itself has no schema (default None).
|
codesearchnet
|
def padded_urlsafe_b64decode(value):
b64string = to_bytes(value)
padded = b64string + b'=' * (-len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
|
Decodes base64 strings lacking padding characters.
Google infrastructure tends to omit the base64 padding characters.
Args:
value (Union[str, bytes]): The encoded value.
Returns:
bytes: The decoded value
|
juraj-google-style
|
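A standalone sketch of the padding trick used above; `to_bytes` is replaced here by a plain encode so the snippet runs on its own (an assumption, not the library helper):

import base64

def padded_urlsafe_b64decode(value):
    b64string = value.encode('ascii') if isinstance(value, str) else value
    # -len % 4 yields 0..3, exactly the number of '=' characters that were omitted.
    padded = b64string + b'=' * (-len(b64string) % 4)
    return base64.urlsafe_b64decode(padded)

assert padded_urlsafe_b64decode('aGVsbG8') == b'hello'  # decodes despite the missing '='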
def _write_cache(step, event_file_suffix=None, **kwargs):
file_suffix = _TT_EVENT_FILE_SUFFIX
if event_file_suffix is not None:
file_suffix = string_ops.string_join([file_suffix, event_file_suffix], separator='.')
summary_write_ops = []
summary_writer = summary.create_file_writer_v2(self._parameters.trace_dir, filename_suffix=file_suffix, max_queue=_TT_SUMMARY_MAX_QUEUE)
graph.add_to_collection(TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)
step_value = step[0]
dt = step_value.dtype
if dt.__ne__(dtypes.int64) and dt.__ne__(dtypes.uint64) and dt.__ne__(dtypes.float64):
step_value = math_ops.cast(step_value, dtypes.int64)
with summary_writer.as_default():
summary_metadata = summary_pb2.SummaryMetadata(plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))
for key, value in kwargs.items():
if not self._parameters.collect_summary_per_core:
if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:
value = self.aggregate_global_cache(value)
with ops.control_dependencies([summary_writer.init()]):
summary_write_ops.append(summary.write(_TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, value, metadata=summary_metadata, step=step_value))
return control_flow_ops.group(summary_write_ops)
|
Writes the given caches as tensor summary.
Args:
step: Step tensor with dimension [num_cores].
event_file_suffix: Event filename suffix tensor.
**kwargs: The dictionary of tensors that needs to be written as
summaries. Key and value pairs within kwargs correspond to the tag
name, and tensor content that will be written using summary.write.
The trace_modes that use this function are:
- summary: In summary mode, kwargs includes a single (tag, content)
pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache
variable. The dimension of the signature_cache is:
num_cores x num_traced_tensors x num_signatures.
- full_tensor_summary: kwargs will include all traced tensors. Tag
and content correspond to the name of the tensor, and its actual
content.
Returns:
A tf.Operation that needs to be executed for the host call dependencies.
|
github-repos
|
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) -> Tuple[(bool, str, dict)]:
if (not self._url_filter):
return (True, 'nofilters', None)
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info['verdict']
if verdict:
reason = 'filters'
elif (is_redirect and self.is_only_span_hosts_failed(test_info)):
verdict = True
reason = 'redirect'
else:
reason = 'filters'
return (verdict, reason, test_info)
|
Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns:
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
|
codesearchnet
|
def path_to_text(self, path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
pages_data = PDFPage.get_pages(
fp,
pagenos,
maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True
)
for page in pages_data:
interpreter.process_page(page)
text = retstr.getvalue()
text = text.replace("\n", "")
fp.close()
device.close()
retstr.close()
return text
|
Transform local PDF file to string.
Args:
path: path to PDF file.
Returns:
string.
|
juraj-google-style
|
def paragraphs(self, index = None):
if index is None:
return self.select(Paragraph,None,True,default_ignore_structure)
else:
if index < 0:
index = self.count(Paragraph,None,True,default_ignore_structure) + index
for i,e in enumerate(self.select(Paragraph,None,True,default_ignore_structure)):
if i == index:
return e
raise IndexError
|
Returns a generator of Paragraph elements found (recursively) under this element.
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the generator of all
|
juraj-google-style
|
def _callEventWaitAndGet(self, callback_id, event_name, timeout):
timeout_ms = int(timeout * 1000)
return self._event_client.eventWaitAndGet(callback_id, event_name, timeout_ms)
|
Calls snippet lib's eventWaitAndGet.
Override this method to use this class with various snippet lib
implementations.
Args:
callback_id: The callback identifier.
event_name: The callback name.
timeout: The number of seconds to wait for the event.
Returns:
The event dictionary.
|
github-repos
|
def format(self, record):
if record.levelno >= logging.ERROR:
color = colorama.Fore.RED
elif record.levelno >= logging.WARNING:
color = colorama.Fore.YELLOW
elif record.levelno >= logging.INFO:
color = colorama.Fore.RESET
else:
color = colorama.Fore.CYAN
format_template = (
'{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')
if sys.stdout.isatty():
self._fmt = format_template.format(
colorama.Style.BRIGHT,
color,
colorama.Fore.RESET,
colorama.Style.RESET_ALL
)
else:
self._fmt = format_template.format(*[''] * 4)
if six.PY3:
self._style._fmt = self._fmt
return super(_LogColorFormatter, self).format(record)
|
Format the log record with timestamps and level based colors.
Args:
record: The log record to format.
Returns:
The formatted log record.
|
juraj-google-style
|
def ParseFileObject(self, parser_mediator, file_object):
data = file_object.read(self._HEADER_READ_SIZE)
if (not data.startswith(b'<?xml')):
raise errors.UnableToParseFile('Not an Android usage history file [not XML]')
(_, _, data) = data.partition(b'\n')
if (not data.startswith(b'<usage-history')):
raise errors.UnableToParseFile('Not an Android usage history file [wrong XML root key]')
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
root_node = xml.getroot()
for application_node in root_node:
package_name = application_node.get('name', None)
for part_node in application_node.iter():
if (part_node.tag != 'comp'):
continue
last_resume_time = part_node.get('lrt', None)
if (last_resume_time is None):
parser_mediator.ProduceExtractionWarning('missing last resume time.')
continue
try:
last_resume_time = int(last_resume_time, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning('unsupported last resume time: {0:s}.'.format(last_resume_time))
continue
event_data = AndroidAppUsageEventData()
event_data.component = part_node.get('name', None)
event_data.package = package_name
date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an Android usage-history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
codesearchnet
|
def get_qubit_los(self, user_lo_config):
try:
_q_los = self.default_qubit_los.copy()
except KeyError:
raise PulseError('Qubit default frequencies not exist.')
for (channel, lo_freq) in user_lo_config.qubit_lo_dict().items():
_q_los[channel.index] = lo_freq
if (_q_los == self.default_qubit_los):
return None
return _q_los
|
Embed default qubit LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of qubit LOs.
Raises:
PulseError: when LO frequencies are missing.
|
codesearchnet
|
def set_category(self, category):
pcategory = self.find("general/category")
pcategory.clear()
name = ElementTree.SubElement(pcategory, "name")
if isinstance(category, Category):
id_ = ElementTree.SubElement(pcategory, "id")
id_.text = category.id
name.text = category.name
elif isinstance(category, basestring):
name.text = category
|
Set the policy's category.
Args:
category: A category object.
|
juraj-google-style
|
def roll_to_business_day(self, date_tensor, roll_convention):
pass
|
Rolls the given dates to business dates according to given convention.
Args:
date_tensor: DateTensor of dates to roll from.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
|
github-repos
|
def incomplete_size(self, name=None):
if name is None:
name = '%s_BarrierIncompleteSize' % self._name
return gen_data_flow_ops.barrier_incomplete_size(self._barrier_ref, name=name)
|
Compute the number of incomplete elements in the given barrier.
Args:
name: A name for the operation (optional).
Returns:
A single-element tensor containing the number of incomplete elements in
the given barrier.
|
github-repos
|
def least_squares_effective_mass( cartesian_k_points, eigenvalues ):
if not points_are_in_a_straight_line( cartesian_k_points ):
raise ValueError( 'k-points are not collinear' )
dk = cartesian_k_points - cartesian_k_points[0]
mod_dk = np.linalg.norm( dk, axis = 1 )
delta_e = eigenvalues - eigenvalues[0]
effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 )
return effective_mass
|
Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised.
|
juraj-google-style
|
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin
|
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
|
juraj-google-style
|
def __init__(self, baseplate, token, actor_urn, *args, **kwargs):
self.baseplate = baseplate
self.rest_baseurl = 'https://discordapp.com/api/v6'
self.token = token
self.headers = {"Authorization": "Bot {}".format(token),
"User-Agent": "Legobot",
"Content-Type": "application/json"}
self.actor_urn = actor_urn
self.ws = None
threading.Thread.__init__(self)
|
Initialize DiscoBot
Args:
baseplate (Legobot.Lego): The parent Pykka actor.
Typically passed in from Legobot.Connectors.Discord.Discord
token (string): Discord bot token
actor_urn (string): URN of Pykka actor launching DiscoBot
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
|
juraj-google-style
|
def get_threads(self, page=1):
url = self._url.page_url(page)
return self._request_threads(url)
|
Returns all threads on a certain page.
Gets a list of Thread objects for every thread on the given page. If a thread is
already in our cache, the cached version is returned and thread.want_update is
set to True on the specific thread object.
Pages on 4chan are indexed from 1 onwards.
Args:
page (int): Page to request threads for. Defaults to the first page.
Returns:
list of :mod:`basc_py4chan.Thread`: List of Thread objects representing the threads on the given page.
|
codesearchnet
|
def weights_multi_problem(labels, taskid=-1):
taskid = check_nonnegative(taskid)
past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
past_taskid *= to_float(tf.not_equal(labels, taskid))
non_taskid = to_float(labels)
return to_float(tf.not_equal(past_taskid * non_taskid, 0))
|
Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all labels past the taskid.
Args:
labels: A Tensor of int32s.
taskid: an int32 representing the task id for a problem.
Returns:
A Tensor of floats.
Raises:
ValueError: The Task ID must be valid.
|
juraj-google-style
|
def _patch_expand_path(self, settings, name, value):
if os.path.isabs(value):
return os.path.normpath(value)
value = os.path.expanduser(value)
if ((not os.path.isabs(value)) and self.projectdir):
value = os.path.join(self.projectdir, value)
return os.path.normpath(value)
|
Patch a path to expand home directory and make absolute path.
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Path to patch.
Returns:
str: Patched path to an absolute path.
|
codesearchnet
|
def from_yang(self, text: str) -> ScalarValue:
res = self.parse_value(text)
if (res is None):
raise InvalidArgument(text)
return res
|
Parse value specified in a YANG module.
Args:
text: String representation of the value.
Raises:
InvalidArgument: If the receiver type cannot parse the text.
|
codesearchnet
|
def print_solution(model, solver):
model_proto = model.Proto()
response_proto = solver.ResponseProto()
variables_in_objective_map = {}
maximization = False
if model_proto.HasField('objective'):
objective = model_proto.objective
for i in range(len(objective.vars)):
variables_in_objective_map[objective.vars[i]] = objective.coeffs[i]
if (objective.scaling_factor < 0.0):
maximization = True
variable_assignments = []
variables_in_objective = []
num_vars = len(model_proto.variables)
for var_index in range(num_vars):
if (not model_proto.variables[var_index].name):
continue
variable_name = model_proto.variables[var_index].name
if (var_index in variables_in_objective_map):
coefficient = variables_in_objective_map[var_index]
if coefficient:
if maximization:
coefficient *= (- 1)
if (coefficient < 0):
variables_in_objective.append(' - {} * {}'.format((- coefficient), variable_name))
elif (coefficient > 0):
variables_in_objective.append(' + {} * {}'.format(coefficient, variable_name))
variable_assignments.append(' {} = {}\n'.format(variable_name, response_proto.solution[var_index]))
print(''.join(variable_assignments), end='')
if (variables_in_objective and (variables_in_objective[0][1] == '+')):
variables_in_objective[0] = variables_in_objective[0][2:]
print('{}:{}'.format(('Maximize' if maximization else 'Minimize'), ''.join(variables_in_objective)))
print('Objective value: {}\n'.format(solver.ObjectiveValue()))
|
Prints the solution associated with solver.
If solver has already had Solve() called on it, prints the solution. This
includes each variable and its assignment, along with the objective function
and its optimal value.
If solver has not had Solve() called on it, or there is no feasible solution,
this will probably crash.
Args:
model: A pywrapcp.CpModel object.
solver: A pywrapcp.CpSolver object.
Returns:
Nothing, but prints the solution associated with solver.
|
codesearchnet
|
def check_status(self, **kwargs):
for work in self:
work.check_status()
if kwargs.pop('show', False):
self.show_status(**kwargs)
|
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
|
codesearchnet
|
def RegisterRecordType(cls, record_class):
record_type = record_class.MatchType()
if (record_type not in UpdateRecord.KNOWN_CLASSES):
UpdateRecord.KNOWN_CLASSES[record_type] = []
UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)
|
Register a known record type in KNOWN_CLASSES.
Args:
record_class (UpdateRecord): An update record subclass.
|
codesearchnet
|
def run_from_cli(self, args):
if args['--dump-config']:
self._config.print_config()
else:
(stdout, stderr) = self.lint(args['<path>'])
self.print_results(stdout, stderr)
|
Read arguments, run and print results.
Args:
args (dict): Arguments parsed by docopt.
|
codesearchnet
|
def ParsePartitionsTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCachePartitionsEventData()
event_data.directory = record_values.get('Directory', None)
event_data.partition_identifier = record_values.get('PartitionId', None)
event_data.partition_type = record_values.get('PartitionType', None)
event_data.table_identifier = record_values.get('TableId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses the Partitions table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
|
juraj-google-style
|
def get_db_row(db, start, size):
type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]
data = client.db_read(db, start, type_, size)
return data
|
Here you see an example of reading out a part of a DB
Args:
db (int): The db to use
start (int): The index of where to start in db data
size (int): The size of the db data to read
|
juraj-google-style
|
def mkdirs(path):
filesystem = FileSystems.get_filesystem(path)
return filesystem.mkdirs(path)
|
Recursively create directories for the provided path.
Args:
path: string path of the directory structure that should be created
Raises:
IOError: if leaf directory already exists.
|
github-repos
|
def node_run(input_file, coords_only, bc_settings, bc_grid_weights):
log = logging.getLogger('pyspark')
log.setLevel(logging.INFO)
if (len(log.handlers) == 0):
log.addHandler(logging.StreamHandler(sys.stdout))
precision = bc_settings.value['precision']
imager = oskar.Imager(precision)
for (key, value) in bc_settings.value['imager'].items():
setattr(imager, key, value)
grid_size = imager.plane_size
grid_weights = None
ms_han = oskar.MeasurementSet.open(input_file)
if coords_only:
if (imager.weighting == 'Uniform'):
grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)
log.info('Reading coordinates from %s', input_file)
imager.coords_only = True
process_input_data(ms_han, imager, None, grid_weights)
imager.coords_only = False
return (grid_weights, imager.num_w_planes)
grid_data = numpy.zeros([grid_size, grid_size], dtype=('c8' if (precision == 'single') else 'c16'))
log.info('Reading visibilities from %s', input_file)
if bc_settings.value['combine']:
if (imager.weighting == 'Uniform'):
grid_weights = bc_grid_weights.value
grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)
log.info('Returning gridded visibilities to RDD')
return (grid_data, grid_norm)
else:
if (imager.weighting == 'Uniform'):
grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)
if ((imager.weighting == 'Uniform') or (imager.algorithm == 'W-projection')):
imager.coords_only = True
process_input_data(ms_han, imager, None, grid_weights)
imager.coords_only = False
grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)
output_file = (splitext(input_file)[0] + '.fits')
save_image(imager, grid_data, grid_norm, output_file)
log.info('Finished. Output file is %s', output_file)
return 0
|
Main function to process visibility data on Spark cluster nodes.
Args:
input_file (str):
RDD element containing filename to process.
coords_only (boolean):
If true, read only baseline coordinates to define the weights grid.
bc_settings (pyspark.broadcast.Broadcast):
Spark broadcast variable containing pipeline settings dictionary.
bc_grid_weights (pyspark.broadcast.Broadcast):
Spark broadcast variable containing weights grid. May be None.
Returns:
tuple: Output RDD element.
|
codesearchnet
|
def configure_stream(level='WARNING'):
root_logger = logging.getLogger()
root_logger.setLevel(level)
template = '[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s'
formatter = logging.Formatter(template)
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger
|
Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
|
codesearchnet
|
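A self-contained sketch reproducing the same console-handler setup as configure_stream above, followed by a log call that exercises it.

import logging

# Attach a stream handler with the same template to the root logger.
root = logging.getLogger()
root.setLevel('INFO')
handler = logging.StreamHandler()
handler.setLevel('INFO')
handler.setFormatter(logging.Formatter('[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s'))
root.addHandler(handler)

logging.getLogger('demo').info('stream logging configured')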
def eval_image(image, height, width, scope=None):
with tf.name_scope(values=[image, height, width], name=scope,
default_name='eval_image'):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
return image
|
Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
|
juraj-google-style
|
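A hypothetical driver for the eval_image entry above, reproducing the same crop-and-resize steps on a random image; it assumes TensorFlow is installed and uses the TF1 compatibility API because the entry relies on graph-mode ops.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Central-crop to 87.5% and resize to 224x224, as in eval_image.
image = tf.constant(np.random.rand(256, 256, 3), dtype=tf.float32)
cropped = tf.image.central_crop(image, central_fraction=0.875)
resized = tf.image.resize_bilinear(tf.expand_dims(cropped, 0), [224, 224], align_corners=False)
resized = tf.squeeze(resized, [0])

with tf.Session() as sess:
    print(sess.run(resized).shape)  # (224, 224, 3)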
def serialize_cert_to_pem(cert_obj):
return cert_obj.public_bytes(encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM)
|
Serialize certificate to PEM.
The certificate can also be a Certificate Signing Request (CSR).
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded certificate
|
codesearchnet
|
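A hedged sketch for the serialize_cert_to_pem entry above, assuming a recent version of the cryptography package; it builds a throwaway self-signed certificate purely so there is an object to serialize.

import datetime

from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

# Throwaway key and self-signed certificate for demonstration only.
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'example.org')])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.utcnow())
    .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
    .sign(key, hashes.SHA256())
)

# Same call as in the entry: serialize the certificate to PEM bytes.
pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
print(pem.splitlines()[0])  # b'-----BEGIN CERTIFICATE-----'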
def get_summary_dict(self, include_msd_t=False, include_mscd_t=False):
d = {
"D": self.diffusivity,
"D_sigma": self.diffusivity_std_dev,
"D_charge": self.chg_diffusivity,
"D_charge_sigma": self.chg_diffusivity_std_dev,
"S": self.conductivity,
"S_sigma": self.conductivity_std_dev,
"S_charge": self.chg_conductivity,
"D_components": self.diffusivity_components.tolist(),
"S_components": self.conductivity_components.tolist(),
"D_components_sigma": self.diffusivity_components_std_dev.tolist(),
"S_components_sigma": self.conductivity_components_std_dev.tolist(),
"specie": str(self.specie),
"step_skip": self.step_skip,
"time_step": self.time_step,
"temperature": self.temperature,
"max_framework_displacement": self.max_framework_displacement,
"Haven_ratio": self.haven_ratio
}
if include_msd_t:
d["msd"] = self.msd.tolist()
d["msd_components"] = self.msd_components.tolist()
d["dt"] = self.dt.tolist()
if include_mscd_t:
d["mscd"] = self.mscd.tolist()
return d
|
Provides a summary of diffusion information.
Args:
include_msd_t (bool): Whether to include mean square displacement and
time data with the data.
include_mscd_t (bool): Whether to include mean square charge displacement and
time data with the data.
Returns:
(dict) of diffusion and conductivity data.
|
juraj-google-style
|
def _get_summary_signatures(self):
signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES)
supported_signatures = self._supported_signatures()
tt_signatures = []
for signature in signatures:
signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature)
if signature in supported_signatures:
tt_signatures.append(signature)
elif signature_with_prefix in supported_signatures:
tt_signatures.append(signature_with_prefix)
else:
logging.warning('Unknown signature:%s. Supported signatures: %s' % (signature, supported_signatures))
if not tt_signatures:
return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1}
else:
return {signature: idx for idx, signature in enumerate(tt_signatures)}
|
Verifies and returns the summary signatures.
Returns:
A dictionary of the signature identifiers {signature: index} that will be
computed when trace_mode is summary.
|
github-repos
|
def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):
return list(filter(None, key_path.split(path_separator)))
|
Splits the key path into path segments.
Args:
key_path (str): key path.
path_separator (Optional[str]): path separator.
Returns:
list[str]: key path segments without the root path segment, which is an
empty string.
|
juraj-google-style
|
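A standalone illustration of the SplitKeyPath behaviour above: splitting on the separator and dropping empty segments such as the leading root segment.

# Windows Registry style key path with a leading separator.
key_path = '\\HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows'
segments = list(filter(None, key_path.split('\\')))
print(segments)  # ['HKEY_LOCAL_MACHINE', 'Software', 'Microsoft', 'Windows']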
def get_cache_key(**kwargs):
key = '__'.join(['{}:{}'.format(item, value) for (item, value) in iteritems(kwargs)])
return hashlib.md5(key.encode('utf-8')).hexdigest()
|
Get MD5 encoded cache key for given arguments.
Here is the format of the key before MD5 hashing.
key1:value1__key2:value2 ...
Example:
>>> get_cache_key(site_domain="example.com", resource="enterprise")
# Here is key format for above call
# "site_domain:example.com__resource:enterprise"
a54349175618ff1659dee0978e3149ca
Arguments:
**kwargs: Key word arguments that need to be present in cache key.
Returns:
An MD5 encoded key uniquely identified by the key word arguments.
|
codesearchnet
|
def _dispatch_command(self, command):
if command in self.CLI_EXIT_COMMANDS:
return debugger_cli_common.EXPLICIT_USER_EXIT
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
print(str(e))
return
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(prefix, args, screen_info=None)
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([self.ERROR_MESSAGE_PREFIX + 'Invalid command prefix "%s"' % prefix])
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
print('Wrote output to %s' % output_file_path)
except Exception:
print('Failed to write output to %s' % output_file_path)
|
Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
|
github-repos
|
def get_proposed_feature(project):
change_collector = ChangeCollector(project)
collected_changes = change_collector.collect_changes()
try:
new_feature_info = one_or_raise(collected_changes.new_feature_info)
importer, _, _ = new_feature_info
except ValueError:
raise BalletError('Too many features collected')
module = importer()
feature = _get_contrib_feature_from_module(module)
return feature
|
Get the proposed feature
The path of the proposed feature is determined by diffing the project
against a comparison branch, such as master. The feature is then imported
from that path and returned.
Args:
project (ballet.project.Project): project info
Raises:
ballet.exc.BalletError: more than one feature collected
|
juraj-google-style
|
def __init__(self, *dic):
super().__init__()
self.value = [ArraySingle()]
self.l = self.value[0].value
|
init
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate a complex config dictionary.
|
juraj-google-style
|
def line_iter(xo: int, yo: int, xd: int, yd: int) -> Iterator[Tuple[int, int]]:
data = ffi.new("TCOD_bresenham_data_t *")
lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = ffi.new("int *")
y = ffi.new("int *")
yield xo, yo
while not lib.TCOD_line_step_mt(x, y, data):
yield (x[0], y[0])
|
Returns an Iterable over the points of a Bresenham line.
This Iterable includes the origin point, which is yielded first.
Args:
xo (int): X starting point.
yo (int): Y starting point.
xd (int): X destination point.
yd (int): Y destination point.
Returns:
Iterable[Tuple[int,int]]: An Iterable of (x,y) points.
|
juraj-google-style
|
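The line_iter entry above delegates to libtcod's C routine; the sketch below is a plain-Python Bresenham walk with the same (x, y) tuple interface, offered only as an illustrative stand-in rather than the library's implementation.

from typing import Iterator, Tuple

def bresenham_iter(xo: int, yo: int, xd: int, yd: int) -> Iterator[Tuple[int, int]]:
    # Classic integer Bresenham; yields every point from origin to destination inclusive.
    dx, dy = abs(xd - xo), -abs(yd - yo)
    sx = 1 if xo < xd else -1
    sy = 1 if yo < yd else -1
    err = dx + dy
    x, y = xo, yo
    while True:
        yield x, y
        if x == xd and y == yd:
            return
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x += sx
        if e2 <= dx:
            err += dx
            y += sy

print(list(bresenham_iter(0, 0, 3, 2)))  # [(0, 0), (1, 1), (2, 1), (3, 2)]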
def fingerprint(self):
return gen_dataset_ops.dataset_fingerprint(self._variant_tensor)
|
Computes the fingerprint of this `Dataset`.
If two datasets have the same fingerprint, it is guaranteed that they
would produce identical elements as long as the content of the upstream
input files does not change and they produce data deterministically.
However, two datasets producing identical values does not always mean they
would have the same fingerprint due to different graph constructs.
In other words, if two datasets have different fingerprints, they could
still produce identical values.
Returns:
A scalar `tf.Tensor` of type `tf.uint64`.
|
github-repos
|
def parse_frequencies(variant, transcripts):
frequencies = {}
thousand_genomes_keys = ['1000GAF']
thousand_genomes_max_keys = ['1000G_MAX_AF']
exac_keys = ['EXACAF']
exac_max_keys = ['ExAC_MAX_AF', 'EXAC_MAX_AF']
gnomad_keys = ['GNOMADAF', 'GNOMAD_AF']
gnomad_max_keys = ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']
for test_key in thousand_genomes_keys:
thousand_g = parse_frequency(variant, test_key)
if thousand_g:
frequencies['thousand_g'] = thousand_g
break
for test_key in thousand_genomes_max_keys:
thousand_g_max = parse_frequency(variant, test_key)
if thousand_g_max:
frequencies['thousand_g_max'] = thousand_g_max
break
for test_key in exac_keys:
exac = parse_frequency(variant, test_key)
if exac:
frequencies['exac'] = exac
break
for test_key in exac_max_keys:
exac_max = parse_frequency(variant, test_key)
if exac_max:
frequencies['exac_max'] = exac_max
break
for test_key in gnomad_keys:
gnomad = parse_frequency(variant, test_key)
if gnomad:
frequencies['gnomad'] = gnomad
break
for test_key in gnomad_max_keys:
gnomad_max = parse_frequency(variant, test_key)
if gnomad_max:
frequencies['gnomad_max'] = gnomad_max
break
if (not frequencies):
for transcript in transcripts:
exac = transcript.get('exac_maf')
exac_max = transcript.get('exac_max')
thousand_g = transcript.get('thousand_g_maf')
thousandg_max = transcript.get('thousandg_max')
gnomad = transcript.get('gnomad_maf')
gnomad_max = transcript.get('gnomad_max')
if exac:
frequencies['exac'] = exac
if exac_max:
frequencies['exac_max'] = exac_max
if thousand_g:
frequencies['thousand_g'] = thousand_g
if thousandg_max:
frequencies['thousand_g_max'] = thousandg_max
if gnomad:
frequencies['gnomad'] = gnomad
if gnomad_max:
frequencies['gnomad_max'] = gnomad_max
thousand_g_left = parse_frequency(variant, 'left_1000GAF')
if thousand_g_left:
frequencies['thousand_g_left'] = thousand_g_left
thousand_g_right = parse_frequency(variant, 'right_1000GAF')
if thousand_g_right:
frequencies['thousand_g_right'] = thousand_g_right
return frequencies
|
Add the frequencies to a variant
Frequencies are parsed either directly from keys in the INFO field or from the
transcripts if they are annotated there.
Args:
variant(cyvcf2.Variant): A parsed vcf variant
transcripts(iterable(dict)): Parsed transcripts
Returns:
frequencies(dict): A dictionary with the relevant frequencies
|
codesearchnet
|
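A compact standalone illustration of the "first matching key wins" lookup used by parse_frequencies above; the plain dict stands in for a cyvcf2 variant's INFO field, and the keys and values are hypothetical.

info = {'GNOMAD_AF': 0.0123}

def first_frequency(info, keys):
    # Try annotation keys in priority order and stop at the first one present.
    for key in keys:
        value = info.get(key)
        if value is not None:
            return float(value)
    return None

print(first_frequency(info, ['GNOMADAF', 'GNOMAD_AF']))  # 0.0123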
def list_groups(self, filtr=None):
return self.service.list_groups(
filtr, self.url_prefix, self.auth, self.session,
self.session_send_opts)
|
Get the groups the logged in user is a member of.
Optionally filter by 'member' or 'maintainer'.
Args:
filtr (optional[string|None]): ['member'|'maintainer'] or defaults to None.
Returns:
(list[string]): List of group names.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, pos=None, end=None):
query = {}
if chromosome:
query['chrom'] = chromosome
if end_chromosome:
query['end_chrom'] = end_chromosome
if sv_type:
query['sv_type'] = sv_type
if pos:
if (not ('$and' in query)):
query['$and'] = []
query['$and'].append({'pos_left': {'$lte': pos}})
query['$and'].append({'pos_right': {'$gte': pos}})
if end:
if (not ('$and' in query)):
query['$and'] = []
query['$and'].append({'end_left': {'$lte': end}})
query['$and'].append({'end_right': {'$gte': end}})
LOG.info('Find all sv variants {}'.format(query))
return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])
|
Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant))
|
codesearchnet
|
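A standalone sketch of how the positional $and clauses in get_sv_variants above are assembled; it only builds the query dictionary and involves no database connection.

# Overlap query for a deletion covering position 150000 on chromosome 1.
pos = 150000
query = {'chrom': '1', 'sv_type': 'DEL'}
query.setdefault('$and', []).extend([
    {'pos_left': {'$lte': pos}},   # interval starts at or before the query position
    {'pos_right': {'$gte': pos}},  # and ends at or after it
])
print(query)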
def is_ready(self, node_id, metadata_priority=True):
if (not self._can_send_request(node_id)):
return False
if metadata_priority:
if self._metadata_refresh_in_progress:
return False
if (self.cluster.ttl() == 0):
return False
return True
|
Check whether a node is ready to send more requests.
In addition to connection-level checks, this method also is used to
block additional requests from being sent during a metadata refresh.
Arguments:
node_id (int): id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if the node is ready and metadata is not refreshing
|
codesearchnet
|
def evpn_instance_rd_auto(self, **kwargs):
config = ET.Element('config')
rbridge_id = ET.SubElement(config, 'rbridge-id', xmlns='urn:brocade.com:mgmt:brocade-rbridge')
rbridge_id_key = ET.SubElement(rbridge_id, 'rbridge-id')
rbridge_id_key.text = kwargs.pop('rbridge_id')
evpn_instance = ET.SubElement(rbridge_id, 'evpn-instance', xmlns='urn:brocade.com:mgmt:brocade-bgp')
instance_name_key = ET.SubElement(evpn_instance, 'instance-name')
instance_name_key.text = kwargs.pop('instance_name')
route_distinguisher = ET.SubElement(evpn_instance, 'route-distinguisher')
ET.SubElement(route_distinguisher, 'auto')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Add RD auto under EVPN instance.
Args:
rbridge_id: Rbridge id.
instance_name: EVPN instance name.
Returns:
True if command completes successfully or False if not.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output=dev.interface.evpn_instance_rd_auto(
            ...         instance_name='100',
... rbridge_id='1')
|
codesearchnet
|
def patch_apply(self, patches, text):
if not patches:
return (text, [])
patches = self.patch_deepCopy(patches)
nullPadding = self.patch_addPadding(patches)
text = nullPadding + text + nullPadding
self.patch_splitMax(patches)
delta = 0
results = []
for patch in patches:
expected_loc = patch.start2 + delta
text1 = self.diff_text1(patch.diffs)
end_loc = -1
if len(text1) > self.Match_MaxBits:
start_loc = self.match_main(text, text1[:self.Match_MaxBits],
expected_loc)
if start_loc != -1:
end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
expected_loc + len(text1) - self.Match_MaxBits)
if end_loc == -1 or start_loc >= end_loc:
start_loc = -1
else:
start_loc = self.match_main(text, text1, expected_loc)
if start_loc == -1:
results.append(False)
delta -= patch.length2 - patch.length1
else:
results.append(True)
delta = start_loc - expected_loc
if end_loc == -1:
text2 = text[start_loc : start_loc + len(text1)]
else:
text2 = text[start_loc : end_loc + self.Match_MaxBits]
if text1 == text2:
text = (text[:start_loc] + self.diff_text2(patch.diffs) +
text[start_loc + len(text1):])
else:
diffs = self.diff_main(text1, text2, False)
if (len(text1) > self.Match_MaxBits and
self.diff_levenshtein(diffs) / float(len(text1)) >
self.Patch_DeleteThreshold):
results[-1] = False
else:
self.diff_cleanupSemanticLossless(diffs)
index1 = 0
for (op, data) in patch.diffs:
if op != self.DIFF_EQUAL:
index2 = self.diff_xIndex(diffs, index1)
if op == self.DIFF_INSERT:
text = text[:start_loc + index2] + data + text[start_loc +
index2:]
elif op == self.DIFF_DELETE:
text = text[:start_loc + index2] + text[start_loc +
self.diff_xIndex(diffs, index1 + len(data)):]
if op != self.DIFF_DELETE:
index1 += len(data)
text = text[len(nullPadding):-len(nullPadding)]
return (text, results)
|
Merge a set of patches onto the text. Return a patched text, as well
as a list of true/false values indicating which patches were applied.
Args:
patches: Array of Patch objects.
text: Old text.
Returns:
Two element Array, containing the new text and an array of boolean values.
|
juraj-google-style
|
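A minimal usage sketch for the patch_apply entry above, assuming the diff-match-patch package is installed; patch_make and patch_apply are its public API.

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
old = 'The quick brown fox'
new = 'The quick red fox'

# Build patches from old to new, then replay them onto the old text.
patches = dmp.patch_make(old, new)
patched_text, results = dmp.patch_apply(patches, old)
print(patched_text)  # The quick red fox
print(results)       # [True]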
def sample_from_likelihood(self, n_timesteps=10):
self.latent_state_sequences = lmap((lambda A: ltake(n_timesteps, iterate((lambda s: pd.Series((A @ s.values), index=s.index)), self.s0))), self.transition_matrix_collection)
self.observed_state_sequences = [[self.sample_observed_state(s) for s in latent_state_sequence] for latent_state_sequence in self.latent_state_sequences]
|
Sample a collection of observed state sequences from the likelihood
model given a collection of transition matrices.
Args:
n_timesteps: The number of timesteps for the sequences.
|
codesearchnet
|
def merge_level_and_latent_dist(level_dist, latent_dist, merge_std='prev_level'):
(level_mean, level_std) = (level_dist.loc, level_dist.scale)
(latent_mean, latent_std) = (latent_dist.loc, latent_dist.scale)
new_mean = (level_mean + latent_mean)
if (merge_std == 'normal'):
z_shape = common_layers.shape_list(latent_mean)
log_scale = tf.get_variable('merge_std', shape=z_shape, dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=False)
scale = tf.exp((log_scale * 3.0))
elif (merge_std == 'prev_level'):
scale = level_std
elif (merge_std == 'prev_step'):
scale = latent_std
return tfp.distributions.Normal(loc=new_mean, scale=scale)
|
Merge level_dist and latent_dist.
new_dist ~ N(level_dist.mean + latent_dist.mean, std) where std is determined
according to merge_std.
Args:
level_dist: instance of tfp.distributions.Normal
latent_dist: instance of tfp.distributions.Normal
merge_std: can be "prev_level", "prev_step" or "normal".
Returns:
merged_dist: instance of tfp.distributions.Normal
|
codesearchnet
|
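A small sketch of the "prev_level" branch of merge_level_and_latent_dist above, assuming TensorFlow 2 and TensorFlow Probability are installed; the example values are arbitrary.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
level = tfd.Normal(loc=tf.constant([0.5]), scale=tf.constant([1.0]))
latent = tfd.Normal(loc=tf.constant([0.25]), scale=tf.constant([2.0]))

# merge_std='prev_level': the means add, the scale comes from the level distribution.
merged = tfd.Normal(loc=level.loc + latent.loc, scale=level.scale)
print(merged.loc.numpy(), merged.scale.numpy())  # [0.75] [1.]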
def if_true(self, predicate: Callable[..., bool]):
return Conditional(predicate, self, None)
|
Conditionally applies current operation when predicate returns True.
Args:
predicate: The predicate that takes the outputs from the previous
operation as input, with optional keyword arguments `global_state` and
`step`. Returns True if current operation needs to be enabled.
Otherwise no operation will be performed.
Returns:
A conditional operation.
|
github-repos
|
def start_logging(self, region, name):
ct = self.session.client('cloudtrail', region_name=region)
ct.start_logging(Name=name)
auditlog(event='cloudtrail.start_logging', actor=self.ns, data={'account': self.account.account_name, 'region': region})
self.log.info('Enabled logging for {} ({})'.format(name, region))
|
Turn on logging for a CloudTrail Trail
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
codesearchnet
|
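A hedged boto3 sketch of the same CloudTrail call as the start_logging entry above; the region and trail name are placeholders and valid AWS credentials are assumed.

import boto3

# Turn on logging for a named trail (placeholder values, not real resources).
ct = boto3.client('cloudtrail', region_name='us-east-1')
ct.start_logging(Name='example-trail')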
def get_min_muO2(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if (pair.muO2_discharge is not None):
data.extend([d['chempot'] for d in pair.muO2_discharge])
if (pair.muO2_charge is not None):
data.extend([d['chempot'] for d in pair.muO2_charge])
return (min(data) if (len(data) > 0) else None)
|
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical potential of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
|
codesearchnet
|
def applies_to(self, transition, from_state=None):
if '*' in self.names:
return True
elif self.kind in (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK):
return self._match_transition(transition)
elif self.kind == HOOK_ON_ENTER:
return self._match_state(transition.target)
elif from_state is None:
return any(self._match_state(src) for src in transition.source)
else:
return self._match_state(from_state)
|
Whether this hook applies to the given transition/state.
Args:
transition (Transition): the transition to check
from_state (State or None): the state to check. If absent, the check
is 'might this hook apply to the related transition, given a
valid source state'.
|
juraj-google-style
|