code | docstring | source
---|---|---|
def write_libraries(dir, libraries):
files = [open(os.path.join(dir, k), "w") for k, _ in libraries]
for f, (_, v) in zip(files, libraries):
v.write_markdown_to_file(f)
for f, (_, v) in zip(files, libraries):
v.write_other_members(f)
f.close()
|
Write a list of libraries to disk.
Args:
dir: Output directory.
libraries: List of (filename, library) pairs.
|
juraj-google-style
|
def schedCoro(self, coro):
import synapse.lib.provenance as s_provenance
if __debug__:
assert s_coro.iscoro(coro)
import synapse.lib.threads as s_threads
assert (s_threads.iden() == self.tid)
task = self.loop.create_task(coro)
if asyncio.current_task():
s_provenance.dupstack(task)
def taskDone(task):
self._active_tasks.remove(task)
try:
task.result()
except asyncio.CancelledError:
pass
except Exception:
logger.exception('Task scheduled through Base.schedCoro raised exception')
self._active_tasks.add(task)
task.add_done_callback(taskDone)
return task
|
Schedules a free-running coroutine to run on this base's event loop. Kills the coroutine if Base is fini'd.
It does not pend on coroutine completion.
Precondition:
This function is *not* threadsafe and must be run on the Base's event loop
Returns:
asyncio.Task: An asyncio.Task object.
|
codesearchnet
|
def convert(self):
return super(TFLiteSavedModelConverter, self).convert()
|
Converts a TensorFlow GraphDef based on instance variables.
Note that in the converted TensorFlow Lite model, the input tensor's order
might be changed each time `convert` is called. To access input tensor
information, please consider using the `SignatureRunner` API
(`interpreter.get_signature_runner`).
Returns:
The converted data in serialized format, either a TFLite Flatbuffer or
a Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
|
github-repos
|
def Current(os_override=None, arch_override=None):
return Platform(os_override if os_override else OperatingSystem.Current(), arch_override if arch_override else Architecture.Current())
|
Determines the current platform you are running on.
Args:
os_override: OperatingSystem, A value to use instead of the current.
arch_override: Architecture, A value to use instead of the current.
Returns:
Platform, The platform tuple of operating system and architecture. Either
can be None if it could not be determined.
|
github-repos
|
def Verify(self, public_key):
if self.digest_type != self.HashType.SHA256:
raise rdfvalue.DecodeError("Unsupported digest.")
if self.signature_type not in [
self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS
]:
raise rdfvalue.DecodeError("Unsupported signature type.")
try:
public_key.Verify(self.data, self.signature)
except InvalidSignature as e:
raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)
return True
|
Verify the data in this blob.
Args:
public_key: The public key to use for verification.
Returns:
True when verification succeeds.
Raises:
rdfvalue.DecodeError: if the data could not be verified.
|
juraj-google-style
|
def _get_responses_list(self, raw_output, stream):
responses = []
raw_output, self._incomplete_output[stream] = _buffer_incomplete_responses(
raw_output, self._incomplete_output.get(stream)
)
if not raw_output:
return responses
response_list = list(
filter(lambda x: x, raw_output.decode(errors="replace").split("\n"))
)
for response in response_list:
if gdbmiparser.response_is_finished(response):
pass
else:
parsed_response = gdbmiparser.parse_response(response)
parsed_response["stream"] = stream
self.logger.debug("%s", pformat(parsed_response))
responses.append(parsed_response)
return responses
|
Get parsed response list from string output
Args:
raw_output (unicode): gdb output to parse
stream (str): either stdout or stderr
|
juraj-google-style
|
def lazy_load(name):
def wrapper(load_fn):
@_memoize
def load_once(self):
if load_once.loading:
raise ImportError(('Circular import when resolving LazyModule %r' % name))
load_once.loading = True
try:
module = load_fn()
finally:
load_once.loading = False
self.__dict__.update(module.__dict__)
load_once.loaded = True
return module
load_once.loading = False
load_once.loaded = False
class LazyModule(types.ModuleType):
def __getattr__(self, attr_name):
return getattr(load_once(self), attr_name)
def __dir__(self):
return dir(load_once(self))
def __repr__(self):
if load_once.loaded:
return ('<%r via LazyModule (loaded)>' % load_once(self))
return ('<module %r via LazyModule (not yet loaded)>' % self.__name__)
return LazyModule(name)
return wrapper
|
Decorator to define a function that lazily loads the module 'name'.
This can be used to defer importing troublesome dependencies - e.g. ones that
are large and infrequently used, or that cause a dependency cycle -
until they are actually used.
Args:
name: the fully-qualified name of the module; typically the last segment
of 'name' matches the name of the decorated function
Returns:
Decorator function that produces a lazy-loading module 'name' backed by the
underlying decorated function.
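Example (illustrative sketch, not from the original source; the module name
'json' is only chosen for demonstration):
>>> @lazy_load('json')
... def json():
...     import json as _json  # the real import only happens on first use
...     return _json
>>> json.dumps({'a': 1})  # attribute access triggers the deferred import
'{"a": 1}'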
|
codesearchnet
|
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
with tf.variable_scope('sigmoid_recall_one_hot', values=[logits, labels]):
del weights_fn
num_classes = logits.shape[(- 1)]
predictions = tf.nn.sigmoid(logits)
predictions = tf.argmax(predictions, (- 1))
predictions = tf.one_hot(predictions, num_classes)
(_, recall) = tf.metrics.recall(labels=labels, predictions=predictions)
return (recall, tf.constant(1.0))
|
Calculate recall for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
recall (scalar), weights
|
codesearchnet
|
def _parse_url(self, url):
if not self._full_urls:
m = _URL_RE.match(url)
if m is None:
raise ValueError('Could not parse url: %s' % url)
return ('', m.group(1))
else:
m = _FULL_URL_RE.match(url)
if m is None:
raise ValueError('Could not parse url: %s' % url)
return (m.group(1), m.group(2) or '/')
|
Verifies that url begins with hdfs:// prefix, strips it and adds a
leading /.
Parsing behavior is determined by HadoopFileSystemOptions.hdfs_full_urls.
Args:
url: (str) A URL in the form hdfs://path/...
or in the form hdfs://server/path/...
Raises:
ValueError if the URL doesn't match the expected format.
Returns:
(str, str) If using hdfs_full_urls, for an input of
'hdfs://server/path/...' will return (server, '/path/...').
Otherwise, for an input of 'hdfs://path/...', will return
('', '/path/...').
|
github-repos
|
def try_checkpoint_metadata(self, trial):
if trial._checkpoint.storage == Checkpoint.MEMORY:
logger.debug("Not saving data for trial w/ memory checkpoint.")
return
try:
logger.debug("Saving trial metadata.")
self._cached_trial_state[trial.trial_id] = trial.__getstate__()
except Exception:
logger.exception("Error checkpointing trial metadata.")
|
Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
|
juraj-google-style
|
def percent_point(self, y, V):
self.check_fit()
if self.theta < 0:
return V
else:
result = []
for _y, _V in zip(y, V):
minimum = fminbound(self.partial_derivative_scalar, EPSILON, 1.0, args=(_y, _V))
if isinstance(minimum, np.ndarray):
minimum = minimum[0]
result.append(minimum)
return np.array(result)
|
Compute the inverse of the conditional cumulative distribution :math:`C(u|v)^{-1}`.
Args:
y: `np.ndarray` value of :math:`C(u|v)`.
V: `np.ndarray` given value of v.
|
juraj-google-style
|
def Create(self, urn, aff4_type, mode='w', token=None, age=NEWEST_TIME, force_new_version=True, object_exists=False, mutation_pool=None, transaction=None):
if (not data_store.AFF4Enabled()):
raise NotImplementedError('AFF4 data store has been disabled.')
if (mode not in ['w', 'r', 'rw']):
raise AttributeError(('Invalid mode %s' % mode))
if (token is None):
token = data_store.default_token
if (urn is not None):
urn = rdfvalue.RDFURN(urn)
_ValidateAFF4Type(aff4_type)
if ('r' in mode):
try:
existing = self.Open(urn, mode=mode, token=token, age=age, transaction=transaction)
result = existing.Upgrade(aff4_type)
if aff4_type:
result.aff4_type = aff4_type.__name__
if (force_new_version and (existing.Get(result.Schema.TYPE) != aff4_type.__name__)):
result.ForceNewVersion()
return result
except IOError:
pass
result = aff4_type(urn, mode=mode, token=token, age=age, aff4_type=aff4_type.__name__, object_exists=object_exists, mutation_pool=mutation_pool, transaction=transaction)
result.Initialize()
if force_new_version:
result.ForceNewVersion()
return result
|
Creates the urn if it does not already exist, otherwise opens it.
If the urn exists and is of a different type, this will also promote it to
the specified type.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
mode: The desired mode for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
object_exists: If we know the object already exists we can skip index
creation.
mutation_pool: An optional MutationPool object to write to. If not given,
the data_store is used directly.
transaction: For locked objects, a lock is passed to the object.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
|
codesearchnet
|
def get_configured_consensus_module(block_id, state_view):
settings_view = SettingsView(state_view)
default_consensus = ('genesis' if (block_id == NULL_BLOCK_IDENTIFIER) else 'devmode')
consensus_module_name = settings_view.get_setting('sawtooth.consensus.algorithm', default_value=default_consensus)
return ConsensusFactory.get_consensus_module(consensus_module_name)
|
Returns the consensus_module based on the consensus module set by
the "sawtooth_settings" transaction family.
Args:
block_id (str): the block id associated with the current state_view
state_view (:obj:`StateView`): the current state view to use for
setting values
Raises:
UnknownConsensusModuleError: Thrown when an invalid consensus
module has been configured.
|
codesearchnet
|
def create_channel(cls, address="firestore.googleapis.com:443", credentials=None):
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
|
Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
|
juraj-google-style
|
def get_help(self, cmd_prefix=None):
if not cmd_prefix:
help_info = RichTextLines([])
if self._help_intro:
help_info.extend(self._help_intro)
sorted_prefixes = sorted(self._handlers)
for cmd_prefix in sorted_prefixes:
lines = self._get_help_for_command_prefix(cmd_prefix)
lines.append('')
lines.append('')
help_info.extend(RichTextLines(lines))
return help_info
else:
return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
|
Compile help information into a RichTextLines object.
Args:
cmd_prefix: Optional command prefix. As the prefix itself or one of its
aliases.
Returns:
A RichTextLines object containing the help information. If cmd_prefix
is None, the return value will be the full command-line help. Otherwise,
it will be the help information for the specified command.
|
github-repos
|
def triangular(logu, name=None):
with tf.compat.v1.name_scope(name, 'triangular', [logu]):
logu = tf.convert_to_tensor(value=logu, name='logu')
return (pearson(logu) / (1.0 + tf.exp(logu)))
|
The Triangular Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Triangular Csiszar-function is:
```none
f(u) = (u - 1)**2 / (1 + u)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
|
codesearchnet
|
def get_polypeptide_within(self, chain_id, resnum, angstroms, only_protein=True, use_ca=False, custom_coord=None, return_resnums=False):
if self.structure:
parsed = self.structure
else:
parsed = self.parse_structure()
residue_list = ssbio.protein.structure.properties.residues.within(resnum=resnum, chain_id=chain_id, model=parsed.first_model, angstroms=angstroms, use_ca=use_ca, custom_coord=custom_coord)
if only_protein:
filtered_residue_list = [x for x in residue_list if (x.id[0] == ' ')]
else:
filtered_residue_list = residue_list
residue_list_combined = Polypeptide(filtered_residue_list)
if return_resnums:
resnums = [int(x.id[1]) for x in filtered_residue_list]
return (residue_list_combined, resnums)
return residue_list_combined
|
Get a Polypeptide object of the amino acids within X angstroms of the specified chain + residue number.
Args:
resnum (int): Residue number of the structure
chain_id (str): Chain ID of the residue number
angstroms (float): Radius of the search sphere
only_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence
use_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used)
custom_coord (list): custom XYZ coord
return_resnums (bool): if list of resnums should be returned
Returns:
Bio.PDB.Polypeptide.Polypeptide: Biopython Polypeptide object
|
codesearchnet
|
def Collect(self, knowledge_base):
environment_variable = knowledge_base.GetEnvironmentVariable('programdata')
allusersappdata = getattr(environment_variable, 'value', None)
if not allusersappdata:
environment_variable = knowledge_base.GetEnvironmentVariable(
'allusersprofile')
allusersdata = getattr(environment_variable, 'value', None)
if allusersdata:
allusersappdata = '\\'.join([allusersdata, 'Application Data'])
if allusersappdata:
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='allusersappdata', value=allusersappdata)
try:
logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
'allusersappdata', allusersappdata))
knowledge_base.AddEnvironmentVariable(environment_variable)
except KeyError:
pass
|
Collects values from the knowledge base.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def get_month(datestring):
convert_written = re.compile('jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec', re.IGNORECASE)
month = convert_written.search(datestring)
month_number = None
if month:
month_number = strptime(month.group(), '%b').tm_mon
if month_number is None:
return None
if month_number < 10:
month_number = add_zero(month_number)
return str(month_number)
|
Transforms a written month into corresponding month number.
E.g. November -> 11, or May -> 05.
Keyword arguments:
datestring -- a string
Returns:
String, or None if the transformation fails
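Example (illustrative sketch, not from the original source; assumes the
module-level `re`, `strptime` and `add_zero` helpers behave as the code expects):
>>> get_month('15 November 2019')  # "Nov" is matched and mapped to month 11
'11'
>>> get_month('1 May 2019')        # single-digit months are zero-padded
'05'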
|
codesearchnet
|
def ground_temperature_depth(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `ground_temperature_depth`'.format(value))
self._ground_temperature_depth = value
|
Corresponds to IDD Field `ground_temperature_depth`
Args:
value (float): value for IDD Field `ground_temperature_depth`
Unit: m
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def _RunScript(self, script):
process = subprocess.Popen(
script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
self.logger.info(line.decode('utf-8').rstrip('\n'))
if process.poll() is not None:
break
|
Run a script and log the streamed script output.
Args:
script: string, the file location of an executable script.
|
juraj-google-style
|
def p(value, bits=None, endian=None, target=None):
return globals()[('p%d' % _get_bits(bits, target))](value, endian=endian, target=target)
|
Pack a signed pointer for a given target.
Args:
value(int): The value to pack.
bits(:class:`pwnypack.target.Target.Bits`): Override the default
word size. If ``None`` it will look at the word size of
``target``.
endian(:class:`~pwnypack.target.Target.Endian`): Override the default
byte order. If ``None``, it will look at the byte order of
the ``target`` argument.
target(:class:`~pwnypack.target.Target`): Override the default byte
order. If ``None``, it will look at the byte order of
the global :data:`~pwnypack.target.target`.
|
codesearchnet
|
def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:
return any(q in qubits for q in self.qubits)
|
Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits.
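Example (illustrative sketch, not from the original source; assumes cirq is installed):
>>> q0, q1 = cirq.LineQubit.range(2)
>>> cirq.Moment([cirq.X(q0)]).operates_on([q0])
True
>>> cirq.Moment([cirq.X(q0)]).operates_on([q1])
False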
|
juraj-google-style
|
def get_barycenter(self):
try:
mass = self['mass'].values
except KeyError:
mass = self.add_data('mass')['mass'].values
pos = self.loc[:, ['x', 'y', 'z']].values
return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass()
|
Return the mass weighted average location.
Args:
None
Returns:
:class:`numpy.ndarray`:
|
juraj-google-style
|
def nack(self, channel_id=None, **kwargs):
path = "/event-service/v1/channels/{}/nack".format(channel_id)
r = self._httpclient.request(
method="POST",
url=self.url,
path=path,
**kwargs
)
return r
|
Send a negative read-acknowledgement to the service.
Causes the channel's read point to move to its previous position
prior to the last poll.
Args:
channel_id (str): The channel ID.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``event_nack.py`` example.
|
juraj-google-style
|
def pre_run_cell(self, cellno, code):
self.cellid = cellno
import ast
if findloop(ast.parse(code)):
from acorn.logging.decoration import set_streamlining
set_streamlining(True)
from time import time
self.pre = {'m': 'loop', 'a': None, 's': time(), 'r': None, 'c': code}
|
Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
|
codesearchnet
|
def decode(self, ids, strip_extraneous=False):
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
|
Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
|
juraj-google-style
|
def delete_endpoint(self, delete_endpoint_config=True):
if delete_endpoint_config:
self._delete_endpoint_config()
self.sagemaker_session.delete_endpoint(self.endpoint)
|
Delete the Amazon SageMaker endpoint backing this predictor. Also delete the endpoint configuration attached
to it if delete_endpoint_config is True.
Args:
delete_endpoint_config (bool, optional): Flag to indicate whether to delete endpoint configuration together
with endpoint. Defaults to True. If True, both endpoint and endpoint configuration will be deleted. If
False, only endpoint will be deleted.
|
juraj-google-style
|
def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):
if canvas.__module__ in SUPPORTED_CANVAS:
drawer = SUPPORTED_CANVAS[canvas.__module__]
drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()
|
Draw the tree on a canvas.
Args:
canvas (object): The canvas, you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (You can also add your custom libraries.)
stem_color (tuple): Color or gradient for the stem of the tree.
leaf_color (tuple): Color for the leaf (= the color for last iteration).
thickness (int): The start thickness of the tree.
|
juraj-google-style
|
def live(self, url, resource_class, resource_args, params=None):
return self.adapter.live(self, url, resource_class, resource_args, params=params)
|
Get a live endpoint.
Args:
url(string): URL for the request
resource_class(class): The class to use for entries coming
from the live endpoint.
resource_args(dict): Additional arguments to pass to the
`resource_class` constructor
Keyword Args:
params(dict): Request parameters for the live url
Returns:
An iterator over the live endpoint. Depending on the
adapter the iterator will allow asynchronous
behavior. The default adapter will block while
iterating over the response of this method.
|
codesearchnet
|
def create_message_from_descriptor(desc: descriptor.Descriptor, **kwargs: Any) -> message.Message:
return get_message_class_from_descriptor(desc)(**kwargs)
|
Instantiates a new Message based on a provided Descriptor.
Args:
desc: The Descriptor that describes the Message to instantiate.
**kwargs: An optional list of key/value pairs to initialize with.
Returns:
A new Message based on the provided Descriptor.
|
github-repos
|
def _GetGradWrt(output_grad, other_operand, input_shape, input_subs, other_subs, output_subs):
reduced_label_set = set(input_subs).difference(set(output_subs + other_subs + '.'))
left_subs = ''.join((s for s in input_subs if s not in reduced_label_set))
grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand], '{},{}->{}'.format(output_subs, other_subs, left_subs))
if not reduced_label_set:
return grad_reduced
return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, reduced_label_set)
|
Returns the gradient wrt an input operand for a binary einsum.
This function does not handle (un)broadcasting. This must be done separately
on the returned gradient.
Args:
output_grad: The gradient wrt the output of a binary einsum operation.
other_operand: The complementary `Tensor` operand i.e. which is not the
input operand.
input_shape: A `Tensor` representing the shape of input operand.
input_subs: The subscripts of the input operand.
other_subs: The subscripts of the complementary operand.
output_subs: The output subscripts.
|
github-repos
|
def step(self, action):
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
self.controllers[0][:] = action
_LIB.Step(self._env)
reward = self._get_reward()
self.done = self._get_done()
info = self._get_info()
self._did_step(self.done)
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
return self.screen, reward, self.done, info
|
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
|
juraj-google-style
|
async def rewind(self, query="1"):
if not self.state == 'ready':
logger.debug("Trying to rewind from wrong state '{}'".format(self.state))
return
if query == "":
query = "1"
try:
num = int(query)
except TypeError:
self.statuslog.error("Rewind argument must be a number")
except ValueError:
self.statuslog.error("Rewind argument must be a number")
else:
if len(self.prev_queue) == 0:
self.statuslog.error("No songs to rewind")
return
if num < 0:
self.statuslog.error("Rewind must be positive or 0")
return
elif num > len(self.prev_queue):
self.statuslog.warning("Rewinding to start")
else:
self.statuslog.info("Rewinding")
for i in range(num + 1):
if len(self.prev_queue) > 0:
self.queue.insert(0, self.prev_queue.pop())
try:
self.streamer.stop()
except Exception as e:
logger.exception(e)
|
The rewind command
Args:
query (str): The number of items to skip
|
juraj-google-style
|
def inverse(self, y, name='inverse'):
return self._call_inverse(y, name)
|
Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing the unique
`k` points `(x1, ..., xk)` such that `g(xi) = y`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse` is not implemented.
|
github-repos
|
def fit(self, x, y):
train = np.vstack((np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for (idx, row) in x.iterrows()]), np.array([self.featurize_row(row.iloc[1], row.iloc[0]) for (idx, row) in x.iterrows()])))
labels = np.vstack((y, (- y))).ravel()
verbose = (1 if self.verbose else 0)
self.clf = CLF(verbose=verbose, min_samples_leaf=self.L, n_estimators=self.E, max_depth=self.max_depth, n_jobs=self.n_jobs).fit(train, labels)
|
Train the model.
Args:
x_tr (pd.DataFrame): CEPC format dataframe containing the pairs
y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs
|
codesearchnet
|
def __init__(self, resolver_context):
super(GzipFile, self).__init__(resolver_context)
self._compressed_data_size = -1
self._current_offset = 0
self._gzip_file_object = None
self._members_by_end_offset = collections.OrderedDict()
self.uncompressed_data_size = 0
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
Raises:
ValueError: when file_object is set.
|
juraj-google-style
|
def potential(self, value):
if value:
self._potential = True
else:
self._potential = False
|
Setter for 'potential' property
Args:
value (bool): True if a potential is required. False else
|
codesearchnet
|
def create_masks(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'):
degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
masks = []
for (input_degrees, output_degrees) in zip(degrees[:(- 1)], degrees[1:]):
mask = tf.cast((input_degrees[:, np.newaxis] <= output_degrees), tf.float32)
masks.append(mask)
mask = tf.cast((degrees[-1][:, np.newaxis] < degrees[0]), tf.float32)
masks.append(mask)
return masks
|
Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; those number of units will always be set to
input_dim downstream. Each hidden unit size must be at least the size of
length (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
|
codesearchnet
|
def bcoo_add_indices(x1, x2, sum_duplicates):
x2_zeros = jnp.zeros(x2.data.shape, x1.data.dtype)
concat_axis = len(x1.indices.shape) - 2
output_indices = jnp.concatenate([x1.indices, x2.indices], axis=concat_axis)
output_data = jnp.concatenate([x1.data, x2_zeros], axis=concat_axis)
output = jax_sparse.BCOO((output_data, output_indices), shape=x1.shape)
if sum_duplicates:
output = jax_sparse.bcoo_sum_duplicates(output)
return output
|
Add the indices of `x2` to `x1` with zero values.
Args:
x1: `BCOO` tensor to add indices to.
x2: `BCOO` tensor to take the indices to add to x1.
sum_duplicates: if `True` calls `bcoo_sum_duplicates` on the output.
Returns:
a `BCOO` tensor equal to `x1` but with extra zeros at indices in `x2`
that were missing in `x1`.
|
github-repos
|
def sg_producer_func(func):
@wraps(func)
def wrapper(**kwargs):
opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1)
assert opt.source is not None, 'source is mandatory.'
if type(opt.source) is not list and type(opt.source) is not tuple:
opt.source = [opt.source]
if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple:
opt.dtypes = [opt.dtypes]
if opt.out_dtypes is None:
opt.out_dtypes = opt.dtypes
if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple:
opt.out_dtypes = [opt.out_dtypes]
assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.'
def enqueue_func(sess, op):
data = func(sess.run(opt.source))
feed_dict = {}
for ph, col in zip(placeholders, data):
feed_dict[ph] = col
sess.run(op, feed_dict=feed_dict)
placeholders = []
for dtype in opt.dtypes:
placeholders.append(tf.placeholder(dtype=dtype))
queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)
enqueue_op = queue.enqueue(placeholders)
runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads)
tf.train.add_queue_runner(runner)
return queue.dequeue()
return wrapper
|
Decorates a function `func` as sg_producer_func.
Args:
func: A function to decorate.
|
juraj-google-style
|
def update_internal_networks(self, network_uri_list, force=False, timeout=(- 1)):
uri = '{}/internalNetworks'.format(self.data['uri'])
return self._helper.update(network_uri_list, uri=uri, force=force, timeout=timeout)
|
Updates internal networks on the logical interconnect.
Args:
network_uri_list: List of Ethernet network uris.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
|
codesearchnet
|
def build_uri(self, id_or_uri):
if (not id_or_uri):
logger.exception(RESOURCE_CLIENT_INVALID_ID)
raise ValueError(RESOURCE_CLIENT_INVALID_ID)
if ('/' in id_or_uri):
self.validate_resource_uri(id_or_uri)
return id_or_uri
else:
return ((self._base_uri + '/') + id_or_uri)
|
Helps to build the URI from resource id and validate the URI.
Args:
id_or_uri: ID/URI of the resource.
Returns:
Returns a valid resource URI
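Example (illustrative sketch, not from the original source; the base URI and
resource id are hypothetical):
>>> client._base_uri = '/rest/fc-networks'
>>> client.build_uri('9b8f7e6d')  # a bare id gets the base URI prepended
'/rest/fc-networks/9b8f7e6d'
>>> client.build_uri('/rest/fc-networks/9b8f7e6d')  # a full URI is validated and returned as-is
'/rest/fc-networks/9b8f7e6d'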
|
codesearchnet
|
def _call_validators(self):
msg = []
msg.extend(self._validate_keyfile())
msg.extend(self._validate_dns_zone())
msg.extend(self._validate_retries())
msg.extend(self._validate_project())
return msg
|
Actually run all the validations.
Returns:
list(str): Error messages from the validators.
|
codesearchnet
|
def array(x, dtype=None):
if any_symbolic_tensors((x,)):
return Array(dtype=dtype).symbolic_call(x)
return backend.numpy.array(x, dtype=dtype)
|
Create a tensor.
Args:
x: Input tensor.
dtype: The desired data-type for the tensor.
Returns:
A tensor.
Examples:
>>> keras.ops.array([1, 2, 3])
array([1, 2, 3], dtype=int32)
>>> keras.ops.array([1, 2, 3], dtype="float32")
array([1., 2., 3.], dtype=float32)
|
github-repos
|
def _save_to_hdx(self, action, id_field_name, file_to_upload=None):
result = self._write_to_hdx(action, self.data, id_field_name, file_to_upload)
self.old_data = self.data
self.data = result
|
Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data
from HDX
Args:
action (str): Action to perform: 'create' or 'update'
id_field_name (str): Name of field containing HDX object identifier
file_to_upload (Optional[str]): File to upload to HDX
Returns:
None
|
juraj-google-style
|
def get_well(self, uwi):
if uwi is None:
raise ValueError('a UWI must be provided')
matching_wells = [w for w in self if w.uwi == uwi]
return matching_wells[0] if len(matching_wells) >= 1 else None
|
Returns a Well object identified by UWI
Args:
uwi (string): the UWI string for the well.
Returns:
well
|
juraj-google-style
|
def supply(self, issuer):
issuer_uri_config = self._issuer_uri_configs.get(issuer)
if (not issuer_uri_config):
return
jwks_uri = issuer_uri_config.jwks_uri
if jwks_uri:
return jwks_uri
open_id_valid = issuer_uri_config.open_id_valid
if open_id_valid:
discovered_jwks_uri = _discover_jwks_uri(issuer)
self._issuer_uri_configs[issuer] = IssuerUriConfig(False, discovered_jwks_uri)
return discovered_jwks_uri
|
Supplies the `jwks_uri` for the given issuer.
Args:
issuer: the issuer.
Returns:
The `jwks_uri` that is either statically configured or retrieved via
OpenId discovery. None is returned when the issuer is unknown or the
OpenId discovery fails.
|
codesearchnet
|
def shuffle_sparse_coo_matrix(sparse_matrix, dropout_rate=0.0, min_dropout_rate=None, max_dropout_rate=None):
if ((dropout_rate < 0.0) or (dropout_rate >= 1.0)):
raise ValueError(('Dropout rate should be in [0, 1) but is %f' % dropout_rate))
(num_rows, num_cols) = sparse_matrix.shape
shuffled_rows = shuffle(np.arange(num_rows))
shuffled_cols = shuffle(np.arange(num_cols))
if (dropout_rate > 0.0):
sparse_matrix = _dropout_sparse_coo_matrix(sparse_matrix, dropout_rate, min_dropout_rate, max_dropout_rate)
new_row = np.take(shuffled_rows, sparse_matrix.row)
new_col = np.take(shuffled_cols, sparse_matrix.col)
return sparse.csr_matrix((sparse_matrix.data, (new_row, new_col)), shape=(num_rows, num_cols))
|
Shuffle sparse matrix encoded as a SciPy coo matrix.
Args:
sparse_matrix: a SciPy coo sparse matrix.
dropout_rate: if dropout_rate > 0 then non-zero elements of the input matrix
will be dropped uniformly at random.
min_dropout_rate: minimum value for the dropout rate. If None
FLAGS.min_dropout_rate is used.
max_dropout_rate: maximum value for the dropout rate. If None
FLAGS.max_dropout_rate is used.
Returns:
A SciPy csr_matrix entailing the randomized interactions.
|
codesearchnet
|
def backslashcase(string):
str1 = re.sub('_', '\\\\', snakecase(string))
return str1
|
Convert string into backslash case.
Join punctuation with backslash.
Args:
string: String to convert.
Returns:
string: Backslash cased string.
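Example (illustrative sketch, not from the original source):
>>> backslashcase('foo_bar')  # underscores from snakecase become backslashes
'foo\\bar'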
|
codesearchnet
|
def LoadFromStorage(cls, path=None):
if (path is None):
path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
return cls(**googleads.common.LoadFromStorage(path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))
|
Creates an AdWordsClient with information stored in a yaml file.
Args:
[optional]
path: The path string to the file containing cached AdWords data.
Returns:
An AdWordsClient initialized with the values cached in the file.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required key was missing or an OAuth2 key was missing.
|
codesearchnet
|
def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:
return self.get_queryset().upsert(conflict_target, fields, index_predicate)
|
Creates a new record or updates the existing one
with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index.
Returns:
The primary key of the row that was created/updated.
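Example (illustrative sketch, not from the original source; `Article`, its
unique `slug` field, and the returned primary key are hypothetical):
>>> Article.objects.upsert(
...     conflict_target=['slug'],
...     fields={'slug': 'intro', 'title': 'Introduction'},
... )
1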
|
juraj-google-style
|
def find_structure(self, filename_or_structure):
try:
if isinstance(filename_or_structure, str):
s = Structure.from_file(filename_or_structure)
elif isinstance(filename_or_structure, Structure):
s = filename_or_structure
else:
raise MPRestError("Provide filename or Structure object.")
payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}
response = self.session.post(
'{}/find_structure'.format(self.preamble), data=payload
)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp['valid_response']:
return resp['response']
else:
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}"
.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
|
Finds matching structures on the Materials Project site.
Args:
filename_or_structure: filename or Structure object
Returns:
A list of matching structures.
Raises:
MPRestError
|
juraj-google-style
|
def _list_objects(self, client_kwargs, path, max_request_entries):
kwargs = dict(prefix=path)
if max_request_entries:
kwargs['limit'] = max_request_entries
else:
kwargs['full_listing'] = True
with _handle_client_exception():
response = self.client.get_container(
client_kwargs['container'], **kwargs)
for obj in response[1]:
yield obj.pop('name'), obj
|
Lists objects.
Args:
client_kwargs (dict): Client arguments.
path (str): Path relative to current locator.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
juraj-google-style
|
def attention_bias_batch(batch_coordinates_q, batch_coordinates_k=None, condition_fn=None):
if (batch_coordinates_k is None):
batch_coordinates_k = batch_coordinates_q
def to_float(bc):
bc = tf.squeeze(bc, 1)
bc = tf.to_float(bc)
return bc
bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1)
bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0)
bias_batch = (bc_h - bc_v)
bias_batch = condition_fn(bias_batch)
bias_batch *= (- 1000000000.0)
return bias_batch
|
Generate a mask to prevent the batch to attend to each others.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of the batches
batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the
coordinates of the batches. If None, do self-attention.
condition_fn: Callable defining the attention mask.
Returns:
Float-like Tensor of shape [length_q, length_k] containing either 0 or
-infinity (-1e9).
|
codesearchnet
|
def _is_every_steps(self, phase_step, batch, every):
if not every:
return False
covered_steps = range(phase_step, phase_step + batch)
return any((step + 1) % every == 0 for step in covered_steps)
|
Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen.
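Worked example (illustrative, not from the original source; `loop` is a
hypothetical object exposing this method):
>>> loop._is_every_steps(phase_step=95, batch=10, every=100)   # covers steps 96..105, hits 100
True
>>> loop._is_every_steps(phase_step=101, batch=5, every=100)   # covers steps 102..106, misses
False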
|
juraj-google-style
|
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(DerivationParameters, self).read(input_stream, kmip_version=kmip_version)
local_stream = BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):
self._cryptographic_parameters = CryptographicParameters()
self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.INITIALIZATION_VECTOR, local_stream):
self._initialization_vector = ByteString(tag=enums.Tags.INITIALIZATION_VECTOR)
self._initialization_vector.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.DERIVATION_DATA, local_stream):
self._derivation_data = ByteString(tag=enums.Tags.DERIVATION_DATA)
self._derivation_data.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.SALT, local_stream):
self._salt = ByteString(tag=enums.Tags.SALT)
self._salt.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(Tags.ITERATION_COUNT, local_stream):
self._iteration_count = Integer(tag=Tags.ITERATION_COUNT)
self._iteration_count.read(local_stream, kmip_version=kmip_version)
self.is_oversized(local_stream)
|
Read the data encoding the DerivationParameters struct and decode it
into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def get_duration(self, matrix_name):
duration = 0.0
if (matrix_name in self.data):
duration = sum([stage.duration() for stage in self.data[matrix_name]])
return duration
|
Get duration for a concrete matrix.
Args:
matrix_name (str): name of the Matrix.
Returns:
float: duration of concrete matrix in seconds.
|
codesearchnet
|
def get(self,key,default=None):
retval = self.__getitem__(key)
if not retval:
retval = default
return retval
|
Get a value from the dictionary.
Args:
key (str): The dictionary key.
default (any): The default to return if the key is not in the
dictionary. Defaults to None.
Returns:
str or any: The dictionary value or the default if the key is not
in the dictionary.
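Example (illustrative sketch, not from the original source; `cfg` is a
hypothetical instance of this class whose __getitem__ returns a falsy value,
rather than raising, for missing keys):
>>> cfg.get('missing', 'fallback')
'fallback'
>>> cfg.get('debug', 5)  # note: a stored falsy value (e.g. 0 or '') also falls back to the default
5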
|
juraj-google-style
|
def pop(self, key, default=None):
return self._dictionary.pop(key.lower(), default)
|
Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present
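Example (illustrative sketch, not from the original source; `headers` is a
hypothetical instance of this case-insensitive mapping):
>>> headers.pop('Content-Type')  # keys are looked up lower-cased
'application/json'
>>> headers.pop('X-Missing', 'n/a')
'n/a'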
|
codesearchnet
|
def get_object_from_normpath(self, file_path):
file_path = make_string_path(file_path)
if file_path == self.root.name:
return self.root
if file_path == self.dev_null.name:
return self.dev_null
file_path = self._original_path(file_path)
path_components = self._path_components(file_path)
target_object = self.root
try:
for component in path_components:
if S_ISLNK(target_object.st_mode):
target_object = self.resolve(target_object.contents)
if not S_ISDIR(target_object.st_mode):
if not self.is_windows_fs:
self.raise_io_error(errno.ENOTDIR, file_path)
self.raise_io_error(errno.ENOENT, file_path)
target_object = target_object.get_entry(component)
except KeyError:
self.raise_io_error(errno.ENOENT, file_path)
return target_object
|
Search for the specified filesystem object within the fake
filesystem.
Args:
file_path: Specifies target FakeFile object to retrieve, with a
path that has already been normalized/resolved.
Returns:
The FakeFile object corresponding to file_path.
Raises:
IOError: if the object is not found.
|
juraj-google-style
|
def get_sqlalchemy_url(database=None, host=None, port=None, username=None, password=None, driver='postgres'):
strings = [('%s://' % driver)]
if username:
strings.append(username)
if password:
strings.append((':%s@' % password))
else:
strings.append('@')
if host:
strings.append(host)
if (port is not None):
strings.append((':%d' % int(port)))
if database:
strings.append(('/%s' % database))
return ''.join(strings)
|
Gets SQLAlchemy url from database connection parameters
Args:
database (Optional[str]): Database name
host (Optional[str]): Host where database is located
port (Union[int, str, None]): Database port
username (Optional[str]): Username to log into database
password (Optional[str]): Password to log into database
driver (str): Database driver. Defaults to 'postgres'.
Returns:
db_url (str): SQLAlchemy url
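Example (illustrative sketch, not from the original source):
>>> get_sqlalchemy_url(database='mydb', host='localhost', port=5432,
...                    username='user', password='secret')
'postgres://user:secret@localhost:5432/mydb'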
|
codesearchnet
|
def to_md_file(string, filename, out_path='.'):
md_file = ('%s.md' % filename)
with open(os.path.join(out_path, md_file), 'w') as f:
f.write(string)
print('wrote {}.'.format(md_file))
|
Import a module path and create an api doc from it
Args:
string (str): string with line breaks to write to file.
filename (str): filename without the .md
out_path (str): The output directory
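Example (illustrative sketch, not from the original source; the output path is hypothetical):
>>> to_md_file('# API reference\n\nGenerated content.', 'api', out_path='/tmp')
wrote api.md.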
|
codesearchnet
|
def UpdateChainAndProcess(self, parser_mediator, registry_key, **kwargs):
parser_mediator.AppendToParserChain(self)
try:
self.Process(parser_mediator, registry_key, **kwargs)
finally:
parser_mediator.PopFromParserChain()
|
Updates the parser chain and processes a Windows Registry key or value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Raises:
ValueError: If the Windows Registry key is not set.
|
juraj-google-style
|
def s3_app_bucket(self, include_region=False):
if include_region:
s3_app_bucket = self.format['s3_app_region_bucket'].format(**self.data)
else:
s3_app_bucket = self.format['s3_app_bucket'].format(**self.data)
return s3_app_bucket
|
Generate s3 application bucket name.
Args:
include_region (bool): Include region in the name generation.
|
codesearchnet
|
def _anonymize_table(cls, table_data, pii_fields):
for pii_field in pii_fields:
field_name = pii_field['name']
transformer = cls.get_class(TRANSFORMERS['categorical'])(pii_field)
table_data[field_name] = transformer.anonymize_column(table_data)
return table_data
|
Anonymize in `table_data` the fields in `pii_fields`.
Args:
table_data (pandas.DataFrame): Original dataframe/table.
pii_fields (list[dict]): Metadata for the fields to transform.
Result:
pandas.DataFrame: Anonymized table.
|
juraj-google-style
|
def assign(self, value, use_locking=None, name=None, read_value=True):
with _handle_graph(self.handle):
value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
if not self._shape.is_compatible_with(value_tensor.shape):
if self.name is None:
tensor_name = ''
else:
tensor_name = ' ' + str(self.name)
raise ValueError(f"Cannot assign value to variable '{tensor_name}': Shape mismatch. The variable shape {self._shape}, and the assigned value shape {value_tensor.shape} are incompatible.")
kwargs = {}
if forward_compat.forward_compatible(2022, 3, 23):
validate_shape = self._validate_shape and self._shape.is_fully_defined()
kwargs['validate_shape'] = validate_shape
assign_op = gen_resource_variable_ops.assign_variable_op(self.handle, value_tensor, name=name, **kwargs)
if read_value:
return self._lazy_read(assign_op)
return assign_op
|
Assigns a new value to this variable.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name to use for the assignment.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
|
github-repos
|
def _process_update(self, item, feed_item):
campaign = self.campaign_dao.get(feed_item, required=True)
feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']
feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']
item['name'] = feed_item.get(FieldMap.PLACEMENT_GROUP_NAME, None)
item['placementGroupType'] = feed_item.get(FieldMap.PLACEMENT_GROUP_TYPE, None)
item['pricingSchedule']['startDate'] = feed_item.get(FieldMap.PLACEMENT_GROUP_START_DATE, None)
item['pricingSchedule']['endDate'] = feed_item.get(FieldMap.PLACEMENT_GROUP_END_DATE, None)
item['pricingSchedule']['pricingType'] = feed_item.get(FieldMap.PLACEMENT_GROUP_PRICING_TYPE, None)
|
Updates a placement group based on the values from the feed.
Args:
item: Object representing the placement group to be updated, this object
is updated directly.
feed_item: Feed item representing placement group values from the
Bulkdozer feed.
|
github-repos
|
def _MergeEntities(self, a, b):
if (a.shape_id != b.shape_id):
raise MergeError('shape_id must be the same')
distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2], b.points[0][:2]), ApproximateDistanceBetweenPoints(a.points[(- 1)][:2], b.points[(- 1)][:2]))
if (distance > self.largest_shape_distance):
raise MergeError(('The shape endpoints are too far away: %.1fm (largest_shape_distance is %.1fm)' % (distance, self.largest_shape_distance)))
return self._Migrate(b, self.feed_merger.b_schedule, False)
|
Merges the shapes by taking the new shape.
Args:
a: The first transitfeed.Shape instance.
b: The second transitfeed.Shape instance.
Returns:
The merged shape.
Raises:
MergeError: If the ids are different or if the endpoints are further
than largest_shape_distance apart.
|
codesearchnet
|
def timestamp_fmt(value, fmt):
return int(calendar.timegm(
datetime.datetime.strptime(value, fmt).utctimetuple()
))
|
Convert timestamp string to time in seconds since epoch.
Wraps the datetime.datetime.strptime(). This is slow use the other
timestamp_*() functions if possible.
Args:
value: A timestamp string.
fmt: A timestamp format string.
Returns:
The time in seconds since epoch as an integer.
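Example (illustrative sketch, not from the original source):
>>> timestamp_fmt('1970-01-02 00:00:00', '%Y-%m-%d %H:%M:%S')  # one day after the epoch
86400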
|
juraj-google-style
|
def GetAllClientLabels(token, include_catchall=False):
labels_index = aff4.FACTORY.Create(standard.LabelSet.CLIENT_LABELS_URN, standard.LabelSet, mode='r', token=token)
labels = set(labels_index.ListLabels())
if include_catchall:
labels.add(ALL_CLIENTS_LABEL)
return labels
|
Get the set of all label names applied to all clients.
Args:
token: token to use when opening the index.
include_catchall: If true, we include ALL_CLIENTS_LABEL in the results.
Returns:
set of label name strings, including the catchall "All"
|
codesearchnet
|
def NewPathSpec(cls, type_indicator, **kwargs):
if type_indicator not in cls._path_spec_types:
raise KeyError(
'Path specification type: {0:s} not set.'.format(type_indicator))
if 'parent' in kwargs and kwargs['parent'] is None:
del kwargs['parent']
path_spec_type = cls._path_spec_types[type_indicator]
return path_spec_type(**kwargs)
|
Creates a new path specification for the specific type indicator.
Args:
type_indicator (str): type indicator.
kwargs (dict): keyword arguments depending on the path specification.
Returns:
PathSpec: path specification.
Raises:
KeyError: if path specification is not registered.
|
juraj-google-style
|
def query(self, connection, query, fetch=True):
self.install_module(connection)
statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))
logger.debug('Finding and installing all partitions from query. \n query: {}'.format(query))
new_query = []
if (len(statements) > 1):
raise BadSQLError('Can only query a single statement')
if (len(statements) == 0):
raise BadSQLError("DIdn't get any statements in '{}'".format(query))
statement = statements[0]
logger.debug('Searching statement for partition ref.\n statement: {}'.format(statement.to_unicode()))
logger.debug('Executing updated query after partition install.\n query before update: {}\n query to execute (updated query): {}'.format(statement, new_query))
return self._execute(connection, statement.to_unicode(), fetch=fetch)
|
Creates virtual tables for all partitions found in the query and executes query.
Args:
query (str): sql query
fetch (bool): fetch result from database if True, do not fetch otherwise.
|
codesearchnet
|
def create_group(self, name):
self.service.create_group(
name, self.url_prefix, self.auth, self.session,
self.session_send_opts)
|
Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def splitext2(filepath):
root, filename = os.path.split(safepath(filepath))
filename, ext = os.path.splitext(safepath(filename))
return root, filename, ext
|
Split filepath into root, filename, ext
Args:
filepath (str, path): file path
Returns:
(str, str, str): root, filename and extension
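Example (illustrative sketch, not from the original source):
>>> splitext2('/data/logs/run01.txt')
('/data/logs', 'run01', '.txt')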
|
juraj-google-style
|
def extract_backup_bundle(self, resource, timeout=(- 1)):
return self._client.update(resource, uri=self.BACKUP_ARCHIVE_PATH, timeout=timeout)
|
Extracts the existing backup bundle on the appliance and creates all the artifacts.
Args:
resource (dict): Deployment Group to extract.
timeout:
Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in
OneView, it just stops waiting for its completion.
Returns:
dict: A Deployment Group associated with the Artifact Bundle backup.
|
codesearchnet
|
def _is_subclass(self, cls, class_spec):
if isinstance(cls, abstract.AMBIGUOUS_OR_EMPTY):
return None
return abstract_utils.check_against_mro(self.ctx, cls, class_spec)
|
Check if the given class is a subclass of a class specification.
Args:
cls: A BaseValue, the first argument to an issubclass call.
class_spec: A BaseValue, the second issubclass argument.
Returns:
True if the class is a subclass (or is a class) in the class_spec, False
if not, and None if it is ambiguous.
|
github-repos
|
def read_to_offset(self, offset):
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
|
Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
|
juraj-google-style
|
def _parse_v_parameters(val_type, val, filename, param_name):
if (val_type == 'logical'):
val = [(i == 'T') for i in val.split()]
elif (val_type == 'int'):
try:
val = [int(i) for i in val.split()]
except ValueError:
val = _parse_from_incar(filename, param_name)
if (val is None):
raise IOError('Error in parsing vasprun.xml')
elif (val_type == 'string'):
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
val = _parse_from_incar(filename, param_name)
if (val is None):
raise IOError('Error in parsing vasprun.xml')
return val
|
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains \\*\\*\\* for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
|
codesearchnet
|
def __recognize_dict(self, node: yaml.Node, expected_type: Type) -> RecResult:
logger.debug('Recognizing as a dict')
if (not issubclass(generic_type_args(expected_type)[0], str)):
raise RuntimeError('YAtiML only supports dicts with strings as keys')
if (not isinstance(node, yaml.MappingNode)):
message = '{}{}Expected a dict/mapping here'.format(node.start_mark, os.linesep)
return ([], message)
value_type = generic_type_args(expected_type)[1]
for (_, value) in node.value:
(recognized_value_types, message) = self.recognize(value, value_type)
if (len(recognized_value_types) == 0):
return ([], message)
if (len(recognized_value_types) > 1):
return ([Dict[(str, t)] for t in recognized_value_types], message)
return ([expected_type], '')
|
Recognize a node that we expect to be a dict of some kind.
Args:
node: The node to recognize.
expected_type: Dict[str, ...something...]
Returns:
expected_type if it was recognized, [] otherwise.
|
codesearchnet
|
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
return
|
Updates the candidate generation strategy based on the outcomes.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
beam search or log softmax for each vocabulary token when using beam search
num_matches (`int`):
The number of matches between the candidate sequences and the model predictions.
|
github-repos
|
def _group_device_list(devices):
assert not _is_device_list_single_worker(devices)
device_dict = {}
for d in devices:
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job not in device_dict:
device_dict[d_spec.job] = []
while len(device_dict[d_spec.job]) <= d_spec.task:
device_dict[d_spec.job].append([])
device_dict[d_spec.job][d_spec.task].append(d)
return device_dict
|
Groups the devices list by task_type and task_id.
Args:
devices: a list of device strings for remote devices.
Returns:
a dict of list of device strings mapping from task_type to a list of devices
for the task_type in the ascending order of task_id.
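Worked example (illustrative, not from the original source):
>>> _group_device_list([
...     '/job:worker/task:0/device:GPU:0',
...     '/job:worker/task:0/device:GPU:1',
...     '/job:worker/task:1/device:GPU:0',
... ])
{'worker': [['/job:worker/task:0/device:GPU:0', '/job:worker/task:0/device:GPU:1'], ['/job:worker/task:1/device:GPU:0']]}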
|
github-repos
|
def check_config_file(msg):
with jsonconfig.Config('messages', indent=4) as cfg:
verify_profile_name(msg, cfg)
retrieve_data_from_config(msg, cfg)
if (msg._auth is None):
retrieve_pwd_from_config(msg, cfg)
if msg.save:
update_config_data(msg, cfg)
update_config_pwd(msg, cfg)
|
Checks the config.json file for default settings and auth values.
Args:
:msg: (Message class) an instance of a message class.
|
codesearchnet
|
def find_contour_yaml(config_file=__file__, names=None):
checked = set()
contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names)
if (not contour_yaml):
contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names)
return contour_yaml
|
Traverse directory trees to find a contour.yaml file.
Begins with the location of this file, then checks the
working directory if it is not found there.
Args:
config_file: location of this file, override for
testing
names: optional alternative file names to search for
Returns:
the path of contour.yaml or None if not found
|
codesearchnet
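A minimal usage sketch, assuming the module that defines find_contour_yaml is importable; the fallback message is illustrative:
path = find_contour_yaml()
if path is not None:
    with open(path) as f:
        config_text = f.read()
else:
    print('no contour.yaml found near this file or the working directory')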
|
def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:
if name is None:
name = schedule.name
return union((time, schedule), name=name)
|
Return schedule shifted by `time`.
Args:
schedule: The schedule to shift
time: The time to shift by
name: Name of shifted schedule. Defaults to name of `schedule`
|
juraj-google-style
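A hedged usage sketch; my_schedule stands in for any ScheduleComponent and the shift amount is arbitrary:
delayed = shift(my_schedule, 10)                       # same instructions, 10 time units later
renamed = shift(my_schedule, 10, name='delayed_copy')  # same shift, explicit name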
|
def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):
return self._path_open(path, 'rb', mime_type, compression_type)
|
Returns a read channel for the given file path.
Args:
path: string path of the file object to be written to the system
mime_type: MIME type to specify the type of content in the file object
compression_type: Type of compression to be used for this object
Returns: file handle with a close function for the user to use
|
github-repos
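A usage sketch, assuming fs is an instance of the file system class exposing this method; the path is illustrative:
handle = fs.open('/tmp/example.bin')  # defaults: application/octet-stream, AUTO compression
try:
    data = handle.read()
finally:
    handle.close()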
|
def publish(self, object_id: str, event_type: str,
event_data: dict = None):
object_key = SchedulingObject.get_key(self.type, object_id)
publish(event_type=event_type,
event_data=event_data,
object_type=self.type,
object_id=object_id,
object_key=object_key,
origin=None)
|
Publish a scheduling object event.
Args:
object_id (str): ID of the scheduling object
event_type (str): Type of event.
event_data (dict, optional): Event data.
|
juraj-google-style
|
def hardware_version(self):
res = self.rpc(0, 2, result_type=(0, True))
binary_version = res['buffer']
ver = ''
for x in binary_version:
if (x != 0):
ver += chr(x)
return ver
|
Return the embedded hardware version string for this tile.
The hardware version is an up to 10 byte user readable string that is
meant to encode any necessary information about the specific hardware
that this tile is running on. For example, if you have multiple
assembly variants of a given tile, you could encode that information
here.
Returns:
str: The hardware version read from the tile.
|
codesearchnet
|
def DeleteIndex(self, index):
to_remove = None
for i in self.Items:
if i.index == index:
to_remove = i
if to_remove:
self.Items.remove(to_remove)
|
Remove a spent coin based on its index.
Args:
index (int):
|
juraj-google-style
|
def _brent_loop_body(state, params, constants):
best_estimate = state.best_estimate
last_estimate = state.last_estimate
contrapoint = state.contrapoint
value_at_best_estimate = state.value_at_best_estimate
value_at_last_estimate = state.value_at_last_estimate
value_at_contrapoint = state.value_at_contrapoint
step_to_best_estimate = state.step_to_best_estimate
step_to_last_estimate = state.step_to_last_estimate
num_iterations = state.num_iterations
finished = state.finished
replace_contrapoint = ~finished & (value_at_last_estimate * value_at_best_estimate < constants.zero_value)
contrapoint = tf.where(replace_contrapoint, last_estimate, contrapoint)
value_at_contrapoint = tf.where(replace_contrapoint, value_at_last_estimate, value_at_contrapoint)
step_to_last_estimate = tf.where(replace_contrapoint, best_estimate - last_estimate, step_to_last_estimate)
step_to_best_estimate = tf.where(replace_contrapoint, step_to_last_estimate, step_to_best_estimate)
replace_best_estimate = tf.where(finished, constants.false, tf.math.abs(value_at_contrapoint) < tf.math.abs(value_at_best_estimate))
last_estimate = tf.where(replace_best_estimate, best_estimate, last_estimate)
best_estimate = tf.where(replace_best_estimate, contrapoint, best_estimate)
contrapoint = tf.where(replace_best_estimate, last_estimate, contrapoint)
value_at_last_estimate = tf.where(replace_best_estimate, value_at_best_estimate, value_at_last_estimate)
value_at_best_estimate = tf.where(replace_best_estimate, value_at_contrapoint, value_at_best_estimate)
value_at_contrapoint = tf.where(replace_best_estimate, value_at_last_estimate, value_at_contrapoint)
root_tolerance = 0.5 * (params.absolute_root_tolerance + params.relative_root_tolerance * tf.math.abs(best_estimate))
bisection_step = 0.5 * (contrapoint - best_estimate)
finished |= (num_iterations >= params.max_iterations) | (tf.math.abs(bisection_step) < root_tolerance) | ~tf.math.is_finite(value_at_best_estimate) | (tf.math.abs(value_at_best_estimate) <= params.function_tolerance)
compute_short_step = tf.where(finished, constants.false, (root_tolerance < tf.math.abs(step_to_last_estimate)) & (tf.math.abs(value_at_best_estimate) < tf.math.abs(value_at_last_estimate)))
short_step = tf.where(compute_short_step, tf.where(tf.equal(last_estimate, contrapoint), _secant_step(best_estimate, last_estimate, value_at_best_estimate, value_at_last_estimate), _quadratic_interpolation_step(value_at_best_estimate, value_at_last_estimate, value_at_contrapoint, best_estimate, last_estimate, contrapoint)), constants.zero)
use_short_step = tf.where(compute_short_step, 2 * tf.math.abs(short_step) < tf.minimum(3 * tf.math.abs(bisection_step) - root_tolerance, tf.math.abs(step_to_last_estimate)), constants.false)
step_to_last_estimate = tf.where(use_short_step, step_to_best_estimate, bisection_step)
step_to_best_estimate = tf.where(finished, constants.zero, tf.where(use_short_step, short_step, bisection_step))
last_estimate = tf.where(finished, last_estimate, best_estimate)
best_estimate += tf.where(finished, constants.zero, tf.where(root_tolerance < tf.math.abs(step_to_best_estimate), step_to_best_estimate, tf.where(bisection_step > 0, root_tolerance, -root_tolerance)))
value_at_last_estimate = tf.where(finished, value_at_last_estimate, value_at_best_estimate)
value_at_best_estimate = tf.where(finished, value_at_best_estimate, params.objective_fn(best_estimate))
num_iterations = tf.where(finished, num_iterations, num_iterations + 1)
return [_BrentSearchState(best_estimate=best_estimate, last_estimate=last_estimate, contrapoint=contrapoint, value_at_best_estimate=value_at_best_estimate, value_at_last_estimate=value_at_last_estimate, value_at_contrapoint=value_at_contrapoint, step_to_best_estimate=step_to_best_estimate, step_to_last_estimate=step_to_last_estimate, num_iterations=num_iterations, finished=finished)]
|
Performs one iteration of the Brent root-finding algorithm.
Args:
state: A Python `_BrentSearchState` namedtuple.
params: A Python `_BrentSearchParams` namedtuple.
constants: A Python `_BrentSearchConstants` namedtuple.
Returns:
The `Tensor`s to use for the next iteration of the algorithm.
|
github-repos
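A hedged sketch of how a loop body like this is typically driven; building the initial _BrentSearchState, _BrentSearchParams and _BrentSearchConstants from user inputs is the caller's job and is not shown:
import tensorflow as tf

def run_brent_search(initial_state, params, constants):
    # Iterate the body until every element of the batch is marked finished.
    [final_state] = tf.while_loop(
        cond=lambda state: tf.reduce_any(~state.finished),
        body=lambda state: _brent_loop_body(state, params, constants),
        loop_vars=[initial_state])
    return final_state.best_estimate, final_state.num_iterations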
|
def encode_request(request_line, **headers):
lines = [request_line]
lines.extend([('%s: %s' % kv) for kv in headers.items()])
return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')
|
Creates the data for a SSDP request.
Args:
request_line (string): The request line for the request (e.g.
``"M-SEARCH * HTTP/1.1"``).
headers (dict of string -> string): Dictionary of header name - header
value pairs to present in the request.
Returns:
bytes: The encoded request.
|
codesearchnet
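For example, a typical SSDP discovery request could be built like this (header values are illustrative):
request = encode_request(
    'M-SEARCH * HTTP/1.1',
    HOST='239.255.255.250:1900',
    MAN='"ssdp:discover"',
    MX='2',
    ST='ssdp:all')
# b'M-SEARCH * HTTP/1.1\r\nHOST: 239.255.255.250:1900\r\n...\r\n\r\n'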
|
def get_or_create_from_ip(ip):
data = ip_api_handler.get(ip)
if data and any(v for v in data.values()):
if data.get('ip_address', None) is None or not data['ip_address']:
data['ip_address'] = ip
return IPInfo.objects.get_or_create(**data)
return None, False
|
Get or create an entry using information obtained from an IP.
Args:
ip (str): IP address xxx.xxx.xxx.xxx.
Returns:
tuple: (IPInfo instance, created flag) as returned by get_or_create,
or (None, False) if the IP API returned no usable data.
|
juraj-google-style
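A usage sketch (hedged: whether this is exposed as a static or class method on IPInfo is an assumption; the address is illustrative):
ip_info, created = get_or_create_from_ip('93.184.216.34')
if ip_info is None:
    print('IP API returned no usable data for this address')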
|
def get_without(self, fragments,
use_lookup=None):
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
if pd.api.types.is_list_like(fragments):
for fragment in fragments:
try:
index_of_all_fragments |= fragment.index
except NameError:
index_of_all_fragments = fragment.index
else:
index_of_all_fragments = fragments.index
missing_part = self.loc[self.index.difference(index_of_all_fragments)]
missing_part = missing_part.fragmentate(use_lookup=use_lookup)
return sorted(missing_part, key=len, reverse=True)
|
Return self without the specified fragments.
Args:
fragments: Either a list of :class:`~chemcoord.Cartesian` or a
:class:`~chemcoord.Cartesian`.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
list: List containing :class:`~chemcoord.Cartesian`.
|
juraj-google-style
|
def gcd(*numbers):
    from math import gcd as pygcd  # assumed import; the original module aliases a two-argument gcd as pygcd
    n = numbers[0]
    for i in numbers:
        n = pygcd(n, i)
    return n
|
Returns the greatest common divisor for a sequence of numbers.
Args:
\*numbers: Sequence of numbers.
Returns:
(int) Greatest common divisor of numbers.
|
juraj-google-style
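A quick usage check, assuming pygcd is an ordinary two-argument gcd imported under an alias:
print(gcd(20, 8))       # 4
print(gcd(12, 18, 24))  # 6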
|
def __init__(self, name, indicator):
super(ThreatIntelligence, self).__init__()
self.name = name
self.indicator = indicator
|
Initializes the Threat Intelligence container.
Args:
name (string): name of the threat
indicator (string): regular expression relevant to a threat
|
juraj-google-style
|
def has_key(self, key):
rc = self._libinput.libinput_device_keyboard_has_key(self._handle, key)
assert rc >= 0, 'This device is not a keyboard device'
return bool(rc)
|
Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`
device has a given key.
Args:
key (int): Key to check for, see ``input.h`` for key definitions.
Returns:
bool: :obj:`True` if the device has this key, :obj:`False` if
it does not.
Raises:
AssertionError
|
juraj-google-style
|
def parse_conservations(variant):
conservations = {}
conservations['gerp'] = parse_conservation(variant, 'dbNSFP_GERP___RS')
conservations['phast'] = parse_conservation(variant, 'dbNSFP_phastCons100way_vertebrate')
conservations['phylop'] = parse_conservation(variant, 'dbNSFP_phyloP100way_vertebrate')
return conservations
|
Parse the conservation predictors
Args:
variant(dict): A variant dictionary
Returns:
conservations(dict): A dictionary with the conservations
|
codesearchnet
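A usage sketch; the variant keys mirror the dbNSFP fields read above, and the numeric string values are assumptions about what parse_conservation accepts:
variant = {
    'dbNSFP_GERP___RS': '4.9',
    'dbNSFP_phastCons100way_vertebrate': '0.998',
    'dbNSFP_phyloP100way_vertebrate': '7.5',
}
conservations = parse_conservations(variant)
# -> {'gerp': ..., 'phast': ..., 'phylop': ...}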
|
def _flatten_location_translations(location_translations):
sources_to_process = set(six.iterkeys(location_translations))
def _update_translation(source):
destination = location_translations[source]
if destination not in location_translations:
return destination
else:
sources_to_process.discard(destination)
final_destination = _update_translation(destination)
location_translations[source] = final_destination
return final_destination
while sources_to_process:
_update_translation(sources_to_process.pop())
|
If location A translates to B, and B to C, then make A translate directly to C.
Args:
location_translations: dict of Location -> Location, where the key translates to the value.
Mutated in place for efficiency and simplicity of implementation.
|
juraj-google-style
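A worked example using plain strings in place of Location objects (any hashable keys behave the same way):
translations = {'A': 'B', 'B': 'C', 'C': 'D'}
_flatten_location_translations(translations)
# translations is mutated in place to {'A': 'D', 'B': 'D', 'C': 'D'}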
|
def _call(self, utterances_batch: List[str], utterances_ids: List[int] = None) -> List[RichMessage]:
rich_message = RichMessage()
for utt_id, utt in enumerate(utterances_batch):
if utterances_ids:
id_ = utterances_ids[utt_id]
log.debug(f'Utterance: {utt}')
if utt == "/start":
welcome = "I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text."
rich_message.add_control(PlainText(welcome))
continue
if utt[0] == "@":
command, *parts = utt.split(":")
log.debug(f'Actions: {parts}')
if command == "@details":
batch_index = int(parts[0])
item_index = int(parts[1])
rich_message.add_control(PlainText(show_details(
self.history[id_][batch_index][item_index])))
continue
if command == "@entropy":
state = self.history[id_][int(parts[0])]
state[parts[1]] = parts[2]
state["start"] = 0
state["stop"] = 5
utt = state['query']
self.states[id_] = state
if command == "@next":
state = self.history[id_][int(parts[0])]
state['start'] = state['stop']
state['stop'] = state['stop'] + 5
utt = state['query']
self.states[id_] = state
else:
if id_ not in self.states:
self.states[id_] = {}
self.states[id_]["start"] = 0
self.states[id_]["stop"] = 5
responses_batch, confidences_batch, state_batch = self.skills[0](
[utt], self.history[id_], [self.states[id_]])
self.states[id_] = state_batch[0]
self.states[id_]["query"] = utt
items_batch, entropy_batch = responses_batch
for batch_idx, items in enumerate(items_batch):
self.history[id_].append(items)
self.history[id_].append(self.states[id_])
for idx, item in enumerate(items):
rich_message.add_control(_draw_item(item, idx, self.history[id_]))
if len(items) == self.states[id_]['stop'] - self.states[id_]['start']:
buttons_frame = _draw_tail(entropy_batch[batch_idx], self.history[id_])
rich_message.add_control(buttons_frame)
return [rich_message]
|
Processes batch of utterances and returns corresponding responses batch.
Args:
utterances_batch: Batch of incoming utterances.
utterances_ids: Batch of dialog IDs corresponding to incoming utterances.
Returns:
responses: A batch of responses corresponding to the
utterance batch received by agent.
|
juraj-google-style
|
def _CreateFeed(client):
feed_service = client.GetService('FeedService', version='v201809')
operation = {
'operand': {
'name': 'DSA Feed %s' % uuid.uuid4(),
'attributes': [
{'type': 'URL_LIST', 'name': 'Page URL'},
{'type': 'STRING_LIST', 'name': 'Label'}
],
'origin': 'USER'
},
'operator': 'ADD'
}
feed = feed_service.mutate([operation])['value'][0]
return _DSAFeedDetails(feed['id'], feed['attributes'][0]['id'],
feed['attributes'][1]['id'])
|
Creates the feed for DSA page URLs.
Args:
client: an AdWordsClient instance.
Returns:
A _DSAFeedDetails instance containing details about the created feed.
|
juraj-google-style
|