code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---|
def pull_df(self, md5):
try:
_packed_df = self.workbench.get_dataframe(md5)
_df = pd.read_msgpack(lz4.loads(_packed_df))
return _df
except zerorpc.exceptions.RemoteError as e:
return repr_to_str_decorator.r_to_s(self._data_not_found)(e)
|
Wrapper for the Workbench get_dataframe method
Args:
md5: pull the dataframe identified by this md5
Returns:
The uncompressed/unserialized dataframe
|
juraj-google-style
|
def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
dirs = [user_config(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
configs = []
for dname in reversed(dirs):
test_path = path.join(dname, __name)
if path.exists(test_path):
configs.append(test_path)
return configs
|
Return all configs for given package.
Args:
__pkg: Package name
__name: Configuration file name
|
juraj-google-style
|
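The `get_configs` entry above walks the XDG config search path (the user config directory first, then each entry of `XDG_CONFIG_DIRS`, defaulting to `/etc/xdg`) and returns existing config files in lowest-to-highest precedence order. Below is a minimal standalone sketch of the same lookup using only the standard library; the original's `user_config` helper is replaced with an assumed `~/.config/<pkg>` default.

```python
import os
from os import path


def find_configs(pkg: str, name: str = "config") -> list:
    """Sketch of an XDG-style config lookup: user dir first, then system dirs."""
    # Assumed stand-in for the original user_config() helper.
    user_dir = path.join(os.getenv("XDG_CONFIG_HOME", path.expanduser("~/.config")), pkg)
    dirs = [user_dir]
    dirs.extend(path.join(d, pkg)
                for d in os.getenv("XDG_CONFIG_DIRS", "/etc/xdg").split(":"))
    configs = []
    # Reversed so lower-precedence (system) files come first, the user file last.
    for dname in reversed(dirs):
        candidate = path.join(dname, name)
        if path.exists(candidate):
            configs.append(candidate)
    return configs


if __name__ == "__main__":
    print(find_configs("git"))  # e.g. ['/home/user/.config/git/config'] if present
```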
def update_config_data(msg, cfg):
for attr in msg:
if ((attr in cfg.data[msg.profile]) and (attr != 'auth')):
cfg.data[msg.profile][attr] = getattr(msg, attr)
|
Updates the profile's config entry with values set in each attr by the
user. This will overwrite existing values.
Args:
:msg: (Message class) an instance of a message class.
:cfg: (jsonconfig.Config) config instance.
|
codesearchnet
|
def increment(self, size: int):
assert (size >= 0), size
self.files += 1
self.size += size
self.bandwidth_meter.feed(size)
|
Increment the number of files downloaded.
Args:
size: The size of the file
|
codesearchnet
|
def get_annotations_dict(members: dict[str, cfg.Variable]) -> '_instances.AnnotationsDict | None':
if '__annotations__' not in members:
return None
annots_var = members['__annotations__']
try:
annots = get_atomic_value(annots_var)
except ConversionError:
return None
return annots if isinstance(annots, _abstract.AnnotationsDict) else None
|
Get __annotations__ from a members map.
Returns None rather than {} if the dict does not exist so that callers always
have a reference to the actual dictionary, and can mutate it if needed.
Args:
members: A dict of member name to variable
Returns:
members['__annotations__'] unpacked as a python dict, or None
|
github-repos
|
def _run_command(argv):
(command_name, argv) = _get_command_and_argv(argv)
_LOGGER.info('Running command "%s %s" with args: %s', settings.command, command_name, argv)
subcommand = _get_subcommand(command_name)
func = call.get_callable(subcommand)
doc = usage.format_usage(subcommand.__doc__)
args = _get_parsed_args(command_name, doc, argv)
return (call.call(func, args) or 0)
|
Run the command with the given CLI options and exit.
Command functions are expected to have a __doc__ string that is parseable
by docopt.
Args:
argv: The list of command line arguments supplied for a command. The
first argument is expected to be the name of the command to be run.
Note that this is different than the full arguments parsed by
docopt for the entire program.
Raises:
ValueError: Raised if the user attempted to run an invalid command.
|
codesearchnet
|
def validate_sns_topic_subscription(self, region):
sns = self.session.client('sns', region_name=region)
arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)
try:
data = sns.list_subscriptions_by_topic(TopicArn=arn)
except ClientError as ex:
self.log.error('Failed to list subscriptions by topic in {} ({}): {}'.format(
self.account.account_name,
region,
ex
))
return False
for sub in data['Subscriptions']:
if sub['Endpoint'] == self.sqs_queue:
if sub['SubscriptionArn'] == 'PendingConfirmation':
self.log.warning('Subscription pending confirmation for {} in {}'.format(
self.account.account_name,
region
))
return False
return True
return False
|
Validates SQS subscription to the SNS topic. Returns `True` if subscribed or `False` if not subscribed
or topic is missing
Args:
region (str): Name of AWS Region
Returns:
`bool`
|
juraj-google-style
|
def imresize(img, size, return_scale=False, interpolation='bilinear'):
h, w = img.shape[:2]
resized_img = cv2.resize(
img, size, interpolation=interp_codes[interpolation])
if not return_scale:
return resized_img
else:
w_scale = size[0] / w
h_scale = size[1] / h
return resized_img, w_scale, h_scale
|
Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
|
juraj-google-style
|
def recursive_print(name, val, spaces=0):
if name is None:
msg = None
else:
fmt = '.' * max(0, spaces - 2) + '# {:' + str(50 - spaces) + 's}'
msg = fmt.format(name)
if isinstance(val, dict):
if msg is not None:
print(msg)
for k in val.keys():
recursive_print(k, val[k], spaces + 2)
elif isinstance(val, torch.Tensor):
print(msg, ':', val.size())
else:
print(msg, ':', val)
|
Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`
Args:
name (str): the name of the current tensor parameter
val (Tuple(int)): the shape of the current tensor parameter
spaces (int): the number of spaces to print before the output for a nested structure
|
github-repos
|
def writeInput(self, session, directory, name):
self.project_directory = directory
with tmp_chdir(directory):
replaceParamFile = self.replaceParamFile
self.write(session=session, directory=directory, name=name)
self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)
|
Write only input files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
|
codesearchnet
|
def center_crop(self, image: 'torch.Tensor', size: dict[str, int], **kwargs) -> 'torch.Tensor':
if size.height is None or size.width is None:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return F.center_crop(image, (size['height'], size['width']))
|
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`"torch.Tensor"`):
Image to center crop.
size (`Dict[str, int]`):
Size of the output image.
Returns:
`torch.Tensor`: The center cropped image.
|
github-repos
|
def define_simulation_graph(batch_env, algo_cls, config):
step = tf.Variable(0, False, dtype=tf.int32, name='global_step')
is_training = tf.placeholder(tf.bool, name='is_training')
should_log = tf.placeholder(tf.bool, name='should_log')
do_report = tf.placeholder(tf.bool, name='do_report')
force_reset = tf.placeholder(tf.bool, name='force_reset')
algo = algo_cls(batch_env, step, is_training, should_log, config)
done, score, summary = tools.simulate(
batch_env, algo, should_log, force_reset)
message = 'Graph contains {} trainable variables.'
tf.logging.info(message.format(tools.count_weights()))
return tools.AttrDict(locals())
|
Define the algorithm and environment interaction.
Args:
batch_env: In-graph environments object.
algo_cls: Constructor of a batch algorithm.
config: Configuration object for the algorithm.
Returns:
Object providing graph elements via attributes.
|
juraj-google-style
|
def prune_volumes(self, filters=None):
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True)
|
Delete unused volumes
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted volume names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(padded_shape.dims, input_component_shape.dims):
if padded_dim.value is not None and input_dim.value is not None and (padded_dim.value < input_dim.value):
return False
return True
|
Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
|
github-repos
|
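The compatibility rule in `_is_padded_shape_compatible_with` is: an unknown rank is always compatible, ranks must otherwise match, and every known padded dimension must be at least as large as the corresponding known input dimension. A small plain-Python sketch of that rule follows, using `None` for unknown ranks and dimensions instead of `tf.TensorShape`; the list-of-ints representation is an assumption for illustration.

```python
from typing import List, Optional

Shape = Optional[List[Optional[int]]]  # None = unknown rank; None entry = unknown dim


def is_padded_shape_compatible(padded: Shape, component: Shape) -> bool:
    """Sketch of the padded-shape compatibility rule with plain lists."""
    if padded is None or component is None:
        return True                      # unknown rank: assume compatible
    if len(padded) != len(component):
        return False                     # rank mismatch
    for p, c in zip(padded, component):
        if p is not None and c is not None and p < c:
            return False                 # cannot pad down to a smaller dimension
    return True


assert is_padded_shape_compatible([None, 10], [4, 8])
assert not is_padded_shape_compatible([4, 8], [4, 10])
assert is_padded_shape_compatible(None, [3, 3])
```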
def compute_average_oxidation_state(site):
try:
avg_oxi = sum([(sp.oxi_state * occu) for (sp, occu) in site.species.items() if (sp is not None)])
return avg_oxi
except AttributeError:
pass
try:
return site.charge
except AttributeError:
raise ValueError('Ewald summation can only be performed on structures that are either oxidation state decorated or have site charges.')
|
Calculates the average oxidation state of a site
Args:
site: Site to compute average oxidation state
Returns:
Average oxidation state of site.
|
codesearchnet
|
def kmer_count(seq_list, k):
all_kmers = generate_all_kmers(k)
kmer_count_list = []
for seq in seq_list:
kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])
return pd.DataFrame(kmer_count_list, columns=all_kmers)
|
Generate k-mer counts from a set of sequences
Args:
seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
k (int): K in k-mer.
Returns:
pandas.DataFrame: Count matrix for each sequence in seq_list
Example:
>>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
AA AC AG AT CA CC CG CT GA GC GG GT TA TC TG TT
0 0 1 0 1 0 0 1 0 0 0 0 1 1 0 0 1
1 0 1 0 0 0 0 2 0 2 1 0 0 0 0 0 0
|
codesearchnet
|
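The `kmer_count` entry above relies on a `generate_all_kmers` helper that is not shown. A self-contained sketch of the same count-matrix construction follows, generating the k-mer alphabet with `itertools.product`; note that `str.count` counts non-overlapping occurrences, matching the original.

```python
import itertools
import pandas as pd


def kmer_count(seq_list, k):
    """Sketch: k-mer count matrix for a list of DNA sequences."""
    # All 4**k k-mers over the DNA alphabet, in lexicographic order.
    all_kmers = [''.join(p) for p in itertools.product('ACGT', repeat=k)]
    rows = [[seq.count(kmer) for kmer in all_kmers] for seq in seq_list]
    return pd.DataFrame(rows, columns=all_kmers)


print(kmer_count(["ACGTTAT", "GACGCGA"], 2))
```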
def load_entity(self, name, file_name, reload_cache=False):
Entity.verify_name(name)
self.entities.load(Entity.wrap_name(name), file_name, reload_cache)
with open(file_name) as f:
self.padaos.add_entity(name, f.read().split('\n'))
self.must_train = True
|
Loads an entity, optionally checking the cache first
Args:
name (str): The associated name of the entity
file_name (str): The location of the entity file
reload_cache (bool): Whether to refresh all of cache
|
codesearchnet
|
def save_plot(self, filename, img_format="eps", **kwargs):
plt = self.get_plot(**kwargs)
plt.savefig(filename, format=img_format)
|
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
|
juraj-google-style
|
def _checksum(cls, line):
tr_table = str.maketrans({c: None for c in (ascii_uppercase + '+ .')})
no_letters = line[:68].translate(tr_table).replace('-', '1')
return (sum([int(l) for l in no_letters]) % 10)
|
Compute the checksum of a full line
Args:
line (str): Line to compute the checksum from
Return:
int: Checksum (modulo 10)
|
codesearchnet
|
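`_checksum` implements a TLE-style line checksum: drop uppercase letters, periods, plus signs and spaces from the first 68 characters, count each minus sign as 1, then sum the remaining digits modulo 10. A standalone sketch of the same rule is below; the sample line is hypothetical and only exercises the function.

```python
from string import ascii_uppercase


def line_checksum(line: str) -> int:
    """Sketch of the mod-10 line checksum used for TLE-like lines."""
    # Remove uppercase letters, '+', ' ' and '.'; a '-' counts as 1.
    table = str.maketrans({c: None for c in ascii_uppercase + '+ .'})
    digits = line[:68].translate(table).replace('-', '1')
    return sum(int(d) for d in digits) % 10


# Hypothetical line: only the digits (and '-') contribute to the sum.
print(line_checksum("1 25544U 98067A   20029.5"))  # a value in 0..9
```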
def _RunOsLoginControl(self, params):
try:
return subprocess.call(([constants.OSLOGIN_CONTROL_SCRIPT] + params))
except OSError as e:
if (e.errno == errno.ENOENT):
return None
else:
raise
|
Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
|
codesearchnet
|
def has_enough_gas_reserve(raiden, channels_to_open: int=0) -> Tuple[(bool, int)]:
secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open)
current_account_balance = raiden.chain.client.balance(raiden.chain.client.address)
return ((secure_reserve_estimate <= current_account_balance), secure_reserve_estimate)
|
Checks if the account has enough balance to handle the lifecycles of all
open channels as well as the to be created channels.
Note: This is just an estimation.
Args:
raiden: A raiden service instance
channels_to_open: The number of new channels that should be opened
Returns:
Tuple of a boolean denoting if the account has enough balance for
the remaining lifecycle events and the estimate for the remaining
lifecycle cost
|
codesearchnet
|
def _supervised_signature_def(method_name, inputs, loss=None, predictions=None, metrics=None):
if inputs is None or not inputs:
raise ValueError('{} inputs cannot be None or empty.'.format(method_name))
signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}
signature_outputs = {}
for output_set in (loss, predictions, metrics):
if output_set is not None:
sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()}
signature_outputs.update(sig_out)
signature_def = signature_def_utils.build_signature_def(signature_inputs, signature_outputs, method_name)
return signature_def
|
Creates a signature for training and eval data.
This function produces signatures that describe the inputs and outputs
of a supervised process, such as training or evaluation, that
results in loss, metrics, and the like. Note that this function only requires
inputs to be not None.
Args:
method_name: Method name of the SignatureDef as a string.
inputs: dict of string to `Tensor`.
loss: dict of string to `Tensor` representing computed loss.
predictions: dict of string to `Tensor` representing the output predictions.
metrics: dict of string to `Tensor` representing metric ops.
Returns:
A train- or eval-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
|
github-repos
|
def comment_to_ast(self, comment, link_resolver):
assert (comment is not None)
text = comment.description
if (self.remove_xml_tags or (comment.filename in self.gdbus_codegen_sources)):
text = re.sub('<.*?>', '', text)
if self.escape_html:
text = cgi.escape(text)
(ast, diagnostics) = cmark.gtkdoc_to_ast(text, link_resolver)
for diag in diagnostics:
if (comment.filename and (comment.filename not in self.gdbus_codegen_sources)):
column = (diag.column + comment.col_offset)
if (diag.lineno == 0):
column += comment.initial_col_offset
lines = text.split('\n')
line = lines[diag.lineno]
i = 0
while (line[i] == ' '):
i += 1
column += (i - 1)
if ((diag.lineno > 0) and any([(c != ' ') for c in lines[(diag.lineno - 1)]])):
column += 1
lineno = (- 1)
if (comment.lineno != (- 1)):
lineno = (((comment.lineno - 1) + comment.line_offset) + diag.lineno)
warn(diag.code, message=diag.message, filename=comment.filename, lineno=lineno, column=column)
return ast
|
Given a gtk-doc comment string, returns an opaque PyCapsule
containing the document root.
This is an optimization allowing to parse the docstring only
once, and to render it multiple times with
`ast_to_html`, links discovery and
most of the link resolution being lazily done in that second phase.
If you don't care about performance, you should simply
use `translate`.
Args:
text: unicode, the docstring to parse.
link_resolver: hotdoc.core.links.LinkResolver, an object
which will be called to retrieve `hotdoc.core.links.Link`
objects.
Returns:
capsule: A PyCapsule wrapping an opaque C pointer, which
can be passed to `ast_to_html`
afterwards.
diagnostics: A list of diagnostics as output by the gtk-doc cmark
extension
|
codesearchnet
|
def get_course_details(self, course_id):
return self._load_data(self.COURSES_ENDPOINT, resource_id=course_id, many=False)
|
Return the details of a single course by id - not a course run id.
Args:
course_id (str): The unique id for the course in question.
Returns:
dict: Details of the course in question.
|
codesearchnet
|
def get_input_info_dict(self, signature=None):
return self._spec.get_input_info_dict(signature=signature, tags=self._tags)
|
Describes the inputs required by a signature.
Args:
signature: A string with the signature to get inputs information for.
If None, the default signature is used if defined.
Returns:
The result of ModuleSpec.get_input_info_dict() for the given signature,
and the graph variant selected by `tags` when this Module was initialized.
Raises:
KeyError: if there is no such signature.
|
codesearchnet
|
def get_message(self, message_id):
for message in self.messages:
if message.id == message_id:
return message
raise ArgumentError("Message ID not found", message_id=message_id)
|
Get a message by its persistent id.
Args:
message_id (int): The id of the message that we're looking for
|
juraj-google-style
|
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
if old_value is not None:
if new_value is not None:
raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.")
return old_value
return new_value
|
Looks up deprecated argument name and ensures both are not used.
Args:
new_name: new name of argument
new_value: value of new argument (or None if not used)
old_name: old name of argument
old_value: value of old argument (or None if not used)
Returns:
The effective argument that should be used.
Raises:
ValueError: if new_value and old_value are both non-null
|
github-repos
|
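The pattern in `deprecated_argument_lookup` is typically used when renaming a keyword argument while keeping the old name working for a transition period. Below is a small usage sketch; the local `_lookup` helper mirrors the function above, and the `area` function with its `width`/`w` parameters is invented purely for illustration.

```python
def _lookup(new_name, new_value, old_name, old_value):
    # Mirrors deprecated_argument_lookup above: reject using both names at once.
    if old_value is not None:
        if new_value is not None:
            raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.")
        return old_value
    return new_value


def area(height, width=None, w=None):
    """Hypothetical API that renamed `w` to `width` but still accepts `w`."""
    width = _lookup('width', width, 'w', w)
    return height * width


print(area(2, width=3))   # 6, new name
print(area(2, w=3))       # 6, deprecated name still accepted
try:
    area(2, width=3, w=4)
except ValueError as exc:
    print(exc)            # Cannot specify both 'w' and 'width'.
```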
def _validator(code_or_name, validator_type):
if validator_type == "error":
from .errors import codes
from .errors import EXT
elif validator_type == "warning":
from .warnings import codes
from .warnings import EXT
else:
pass
def decorator(func):
def wrapper(*args, **kw):
extra = func(*args, **kw)
if extra is None: return []
if isinstance(code_or_name, string_types):
code = EXT
name = codes[code][0] + ":" + code_or_name
else:
code = code_or_name
name = codes[code][0]
text = codes[code][1]
return [(code, name, text, extra)]
wrapper.validator_type = validator_type
return wrapper
return decorator
|
Internal shared implementation to handle both error and warning
validation checks.
Args:
code_or_name (int or str) : a defined error code or custom message
validator_type (str) : either "error" or "warning"
Returns:
validation decorator
|
juraj-google-style
|
def parse(self, argument):
if (not isinstance(argument, six.string_types)):
raise TypeError('flag value must be a string, found "{}"'.format(type(argument)))
return argument
|
Parses the string argument and returns the native value.
By default it returns its argument unmodified.
Args:
argument: string argument passed in the commandline.
Raises:
ValueError: Raised when it fails to parse the argument.
TypeError: Raised when the argument has the wrong type.
Returns:
The parsed value in native type.
|
codesearchnet
|
def is_commutable(expr1, expr2, eps=1e-08):
return (sum(((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs())) < eps)
|
Test whether expr1 and expr2 are commutable.
Args:
expr1 (Expr, Term or Pauli operator): Pauli's expression.
expr2 (Expr, Term or Pauli operator): Pauli's expression.
eps (float, optional): Machine epsilon.
If |[expr1, expr2]| < eps, they are considered commutable.
Returns:
bool: if expr1 and expr2 are commutable, returns True, otherwise False.
|
codesearchnet
|
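`is_commutable` checks that the commutator [expr1, expr2] = expr1·expr2 − expr2·expr1 has numerically vanishing coefficients. The same idea can be illustrated with explicit NumPy matrices instead of the library's Pauli expressions: X and Z anticommute, while X commutes with itself. This is a sketch of the commutator test, not the library's own representation.

```python
import numpy as np


def is_commutable(a: np.ndarray, b: np.ndarray, eps: float = 1e-8) -> bool:
    """Sketch: True if the commutator [a, b] = ab - ba vanishes numerically."""
    comm = a @ b - b @ a
    return float(np.sum(np.abs(comm) ** 2)) < eps


X = np.array([[0, 1], [1, 0]], dtype=complex)   # Pauli X
Z = np.array([[1, 0], [0, -1]], dtype=complex)  # Pauli Z

print(is_commutable(X, X))  # True  -- any operator commutes with itself
print(is_commutable(X, Z))  # False -- X and Z anticommute, [X, Z] != 0
```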
def relocate(source, destination, move=False):
venv = api.VirtualEnvironment(source)
if (not move):
venv.relocate(destination)
return None
venv.move(destination)
return None
|
Adjust the virtual environment settings and optionally move it.
Args:
source (str): Path to the existing virtual environment.
destination (str): Desired path of the virtual environment.
move (bool): Whether or not to actually move the files. Default False.
|
codesearchnet
|
def callable_eq(x: Optional[Callable[..., Any]], y: Optional[Callable[..., Any]]) -> bool:
if x is y:
return True
if x is None or y is None:
return False
if inspect.isfunction(x) and inspect.isfunction(y):
return _code_eq(x.__code__, y.__code__)
elif inspect.ismethod(x) and inspect.ismethod(y):
return _code_eq(x.__code__, y.__code__) and x.__self__ is y.__self__
return x == y
|
Returns True if two (maybe) callables are equal.
For functions: `x` and `y` are considered equal when they are the same
instance or have the same code (e.g. lambda x: x).
For methods: `x` and `y` are considered equal when:
static method: The same method from the same class hierarchy. E.g. subclass
inherits a base class' static method.
class method: The same method from the same class. Inherited class method
are considered different class method.
instance method: When `self` is not bound, the same method from the same
class hierarchy (like static method). When `self` is bound, the same
method on the same object.
Args:
x: An optional function or method object.
y: An optional function or method object.
Returns:
Returns True if `x` and `y` are considered equal. Meaning that they are
either the same instance or derived from the same code and have the same
effect.
|
github-repos
|
def fastq_verifier(entries, ambiguous=False):
if ambiguous:
regex = '^@.+{0}[ACGTURYKMSWBDHVNX]+{0}\\+.*{0}[!-~]+{0}$'.format(os.linesep)
else:
regex = '^@.+{0}[ACGTU]+{0}\\+.*{0}[!-~]+{0}$'.format(os.linesep)
delimiter = '{0}'.format(os.linesep)
for entry in entries:
if (len(entry.sequence) != len(entry.quality)):
msg = 'The number of bases in {0} does not match the number of quality scores'.format(entry.id)
raise FormatError(message=msg)
try:
entry_verifier([entry.write()], regex, delimiter)
except FormatError as error:
if (error.part == 0):
msg = 'Unknown Header Error with {0}'.format(entry.id)
raise FormatError(message=msg)
elif ((error.part == 1) and ambiguous):
msg = '{0} contains a base not in [ACGTURYKMSWBDHVNX]'.format(entry.id)
raise FormatError(message=msg)
elif ((error.part == 1) and (not ambiguous)):
msg = '{0} contains a base not in [ACGTU]'.format(entry.id)
raise FormatError(message=msg)
elif (error.part == 2):
msg = 'Unknown error with line 3 of {0}'.format(entry.id)
raise FormatError(message=msg)
elif (error.part == 3):
msg = '{0} contains a quality score not in [!-~]'.format(entry.id)
raise FormatError(message=msg)
else:
msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
raise FormatError(message=msg)
|
Raises error if invalid FASTQ format detected
Args:
entries (list): A list of FastqEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTQ format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fastq_iter
>>> import os
>>> entries = r'@entry1{0}AAGGATTCG{0}+{0}112234432{0}' \
... r'@entry{0}AGGTCCCCCG{0}+{0}4229888884{0}' \
... r'@entry3{0}GCCTAGC{0}9ddsa5n'.format(os.linesep)
>>> fastq_entries = fastq_iter(iter(entries.split(os.linesep)))
>>> fastq_verifier(fastq_entries)
|
codesearchnet
|
def _should_pack(arg):
return isinstance(arg, list)
|
Determines whether the caller needs to pack the argument in a tuple.
If a user-defined function returns a list of tensors, `nest.flatten()` and
`ops.convert_to_tensor()` would conspire to attempt to stack those tensors
into a single tensor because the tf.data version of `nest.flatten()` does
not recurse into lists. Since it is more likely that the list arose from
returning the result of an operation (such as `tf.numpy_function()`) that
returns a list of not-necessarily-stackable tensors, we treat the returned
value as a `tuple` instead. A user wishing to pack the return value into a
single tensor can use an explicit `tf.stack()` before returning.
Args:
arg: argument to check
Returns:
Indication of whether the caller needs to pack the argument in a tuple.
|
github-repos
|
def ws050(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `ws050`'.format(value))
self._ws050 = value
|
Corresponds to IDD Field `ws050`
Wind speed corresponding 5.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws050`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def from_json_file(cls, filename):
with open(filename, 'r') as fp:
return cls(json.load(fp))
|
Load a lexicon from a JSON file.
Args:
filename (str): The path to a JSON dump.
|
juraj-google-style
|
def update_qos_aggregated_configuration(self, qos_configuration, timeout=(- 1)):
uri = '{}{}'.format(self.data['uri'], self.QOS_AGGREGATED_CONFIGURATION)
return self._helper.update(qos_configuration, uri=uri, timeout=timeout)
|
Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
|
codesearchnet
|
def stepBy(self, steps):
self.setValue(self.value() + steps*self.singleStep())
|
Steps the value up or down by the given number of single steps. The single step size is defined in singleStep().
Args:
steps (int): positive int steps up, negative steps down
|
juraj-google-style
|
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(
path_entry, source.separator, environment_variables,
self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
key_paths = {
key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning((
'Windows Registry values are not supported, extracting keys: '
'"{0!s}"').format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(
name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning(
'Unsupported artifact definition source type: "{0:s}"'.format(
source.type_indicator))
return find_specs
|
Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
|
juraj-google-style
|
def _mouseDown(x, y, button):
if button == 'left':
try:
_sendMouseEvent(MOUSEEVENTF_LEFTDOWN, x, y)
except (PermissionError, OSError):
pass
elif button == 'middle':
try:
_sendMouseEvent(MOUSEEVENTF_MIDDLEDOWN, x, y)
except (PermissionError, OSError):
pass
elif button == 'right':
try:
_sendMouseEvent(MOUSEEVENTF_RIGHTDOWN, x, y)
except (PermissionError, OSError):
pass
else:
assert False, "button argument not in ('left', 'middle', 'right')"
|
Send the mouse down event to Windows by calling the mouse_event() win32
function.
Args:
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
button (str): The mouse button, either 'left', 'middle', or 'right'
Returns:
None
|
juraj-google-style
|
def _path_formatter(self, suffix):
if suffix.lower() == "mirror":
path_items = [self.bucket, self.s3path]
else:
path_items = [self.bucket, self.s3path, suffix]
path = '/'.join(path_items)
s3_format = "s3:
formatted_path = path.replace('
full_path = s3_format.format(formatted_path)
return full_path
|
Format the s3 path properly.
Args:
suffix (str): suffix to add on to an s3 path
Returns:
str: formatted path
|
juraj-google-style
|
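`_path_formatter` joins a bucket and key path, collapses duplicate slashes, and prefixes the `s3://` scheme; the `mirror` suffix is special-cased so no extra path segment is appended. A standalone sketch of the same formatting follows, with bucket and path values invented for the example.

```python
def format_s3_path(bucket: str, s3path: str, suffix: str) -> str:
    """Sketch: build an s3:// URL, treating the 'mirror' suffix specially."""
    if suffix.lower() == "mirror":
        parts = [bucket, s3path]
    else:
        parts = [bucket, s3path, suffix]
    path = "/".join(parts)
    # Collapse any accidental double slashes inside the path portion.
    return "s3://{}".format(path.replace("//", "/"))


print(format_s3_path("my-bucket", "app/files", "latest"))  # s3://my-bucket/app/files/latest
print(format_s3_path("my-bucket", "app/files", "mirror"))  # s3://my-bucket/app/files
```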
def date_to_epoch(year, month, day):
return int(date_to_delorean(year, month, day).epoch)
|
Converts a date to epoch in UTC
Args:
year: int between 1 and 9999.
month: int between 1 and 12.
day: int between 1 and 31.
Returns:
Int epoch in UTC from date.
|
juraj-google-style
|
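`date_to_epoch` delegates to a `date_to_delorean` helper that is not shown. The same conversion can be done with the standard library by building a timezone-aware UTC datetime at midnight and taking its timestamp, as sketched below.

```python
from datetime import datetime, timezone


def date_to_epoch(year: int, month: int, day: int) -> int:
    """Sketch: midnight UTC of the given date as Unix epoch seconds."""
    dt = datetime(year, month, day, tzinfo=timezone.utc)
    return int(dt.timestamp())


print(date_to_epoch(1970, 1, 2))   # 86400
print(date_to_epoch(2020, 1, 1))   # 1577836800
```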
def Parse(self, rdf_data):
if self._filter:
return list(self._filter.Parse(rdf_data, self.expression))
return rdf_data
|
Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, items that meet the filter conditions are returned in a list.
Args:
rdf_data: Host data that has already been processed by a Parser into RDF.
Returns:
A list containing data items that matched the filter rules.
|
codesearchnet
|
def pymmh3_hash128_x64(key: Union[bytes, bytearray], seed: int) -> int:
def fmix(k):
k ^= k >> 33
k = (k * 0xff51afd7ed558ccd) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = (k * 0xc4ceb9fe1a85ec53) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len(key)
nblocks = int(length / 16)
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
for block_start in range(0, nblocks * 8, 8):
k1 = (
key[2 * block_start + 7] << 56 |
key[2 * block_start + 6] << 48 |
key[2 * block_start + 5] << 40 |
key[2 * block_start + 4] << 32 |
key[2 * block_start + 3] << 24 |
key[2 * block_start + 2] << 16 |
key[2 * block_start + 1] << 8 |
key[2 * block_start + 0]
)
k2 = (
key[2 * block_start + 15] << 56 |
key[2 * block_start + 14] << 48 |
key[2 * block_start + 13] << 40 |
key[2 * block_start + 12] << 32 |
key[2 * block_start + 11] << 24 |
key[2 * block_start + 10] << 16 |
key[2 * block_start + 9] << 8 |
key[2 * block_start + 8]
)
k1 = (c1 * k1) & 0xFFFFFFFFFFFFFFFF
k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF
k1 = (c2 * k1) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = (h1 << 27 | h1 >> 37) & 0xFFFFFFFFFFFFFFFF
h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
h1 = (h1 * 5 + 0x52dce729) & 0xFFFFFFFFFFFFFFFF
k2 = (c2 * k2) & 0xFFFFFFFFFFFFFFFF
k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF
k2 = (c1 * k2) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = (h2 << 31 | h2 >> 33) & 0xFFFFFFFFFFFFFFFF
h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
h2 = (h2 * 5 + 0x38495ab5) & 0xFFFFFFFFFFFFFFFF
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[tail_index + 14] << 48
if tail_size >= 14:
k2 ^= key[tail_index + 13] << 40
if tail_size >= 13:
k2 ^= key[tail_index + 12] << 32
if tail_size >= 12:
k2 ^= key[tail_index + 11] << 24
if tail_size >= 11:
k2 ^= key[tail_index + 10] << 16
if tail_size >= 10:
k2 ^= key[tail_index + 9] << 8
if tail_size >= 9:
k2 ^= key[tail_index + 8]
if tail_size > 8:
k2 = (k2 * c2) & 0xFFFFFFFFFFFFFFFF
k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF
k2 = (k2 * c1) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[tail_index + 7] << 56
if tail_size >= 7:
k1 ^= key[tail_index + 6] << 48
if tail_size >= 6:
k1 ^= key[tail_index + 5] << 40
if tail_size >= 5:
k1 ^= key[tail_index + 4] << 32
if tail_size >= 4:
k1 ^= key[tail_index + 3] << 24
if tail_size >= 3:
k1 ^= key[tail_index + 2] << 16
if tail_size >= 2:
k1 ^= key[tail_index + 1] << 8
if tail_size >= 1:
k1 ^= key[tail_index + 0]
if tail_size > 0:
k1 = (k1 * c1) & 0xFFFFFFFFFFFFFFFF
k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF
k1 = (k1 * c2) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 ^= length
h2 ^= length
h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
h1 = fmix(h1)
h2 = fmix(h2)
h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
return h2 << 64 | h1
|
Implements 128-bit murmur3 hash for x64, as per ``pymmh3``, with some
bugfixes.
Args:
key: data to hash
seed: seed
Returns:
integer hash
|
juraj-google-style
|
def GetFile(self, map_name, dst_file, current_file, location=None):
if map_name == config.MAP_PASSWORD:
return self.GetPasswdFile(dst_file, current_file)
elif map_name == config.MAP_GROUP:
return self.GetGroupFile(dst_file, current_file)
elif map_name == config.MAP_SHADOW:
return self.GetShadowFile(dst_file, current_file)
elif map_name == config.MAP_NETGROUP:
return self.GetNetgroupFile(dst_file, current_file)
elif map_name == config.MAP_AUTOMOUNT:
return self.GetAutomountFile(dst_file, current_file, location=location)
raise error.UnsupportedMap('Source can not fetch %s' % map_name)
|
Retrieve a file from this source.
Args:
map_name: A string representation of the map whose file you want
dst_file: Temporary filename to write to.
current_file: Path to the current cache.
location: optional field used by automounts to indicate a specific map
Returns:
path to new file
Raises:
UnsupportedMap: for unknown source maps
|
github-repos
|
def execute_edit(args, root_dir=None):
EDITOR = os.environ.get('EDITOR', 'vim')
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if ((not isinstance(status['data'], str)) and (key in status['data'])):
if (status['data'][key]['status'] in ['queued', 'stashed']):
command = status['data'][key]['command']
else:
print("Entry is not 'queued' or 'stashed'")
sys.exit(1)
else:
print('No entry with this key')
sys.exit(1)
with tempfile.NamedTemporaryFile(suffix='.tmp') as tf:
tf.write(command.encode('utf-8'))
tf.flush()
call([EDITOR, tf.name])
tf.seek(0)
edited_command = tf.read().decode('utf-8')
print_command_factory('edit')({'key': key, 'command': edited_command}, root_dir=root_dir)
|
Edit an existing queue command in the daemon.
Args:
args['key'] int: The key of the queue entry to be edited
root_dir (string): The path to the root directory the daemon is running in.
|
codesearchnet
|
def add_outbound_connection(self, uri):
LOGGER.debug('Adding connection to %s', uri)
conn = OutboundConnection(connections=self._connections, endpoint=uri, dispatcher=self._dispatcher, zmq_identity=self._zmq_identity, secured=self._secured, server_public_key=self._server_public_key, server_private_key=self._server_private_key, future_callback_threadpool=self._future_callback_threadpool, heartbeat=True, connection_timeout=self._connection_timeout)
self.outbound_connections[uri] = conn
conn.start()
self._add_connection(conn, uri)
connect_message = ConnectionRequest(endpoint=self._public_endpoint)
conn.send(validator_pb2.Message.NETWORK_CONNECT, connect_message.SerializeToString(), callback=partial(self._connect_callback, connection=conn))
return conn
|
Adds an outbound connection to the network.
Args:
uri (str): The zmq-style (e.g. tcp://hostname:port) uri
to attempt to connect to.
|
codesearchnet
|
def component_mget(self, zip_data, components):
if not isinstance(components, list):
print("Components param must be a list")
return
query_params = {"components": ",".join(components)}
return self.fetch_identifier_component(
"zip/component_mget", zip_data, query_params)
|
Call the zip component_mget endpoint
Args:
- zip_data - As described in the class docstring.
- components - A list of strings for each component to include in the request.
Example: ["zip/details", "zip/volatility"]
|
juraj-google-style
|
def _run_async(self, urls):
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
|
Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
|
juraj-google-style
|
def log_softmax(logits, axis=None, name=None, dim=None):
axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
|
Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
|
github-repos
|
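The docstring above gives the identity `logsoftmax = logits - log(reduce_sum(exp(logits), axis))`. In practice the maximum is subtracted first for numerical stability; a NumPy sketch of that stabilized form is shown below. This illustrates the math only, not TensorFlow's kernel.

```python
import numpy as np


def log_softmax(logits: np.ndarray, axis: int = -1) -> np.ndarray:
    """Sketch: numerically stable log-softmax along `axis`."""
    # Subtracting the max leaves the result unchanged but avoids overflow in exp().
    shifted = logits - np.max(logits, axis=axis, keepdims=True)
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))


x = np.array([[1.0, 2.0, 3.0]])
print(log_softmax(x))                 # [[-2.4076 -1.4076 -0.4076]]
print(np.exp(log_softmax(x)).sum())   # 1.0 -- exponentiating recovers a softmax
```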
def write(self, message, cur_time=None):
if cur_time is None:
cur_time = time.time()
lines = self._line_buffer.add_string(message)
for line in lines:
timestamp = ''
if self._prepend_timestamp:
timestamp = datetime.datetime.utcfromtimestamp(
cur_time).isoformat() + ' '
line = u'{}{}{}'.format(self._line_prepend, timestamp, line)
self._fsapi.push(self._filename, line)
|
Write some text to the pusher.
Args:
message: a string to push for this file.
cur_time: used for unit testing. override line timestamp.
|
juraj-google-style
|
def unzip(self, overwrite: bool = False):
if self.zip_content and not overwrite:
raise FileExistsError(str(self.temp_dir))
LOGGER.debug('unzipping miz to temp dir')
try:
with ZipFile(str(self.miz_path)) as zip_file:
LOGGER.debug('reading infolist')
self.zip_content = [f.filename for f in zip_file.infolist()]
self._extract_files_from_zip(zip_file)
except BadZipFile:
raise BadZipFile(str(self.miz_path))
except:
LOGGER.exception('error while unzipping miz file: %s', self.miz_path)
raise
LOGGER.debug('checking miz content')
for miz_item in ['mission', 'options', 'warehouses', 'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource']:
if not Path(self.temp_dir.joinpath(miz_item)).exists():
LOGGER.error('missing file in miz: %s', miz_item)
raise FileNotFoundError(miz_item)
self._check_extracted_content()
LOGGER.debug('all files have been found, miz successfully unzipped')
|
Flattens a MIZ file into the temp dir
Args:
overwrite: allow overwriting existing files
|
juraj-google-style
|
def get_full_psd_matrix(self):
if (self.matrix_m is not None):
return (self.matrix_h, self.matrix_m)
h_columns = []
for i in range((self.nn_params.num_hidden_layers + 1)):
current_col_elems = []
for j in range(i):
current_col_elems.append(tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
if (i == 0):
current_col_elems.append(utils.diag(self.lambda_lu[i]))
else:
current_col_elems.append(utils.diag((self.lambda_lu[i] + self.lambda_quad[i])))
if (i < self.nn_params.num_hidden_layers):
current_col_elems.append(tf.matmul(utils.diag(((- 1) * self.lambda_quad[(i + 1)])), self.nn_params.weights[i]))
for j in range((i + 2), (self.nn_params.num_hidden_layers + 1)):
current_col_elems.append(tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
current_column = tf.concat(current_col_elems, 0)
h_columns.append(current_column)
self.matrix_h = tf.concat(h_columns, 1)
self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h))
self.matrix_m = tf.concat([tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1), tf.concat([self.vector_g, self.matrix_h], axis=1)], axis=0)
return (self.matrix_h, self.matrix_m)
|
Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M
|
codesearchnet
|
async def get(self, uid: int, cached_msg: CachedMessage=None, requirement: FetchRequirement=FetchRequirement.METADATA) -> Optional[MessageT]:
...
|
Return the message with the given UID.
Args:
uid: The message UID.
cached_msg: The last known cached message.
requirement: The data required from each message.
Raises:
IndexError: The UID is not valid in the mailbox.
|
codesearchnet
|
def monitor(service_addr, duration_ms, level=1):
return _pywrap_profiler_plugin.monitor(_strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)
|
Sends grpc requests to profiler server to perform on-demand monitoring.
The monitoring result is a light weight performance summary of your model
execution. This method will block the caller thread until it receives the
monitoring result. This method currently supports Cloud TPU only.
Args:
service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466.
duration_ms: Duration of monitoring in ms.
level: Choose a monitoring level between 1 and 2 to monitor your job. Level
2 is more verbose than level 1 and shows more metrics.
Returns:
A string of monitoring output.
Example usage:
```python
# Continuously send gRPC requests to the Cloud TPU to monitor the model
# execution.
for query in range(0, 100):
print(
tf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))
```
|
github-repos
|
def upload_benchmark_files(opts):
client = datastore.Client()
for fname in list_files_by_mtime(opts.datadir):
fpath = os.path.join(opts.datadir, fname)
try:
with open(fpath, 'r') as fd:
if trylock(fd):
upload_benchmark_data(client, fd.read())
shutil.move(fpath, os.path.join(opts.archivedir, fname))
except Exception as e:
print("Cannot process '%s', skipping. Error: %s" % (fpath, e))
|
Find benchmark files, process them, and upload their data to the datastore.
Locate benchmark files in the data directory, process them, and upload their
data to the datastore. After processing each file, move it to the archive
directory for safe-keeping. Each file is locked for processing, which allows
multiple uploader instances to run concurrently if needed, each one handling
different benchmark files, skipping those already locked by another.
Args:
opts: command line options object
Note: To use locking, the file is first opened, then its descriptor is used to
lock and read it. The lock is released when the file is closed. Do not open
that same file a 2nd time while the lock is already held, because when that
2nd file descriptor is closed, the lock will be released prematurely.
|
github-repos
|
def prepare_context(pipeline, context_in_string, context):
logger.debug('starting')
parsed_context = get_parsed_context(pipeline=pipeline, context_in_string=context_in_string)
context.update(parsed_context)
logger.debug('done')
|
Prepare context for pipeline run.
Args:
pipeline: dict. Dictionary representing the pipeline.
context_in_string: string. Argument string used to initialize context.
context: pypyr.context.Context. Merge any new context generated from
context_in_string into this context instance.
Returns:
None. The context instance to use for the pipeline run is contained
in the context arg, it's not passed back as a function return.
|
codesearchnet
|
def AddEvent(self, event):
self._RaiseIfNotWritable()
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
if not isinstance(event_data_identifier, identifiers.FakeIdentifier):
raise IOError('Unsupported event data identifier type: {0:s}'.format(
type(event_data_identifier)))
event = self._PrepareAttributeContainer(event)
self._events.append(event)
self.number_of_events += 1
|
Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage writer is closed or
if the event data identifier type is not supported.
OSError: when the storage writer is closed or
if the event data identifier type is not supported.
|
juraj-google-style
|
def from_int(i):
point = ECPointAffine.from_int(bitcoin_curve, i)
return PublicKey.from_point(point)
|
Generates a public key object from an integer.
Note:
This assumes that the upper 32 bytes of the integer
are the x component of the public key point and the
lower 32 bytes are the y component.
Args:
i (Bignum): A 512-bit integer representing the public
key point on the secp256k1 curve.
Returns:
PublicKey: A PublicKey object.
|
codesearchnet
|
def _get_device_dict_and_cores(devices):
device_map = collections.defaultdict(list)
num_cores = 0
for device in devices:
match = _TPU_DEVICE_REGEX.match(device.name)
if match:
host_id = match.group('host_id')
core_id = match.group('core_id')
device_map[host_id].append(core_id)
num_cores += 1
return DeviceDetails(device_map, num_cores)
|
Returns a dict of hosts to cores and total cores given devices names.
Returns a namedtuple with two attributes:
device_map: A map of host_ids to a list of core_ids.
total_cores: The total number of cores within the TPU system.
Args:
devices: A list of devices returned by session.list_devices()
|
github-repos
|
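`_get_device_dict_and_cores` groups device names by host with a regex and counts the total cores. A self-contained sketch of the same grouping follows, using a hypothetical device-name pattern and plain strings instead of TensorFlow device objects.

```python
import collections
import re

# Hypothetical pattern resembling TPU device names: .../task:<host>/device:TPU:<core>
_DEVICE_RE = re.compile(r'.*task:(?P<host_id>\d+)/device:TPU:(?P<core_id>\d+)$')

DeviceDetails = collections.namedtuple('DeviceDetails', ['device_map', 'total_cores'])


def group_devices(device_names):
    """Sketch: map host ids to their core ids and count matching cores."""
    device_map = collections.defaultdict(list)
    num_cores = 0
    for name in device_names:
        match = _DEVICE_RE.match(name)
        if match:
            device_map[match.group('host_id')].append(match.group('core_id'))
            num_cores += 1
    return DeviceDetails(device_map, num_cores)


names = [
    '/job:worker/replica:0/task:0/device:TPU:0',
    '/job:worker/replica:0/task:0/device:TPU:1',
    '/job:worker/replica:0/task:1/device:TPU:0',
    '/job:worker/replica:0/task:0/device:CPU:0',  # ignored: not a TPU core
]
print(group_devices(names))  # host '0' -> ['0', '1'], host '1' -> ['0']; total_cores=3
```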
def get_markdown_files(self, dir_):
md_files = OrderedSet()
for (root, _, files) in os.walk(dir_):
for name in files:
split = os.path.splitext(name)
if (len(split) == 1):
continue
if (split[1] in ('.markdown', '.md', '.yaml')):
md_files.add(os.path.join(root, name))
return md_files
|
Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk.
|
codesearchnet
|
def annotate(self, framedata):
for artist in self.annotation_artists:
artist.remove()
self.annotation_artists = []
for annotation in self.annotations:
if (annotation[2] > framedata):
return
if (annotation[2] == framedata):
pos = annotation[0:2]
shape = self.annotations_default['shape']
color = self.annotations_default['color']
size = self.annotations_default['size']
line = self.annotations_default['line']
if (len(annotation) > 3):
shape = annotation[3].get('shape', shape)
color = annotation[3].get('color', color)
size = annotation[3].get('size', size)
line = annotation[3].get('line', line)
if ((shape == 'CIRC') and hasattr(size, '__len__')):
size = 30
if (not hasattr(color, '__len__')):
color = ((color,) * 3)
if (shape == 'RECT'):
patch = patches.Rectangle(((pos[0] - (size[0] / 2)), (pos[1] - (size[1] / 2))), size[0], size[1], fc='none', ec=color, lw=line)
elif (shape == 'CIRC'):
patch = patches.CirclePolygon(pos, radius=size, fc='none', ec=color, lw=line)
self.annotation_artists.append(patch)
self.axes_processed.add_artist(self.annotation_artists[(- 1)])
|
Annotates the processed axis with given annotations for
the provided framedata.
Args:
framedata: The current frame number.
|
codesearchnet
|
def outgoing_edges(self, node):
edges = self.edges()
out_edges = []
for out_node, in_node in edges:
if node is out_node:
out_edges.append((out_node, in_node))
return tuple(out_edges)
|
Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges.
|
juraj-google-style
|
def is_artifact_optional(chain, task_id, path):
upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', [])
optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)
return (path in optional_artifacts_per_task_id.get(task_id, []))
|
Tells whether an artifact is flagged as optional or not.
Args:
chain (ChainOfTrust): the chain of trust object
task_id (str): the id of the aforementioned task
path (str): the path of the artifact to look up
Returns:
bool: True if artifact is optional
|
codesearchnet
|
def remove(self, force=False):
return self.client.api.remove_volume(self.id, force=force)
|
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
|
codesearchnet
|
def cv_squared(x):
epsilon = 1e-10
float_size = tf.to_float(tf.size(x)) + epsilon
mean = tf.reduce_sum(x) / float_size
variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
return variance / (tf.square(mean) + epsilon)
|
The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
|
juraj-google-style
|
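`cv_squared` is the squared coefficient of variation, variance / mean², with epsilons so an empty tensor yields 0. A NumPy sketch of the same statistic follows; it shows that a uniform sample scores near 0 while a skewed one scores higher, which is why it works as a load-balancing loss.

```python
import numpy as np


def cv_squared(x: np.ndarray) -> float:
    """Sketch: squared coefficient of variation, variance / mean**2."""
    eps = 1e-10
    n = x.size + eps           # epsilon keeps the empty case finite (-> 0)
    mean = x.sum() / n
    variance = np.sum((x - mean) ** 2) / n
    return float(variance / (mean ** 2 + eps))


print(cv_squared(np.array([1.0, 1.0, 1.0, 1.0])))  # ~0.0, perfectly uniform
print(cv_squared(np.array([4.0, 0.0, 0.0, 0.0])))  # 3.0, heavily skewed
print(cv_squared(np.array([])))                    # 0.0, empty input
```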
def create_transformation(self, rotation=None, translation=None):
mat = None
if rotation is not None:
mat = Matrix44.from_eulers(Vector3(rotation))
if translation is not None:
trans = matrix44.create_from_translation(Vector3(translation))
if mat is None:
mat = trans
else:
mat = matrix44.multiply(mat, trans)
return mat
|
Creates a transformation matrix with rotation and translation.
Args:
rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
Returns:
A 4x4 matrix as a :py:class:`numpy.array`
|
juraj-google-style
|
def zero(duration: int, name: str = None) -> SamplePulse:
return _sampled_zero_pulse(duration, name=name)
|
Generates zero-sampled `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
name: Name of pulse.
|
juraj-google-style
|
def _validate_bn_layer(self, layer):
if (not isinstance(layer, tf.keras.layers.BatchNormalization) and
not isinstance(layer, tf.compat.v1.layers.BatchNormalization)):
raise ValueError(
"batchnorm_layer must be an instance of BatchNormalization layer.")
if layer.renorm:
raise ValueError("BatchNorm Bijector does not support renormalization.")
if layer.virtual_batch_size:
raise ValueError(
"BatchNorm Bijector does not support virtual batch sizes.")
|
Check for valid BatchNormalization layer.
Args:
layer: Instance of `tf.layers.BatchNormalization`.
Raises:
ValueError: If batchnorm_layer argument is not an instance of
`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or
if `batchnorm_layer.virtual_batch_size` is specified.
|
juraj-google-style
|
def quarter_ellipsis_functions(xx, yy):
npxx = np.array(xx)
npyy = np.array(yy)
if np.any(npxx == npyy):
raise RuntimeError('Invalid points for quarter_ellipsis_functions')
if np.all(npxx < npyy) or np.all(npxx > npyy):
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
c_lower = np.array([p1[0], p2[1]])
c_upper = np.array([p2[0], p1[1]])
b2 = (p2[1] - p1[1]) ** 2
else:
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
c_lower = np.array([p2[0], p1[1]])
c_upper = np.array([p1[0], p2[1]])
b2 = (p1[1] - p2[1]) ** 2
b2overa2 = b2 / (p2[0] - p1[0]) ** 2
def lower(x):
return c_lower[1] - np.sqrt(b2 - b2overa2 * (x - c_lower[0]) ** 2)
def upper(x):
return c_upper[1] + np.sqrt(b2 - b2overa2 * (x - c_upper[0]) ** 2)
return {'lower': lower, 'upper': upper}
|
Method that creates two quarter-ellipse functions based on points xx and yy. The ellipses are assumed to be
aligned with the axes, and both pass through the two points xx and yy.
Args:
xx:
First point
yy:
Second point
Returns:
A dictionary with the lower and upper quarter ellipsis functions.
|
juraj-google-style
|
def get_registration_id_info(self, registration_id):
response = self.registration_info_request(registration_id)
if response.status_code == 200:
return response.json()
return None
|
Returns details related to a registration id if it exists, otherwise returns None.
Args:
registration_id: id to be checked
Returns:
dict: info about registration id
None: if id doesn't exist
|
juraj-google-style
|
def deploy(target):
if (not os.getenv(CIRCLECI_ENV_VAR)):
raise EnvironmentError('Must be on CircleCI to run this script')
current_branch = os.getenv('CIRCLE_BRANCH')
if ((target == 'PROD') and (current_branch != 'master')):
raise EnvironmentError(f'Refusing to deploy to production from branch {current_branch!r}. Production deploys can only be made from master.')
if (target in ('PROD', 'TEST')):
pypi_username = os.getenv(f'{target}_PYPI_USERNAME')
pypi_password = os.getenv(f'{target}_PYPI_PASSWORD')
else:
raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.")
if (not (pypi_username and pypi_password)):
raise EnvironmentError(f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' environment variables. These are required to push to PyPI.")
os.environ['TWINE_USERNAME'] = pypi_username
os.environ['TWINE_PASSWORD'] = pypi_password
_shell('git config --global user.email "dev@cloverhealth.com"')
_shell('git config --global user.name "Circle CI"')
_shell('git config push.default current')
ret = _shell('make version', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
print(f'Deploying version {version!r}...')
_shell(f'git tag -f -a {version} -m "Version {version}"')
_shell(f'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py')
_shell('python setup.py sdist bdist_wheel')
_shell('git add ChangeLog AUTHORS */version.py')
_shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"')
_pypi_push('dist')
_shell('git push --follow-tags')
print(f'Deployment complete. Latest version is {version}.')
|
Deploys the package and documentation.
Proceeds in the following steps:
1. Ensures proper environment variables are set and checks that we are on Circle CI
2. Tags the repository with the new version
3. Creates a standard distribution and a wheel
4. Updates version.py to have the proper version
5. Commits the ChangeLog, AUTHORS, and version.py file
6. Pushes to PyPI
7. Pushes the tags and newly committed files
Raises:
`EnvironmentError`:
- Not running on CircleCI
- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables
are missing
- Attempting to deploy to production from a branch that isn't master
|
codesearchnet
|
def recalculate_concepts(self, concepts, lang=None):
if (len(concepts) == 0):
return
if (lang is None):
items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
else:
items = Concept.objects.get_concept_item_mapping(lang=lang)
environment = get_environment()
mastery_threshold = get_mastery_trashold()
for (user, concepts) in concepts.items():
all_items = list(set(flatten([items[c] for c in concepts])))
answer_counts = environment.number_of_answers_more_items(all_items, user)
correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
predictions = dict(list(zip(all_items, get_predictive_model().predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))
new_user_stats = []
stats_to_delete_condition = Q()
for concept in concepts:
answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(time_spent=Sum('response_time'), sessions=Count('session', True), time_first=Min('time'), time_last=Max('time'))
stats = {'answer_count': sum((answer_counts[i] for i in items[concept])), 'correct_answer_count': sum((correct_answer_counts[i] for i in items[concept])), 'item_count': len(items[concept]), 'practiced_items_count': sum([(answer_counts[i] > 0) for i in items[concept]]), 'mastered_items_count': sum([(predictions[i] >= mastery_threshold) for i in items[concept]]), 'prediction': (sum([predictions[i] for i in items[concept]]) / len(items[concept])), 'time_spent': (answer_aggregates['time_spent'] / 1000), 'session_count': answer_aggregates['sessions'], 'time_first': answer_aggregates['time_first'].timestamp(), 'time_last': answer_aggregates['time_last'].timestamp()}
stats_to_delete_condition |= Q(user=user, concept=concept)
for (stat_name, value) in stats.items():
new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
self.filter(stats_to_delete_condition).delete()
self.bulk_create(new_user_stats)
|
Recalculated given concepts for given users
Args:
concepts (dict): mapping of user id (int) to the set of concepts to recalculate
lang (Optional[str]): language used to get items in all concepts (cached).
Defaults to None, in which case items are fetched only for the used concepts.
|
codesearchnet
|
def filter(self, nodes):
filtered_dag = DAG()
for node in nodes:
filtered_dag.add_node_if_not_exists(node)
for edge in self.all_downstreams(node):
filtered_dag.add_node_if_not_exists(edge)
for (node, edges) in self.graph.items():
if (node in filtered_dag.graph):
filtered_dag.graph[node] = edges
return filtered_dag
|
Returns a new DAG with only the given nodes and their
dependencies.
Args:
nodes (list): The nodes you are interested in.
Returns:
:class:`stacker.dag.DAG`: The filtered graph.
|
codesearchnet
|
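The DAG `filter` above keeps the requested nodes plus everything reachable downstream of them, then restores the edges among the kept nodes. Below is a minimal sketch of the same idea on a plain adjacency-dict graph; the library's `all_downstreams` traversal is replaced with an explicit depth-first walk.

```python
def downstreams(graph: dict, node) -> set:
    """All nodes reachable from `node` (excluding `node` itself), depth-first."""
    seen, stack = set(), list(graph.get(node, []))
    while stack:
        current = stack.pop()
        if current not in seen:
            seen.add(current)
            stack.extend(graph.get(current, []))
    return seen


def filter_dag(graph: dict, nodes) -> dict:
    """Sketch: keep `nodes` and their downstream closure, preserving their edges."""
    keep = set(nodes)
    for node in nodes:
        keep |= downstreams(graph, node)
    return {n: [m for m in graph.get(n, []) if m in keep] for n in keep}


graph = {'a': ['b'], 'b': ['c'], 'c': [], 'x': ['y'], 'y': []}
print(filter_dag(graph, ['a']))  # keeps 'a' -> ['b'], 'b' -> ['c'], 'c' -> []; 'x'/'y' dropped
```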
def configure_sbi(self, sbi_config: dict, schema_path: str = None):
if not self.active:
raise RuntimeError("Unable to add SBIs to inactive subarray!")
sbi_config['subarray_id'] = self._id
sbi = SchedulingBlockInstance.from_config(sbi_config, schema_path)
self._add_sbi_id(sbi_config['id'])
return sbi
|
Add a new SBI to the database associated with this subarray.
Args:
sbi_config (dict): SBI configuration.
schema_path (str, optional): Path to the SBI config schema.
|
juraj-google-style
|
def load(self, file_name):
new_rundata = self.loader(file_name)
new_rundata = self.inspect(new_rundata)
return new_rundata
|
Load a raw data-file
Args:
file_name (path)
Returns:
loaded test
|
juraj-google-style
|
def delete(filething):
t = OggTheora(filething)
filething.fileobj.seek(0)
t.delete(filething)
|
delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
|
juraj-google-style
|
def Validate(self, value):
if value is None:
return None
if not isinstance(value, self.rdfclass):
try:
r = self.rdfclass()
r.FromDict(value)
return r
except (AttributeError, TypeError, rdfvalue.InitializeError):
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value
|
Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None.
|
juraj-google-style
|
def get_value(data, key):
ref = data
try:
for subkey in key.split('.'):
if isinstance(ref, dict):
ref = ref[subkey]
else:
print('CRITICAL: Cannot use subkey %s on non-dictionary element' % subkey)
return None
return ref
except KeyError:
return None
|
Follow the dot notation to retrieve the requested field from the data.
Args:
data: the data as a dictionary (must be a dict)
key: the key (as dot notation) into the data that gives the field (e.g. IP.src)
Returns:
the value of the field (or subfield) if it exists, otherwise None
|
juraj-google-style
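A quick usage sketch for get_value above, assuming the helper is in scope; the packet dict is illustrative:
packet = {'IP': {'src': '10.0.0.1', 'dst': '10.0.0.2'}, 'length': 60}

print(get_value(packet, 'IP.src'))    # -> '10.0.0.1'
print(get_value(packet, 'IP.ttl'))    # missing subkey -> None
print(get_value(packet, 'length.x'))  # non-dict element -> prints CRITICAL and returns None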
|
def get(self, key, default=None):
return self._fetch_cmd(b'get', [key], False).get(key, default)
|
The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found.
|
codesearchnet
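A usage sketch for the single-key get above, assuming a pymemcache-style Client pointed at a local memcached (address and keys are illustrative):
from pymemcache.client.base import Client  # assumed client class

client = Client(('localhost', 11211))
client.set('greeting', b'hello')
print(client.get('greeting'))                 # -> b'hello'
print(client.get('missing', default=b'n/a'))  # key not found -> b'n/a'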
|
def structure_2_lmpdata(structure, ff_elements=None, atom_style='charge'):
s = structure.get_sorted_structure()
(a, b, c) = s.lattice.abc
m = s.lattice.matrix
xhi = a
xy = np.dot(m[1], (m[0] / xhi))
yhi = np.sqrt(((b ** 2) - (xy ** 2)))
xz = np.dot(m[2], (m[0] / xhi))
yz = ((np.dot(m[1], m[2]) - (xy * xz)) / yhi)
zhi = np.sqrt((((c ** 2) - (xz ** 2)) - (yz ** 2)))
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = (None if (not any(box_tilt)) else box_tilt)
box = LammpsBox(box_bounds, box_tilt)
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.lattice = new_latt
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted((Element(el) for el in set(symbols)))
mass_info = [tuple(([i.symbol] * 2)) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
|
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Defaults to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Defaults to "charge".
Returns:
LammpsData
|
codesearchnet
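A usage sketch assuming the converter lives in pymatgen's LAMMPS IO module (import paths vary between pymatgen versions; the NaCl cell is illustrative):
from pymatgen import Lattice, Structure
from pymatgen.io.lammps.data import structure_2_lmpdata  # assumed location

structure = Structure(Lattice.cubic(5.64), ['Na', 'Cl'],
                      [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
lmp_data = structure_2_lmpdata(structure, atom_style='charge')
lmp_data.write_file('nacl.data')  # write the LAMMPS data file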
|
def get_metric(name, constructor, *args, **kwargs):
metric = _registered_metrics.get(name)
if metric is not None:
return metric
else:
return constructor(name, *args, **kwargs)
|
Return an existing metric or create a new one for the given name.
Args:
name: The name of the metric.
constructor: A class to instantiate if a new metric is required.
*args: Additional positional args to pass to the constructor.
**kwargs: Keyword args for the constructor.
Returns:
The current metric registered to name, or a new one created by
invoking constructor(name, *args, **kwargs).
|
github-repos
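A usage sketch for get_metric above; the Counter class is a made-up stand-in for whatever metric types the surrounding codebase actually registers:
class Counter:  # hypothetical metric type, for illustration only
    def __init__(self, name, start=0):
        self.name = name
        self.value = start

    def increment(self, amount=1):
        self.value += amount

# Returns the registered 'requests_total' metric if one exists,
# otherwise constructs Counter('requests_total', start=0).
requests_total = get_metric('requests_total', Counter, start=0)
requests_total.increment()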
|
def _translate_name(name):
underscored = inflection.underscore(name)
dasherized = inflection.dasherize(underscored)
words = dasherized.split('-')
last_word = words.pop()
words.append(inflection.pluralize(last_word))
return '-'.join(words)
|
Translate the class name to the API endpoint.
For example, Car would become cars, FastCar would become fast-cars.
Args:
name (string): Camel case name (singular)
Returns:
string: A pluralized, dasherized string.
|
codesearchnet
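A quick check of the behaviour described above, assuming the helper and the inflection package are available:
print(_translate_name('Car'))       # -> 'cars'
print(_translate_name('FastCar'))   # -> 'fast-cars'
print(_translate_name('Category'))  # -> 'categories'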
|
def resize(self, container, height, width):
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
|
Resize the tty session.
Args:
container (str or dict): The container to resize
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
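A usage sketch assuming docker-py's low-level APIClient; the image and terminal size are illustrative:
import docker  # assumed docker SDK for Python

client = docker.APIClient(base_url='unix://var/run/docker.sock')
container = client.create_container('alpine', command='sh', tty=True)
client.start(container)
client.resize(container, height=24, width=80)  # resize the tty to 24x80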
|
def GetValueLength(rd, pos):
rd = bytearray(rd)
key = rd[pos]
if (key == LONG_ITEM_ENCODING):
if ((pos + 1) < len(rd)):
return (3, rd[(pos + 1)])
else:
raise errors.HidError('Malformed report descriptor')
else:
code = (key & 3)
if (code <= 2):
return (1, code)
elif (code == 3):
return (1, 4)
raise errors.HidError('Cannot happen')
|
Get value length for a key in rd.
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated with the key.
|
codesearchnet
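A worked example for GetValueLength above, using a two-byte short item (Report ID 0x01) as found in typical HID report descriptors:
rd = bytearray([0x85, 0x01])             # 0x85 = Report ID key with size code 1
key_size, data_len = GetValueLength(rd, 0)
# 0x85 & 3 == 1, which is <= 2, so the key occupies 1 byte
# and the associated value is 1 byte long.
print(key_size, data_len)                # -> 1 1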
|
def on_message(self, message):
try:
self.log.debug("Got message %s", message)
d = json_decode(message)
response = deserialize_object(d, Response)
if isinstance(response, (Return, Error)):
request = self._request_lookup.pop(response.id)
if isinstance(response, Error):
response.message = ResponseError(response.message)
else:
request = self._request_lookup[response.id]
cothread.Callback(request.callback, response)
except Exception:
self.log.exception("on_message(%r) failed", message)
|
Pass a response from the server to the process receive queue.
Args:
message(str): Received message
|
juraj-google-style
|
def __init__(self,
initializer=None,
age=None,
base="aff4:/flows",
queue=DEFAULT_FLOW_QUEUE,
flow_name=None):
if initializer is None:
if flow_name is None:
flow_name = random.UInt32()
if isinstance(flow_name, int):
initializer = RDFURN(base).Add("%s:%X" % (queue.Basename(), flow_name))
else:
initializer = RDFURN(base).Add("%s:%s" % (queue.Basename(), flow_name))
else:
if isinstance(initializer, RDFURN):
try:
self.ValidateID(initializer.Basename())
except ValueError as e:
raise InitializeError(
"Invalid URN for SessionID: %s, %s" % (initializer, e))
super(SessionID, self).__init__(initializer=initializer, age=age)
|
Constructor.
Args:
initializer: A string or another RDFURN.
age: The age of this entry.
base: The base namespace this session id lives in.
queue: The queue to use.
flow_name: The name of this flow or its random id.
Raises:
InitializeError: The given URN cannot be converted to a SessionID.
|
juraj-google-style
|
def NewFromJSON(data):
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
)
|
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
|
juraj-google-style
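A usage sketch for NewFromJSON above with an illustrative payload; it assumes the method is exposed on the Comment class (e.g. as a static method) and that the constructor stores its arguments as attributes:
payload = {
    'body': 'Great shot!',
    'posted_at': '2016-05-04T12:00:00Z',
    'user': {'username': 'alice'},  # nested dict handed to User.NewFromJSON
}
comment = Comment.NewFromJSON(payload)
print(comment.body, comment.posted_at)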
|
def from_coffeescript(cls, func, v_func, args={}):
compiled = nodejs_compile(func, lang='coffeescript', file='???')
if ('error' in compiled):
raise CompilationError(compiled.error)
v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')
if ('error' in v_compiled):
raise CompilationError(v_compiled.error)
return cls(func=compiled.code, v_func=v_compiled.code, args=args)
|
Create a ``CustomJSTransform`` instance from a pair of CoffeeScript
snippets. The function bodies are translated to JavaScript functions
using node and therefore require return statements.
The ``func`` snippet namespace will contain the variable ``x`` (the
untransformed value) at render time. The ``v_func`` snippet namespace
will contain the variable ``xs`` (the untransformed vector) at render
time.
Example:
.. code-block:: coffeescript
func = "return Math.cos(x)"
v_func = "return [Math.cos(x) for x in xs]"
transform = CustomJSTransform.from_coffeescript(func, v_func)
Args:
func (str) : a coffeescript snippet to transform a single ``x`` value
v_func (str) : a coffeescript snippet function to transform a vector ``xs``
Returns:
CustomJSTransform
|
codesearchnet
|
def GenerateDateTripsDeparturesList(self, date_start, date_end):
service_id_to_trips = defaultdict(lambda: 0)
service_id_to_departures = defaultdict(lambda: 0)
for trip in self.GetTripList():
headway_start_times = trip.GetFrequencyStartTimes()
if headway_start_times:
trip_runs = len(headway_start_times)
else:
trip_runs = 1
service_id_to_trips[trip.service_id] += trip_runs
service_id_to_departures[trip.service_id] += (
(trip.GetCountStopTimes() - 1) * trip_runs)
date_services = self.GetServicePeriodsActiveEachDate(date_start, date_end)
date_trips = []
for date, services in date_services:
day_trips = sum(service_id_to_trips[s.service_id] for s in services)
day_departures = sum(
service_id_to_departures[s.service_id] for s in services)
date_trips.append((date, day_trips, day_departures))
return date_trips
|
Return a list of (date object, number of trips, number of departures).
The list is generated for dates in the range [date_start, date_end).
Args:
date_start: The first date in the list, a date object
date_end: The first date after the list, a date object
Returns:
a list of (date object, number of trips, number of departures) tuples
|
juraj-google-style
|
def __init__(self, config=None):
self.driver = get_database_instance(config)
self.logger = logging.getLogger('Plugin')
logging.basicConfig(level=logging.INFO)
|
Initialize a :class:`~.Plugin` instance and connect to MongoDB.
Args:
config (dict, optional): configuration used by ``get_database_instance``
to connect to MongoDB as the persistence layer
|
juraj-google-style
|
def state_range_type(self) -> Sequence[str]:
fluents = self.domain.state_fluents
ordering = self.domain.state_fluent_ordering
return self._fluent_range_type(fluents, ordering)
|
The range type of each state fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
|
codesearchnet
|
def _percentile(self, values, percent, key=lambda x: x):
vals = sorted(values)
k = (len(vals) - 1) * (percent / 100)
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(vals[int(k)])
d0 = key(vals[int(f)]) * (c - k)
d1 = key(vals[int(c)]) * (k - f)
return d0 + d1
|
Find the percentile of a list of values.
Args:
values: A list of values for which percentiles are desired
percent: A float value from 0 to 100 representing the requested percentile.
key: optional key function to compute value from each element of N.
Returns:
The percentile of the values
|
juraj-google-style
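A self-contained rerun of the interpolation performed by _percentile above, for the values [1, 2, 3, 4] at the 50th percentile:
import math

values, percent = [1, 2, 3, 4], 50
vals = sorted(values)
k = (len(vals) - 1) * (percent / 100)            # 3 * 0.5 = 1.5
f, c = math.floor(k), math.ceil(k)               # 1 and 2
result = vals[f] * (c - k) + vals[c] * (k - f)   # 2*0.5 + 3*0.5
print(result)                                    # -> 2.5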
|
def load_function_def_library(library, saved_object_graph=None, load_shared_name_suffix=None, wrapper_function=None):
library_function_names = set((fdef.signature.name for fdef in library.function))
functions = {}
renamed_functions = {}
if ops.executing_eagerly_outside_functions():
graph = ops.Graph()
else:
graph = ops.get_default_graph()
if load_shared_name_suffix is None:
load_shared_name_suffix = '_load_{}'.format(ops.uid())
library_gradient_names = {}
new_gradient_op_types = {}
gradients_to_register = {}
for gdef in library.registered_gradients:
if gdef.registered_op_type:
new_op_type = custom_gradient.generate_name()
old_op_type = compat.as_bytes(gdef.registered_op_type)
library_gradient_names[old_op_type] = gdef.gradient_func
new_gradient_op_types[old_op_type] = new_op_type
gradients_to_register[gdef.gradient_func] = new_op_type
function_deps = {}
for fdef in library.function:
function_deps[fdef.signature.name] = _list_function_deps(fdef, library_function_names, library_gradient_names)
loaded_gradients = {}
for fdef in _sort_function_defs(library, function_deps):
orig_name = _fix_fdef_in_place(fdef, functions, load_shared_name_suffix, new_gradient_op_types)
structured_input_signature = None
structured_outputs = None
if saved_object_graph is not None and orig_name in saved_object_graph.concrete_functions:
proto = saved_object_graph.concrete_functions[orig_name]
structured_input_signature = nested_structure_coder.decode_proto(proto.canonicalized_input_signature)
structured_outputs = nested_structure_coder.decode_proto(proto.output_signature)
with graph.as_default():
func_graph = function_def_lib.function_def_to_graph(fdef, structured_input_signature=structured_input_signature, structured_outputs=structured_outputs)
_restore_gradient_functions(func_graph, renamed_functions, loaded_gradients)
for dep in function_deps[orig_name]:
functions[dep].add_to_graph(func_graph)
if '_input_shapes' in fdef.attr:
del fdef.attr['_input_shapes']
function_type = function_type_lib.from_structured_signature(func_graph.structured_input_signature, func_graph.structured_outputs, func_graph.function_captures.capture_types)
func = function_lib.ConcreteFunction.from_func_graph(func_graph, function_type, attrs=fdef.attr)
if wrapper_function:
func = wrapper_function(func)
func.add_to_graph(graph)
functions[orig_name] = func
renamed_functions[func.name] = func
if any((op.type == 'TRTEngineOp' for op in func_graph.get_operations())):
func.add_to_graph(ops.get_default_graph())
if orig_name in gradients_to_register:
gradient_op_type = gradients_to_register[orig_name]
loaded_gradients[compat.as_bytes(gradient_op_type)] = func
ops.RegisterGradient(gradient_op_type)(_gen_gradient_func(func))
return functions
|
Load a set of functions as concrete functions without captured inputs.
Functions names are manipulated during load such that they do not overlap
with previously created ones.
Gradients are re-registered under new names. Ops that reference the gradients
are updated to reflect the new registered names.
Args:
library: FunctionDefLibrary proto message.
saved_object_graph: SavedObjectGraph proto message. If not passed in,
concrete function structured signatures and outputs will not be set.
load_shared_name_suffix: If specified, used to uniquify shared names.
Otherwise, a unique name is generated.
wrapper_function: A callable used to wrap newly created functions.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if functions dependencies have a cycle.
|
github-repos
|
def rename(self, source_file_names, destination_file_names):
if not len(source_file_names) == len(destination_file_names):
message = 'Unable to rename unequal number of sources and destinations.'
raise BeamIOError(message)
src_dest_pairs = list(zip(source_file_names, destination_file_names))
results = self._blobstorageIO().rename_files(src_dest_pairs)
exceptions = {(src, dest): error for src, dest, error in results if error is not None}
if exceptions:
raise BeamIOError('Rename operation failed.', exceptions)
|
Rename the files at the source list to the destination list.
Source and destination lists should be of the same size.
Args:
source_file_names: List of file paths that need to be moved
destination_file_names: List of destination paths for the files
Raises:
``BeamIOError``: if any of the rename operations fail
|
github-repos
|
def get_by_uri(self, uri):
self._helper.validate_resource_uri(uri)
data = self._helper.do_get(uri)
if data:
new_resource = self.new(self._connection, data)
else:
new_resource = None
return new_resource
|
Retrieves a resource by its URI
Args:
uri: URI of the resource
Returns:
Resource object
|
juraj-google-style
|
def get_provider_fn_decorations(provider_fn, default_arg_names):
if hasattr(provider_fn, _IS_WRAPPER_ATTR):
provider_decorations = getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR)
if provider_decorations:
expanded_provider_decorations = []
for provider_decoration in provider_decorations:
if provider_decoration.in_scope_id is None:
provider_decoration.in_scope_id = scoping.DEFAULT_SCOPE
if provider_decoration.arg_name is not None:
expanded_provider_decorations.append(provider_decoration)
else:
expanded_provider_decorations.extend(
[ProviderDecoration(default_arg_name,
provider_decoration.annotated_with,
provider_decoration.in_scope_id)
for default_arg_name in default_arg_names])
return expanded_provider_decorations
return [ProviderDecoration(default_arg_name,
annotated_with=None,
in_scope_id=scoping.DEFAULT_SCOPE)
for default_arg_name in default_arg_names]
|
Retrieves the provider method-relevant info set by decorators.
If any info wasn't set by decorators, then defaults are returned.
Args:
provider_fn: a (possibly decorated) provider function
default_arg_names: the (possibly empty) arg names to use if none were
specified via @provides()
Returns:
a sequence of ProviderDecoration
|
juraj-google-style
|
def shape(self):
raise NotImplementedError
|
The `TensorShape` of this variable.
Returns:
A `TensorShape`.
|
github-repos
|
def expect_no_raises(message=None, extras=None):
try:
yield
except Exception as e:
e_record = records.ExceptionRecord(e)
if extras:
e_record.extras = extras
msg = message or 'Got an unexpected exception'
details = '%s: %s' % (msg, e_record.details)
logging.exception(details)
e_record.details = details
recorder.add_error(e_record)
|
Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be included in test
result.
|
github-repos
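A usage sketch assuming Mobly's expects module, where this context manager typically lives; the device cleanup call is hypothetical:
from mobly import expects  # assumed import

def teardown(device):
    with expects.expect_no_raises('device teardown should not raise'):
        device.services.stop_all()  # hypothetical cleanup call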
|
def owner_set(self):
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
|
Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
|
github-repos
|