code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (3 classes) |
---|---|---|
def patch(self, id, name=None, description=None, whitelisted_container_task_types=None, whitelisted_executable_task_types=None):
request_url = (self._client.base_api_url + self.detail_url.format(id=id))
data_to_patch = {}
if (name is not None):
data_to_patch['name'] = name
if (description is not None):
data_to_patch['description'] = description
if (whitelisted_container_task_types is not None):
data_to_patch['whitelisted_container_task_types'] = whitelisted_container_task_types
if (whitelisted_executable_task_types is not None):
data_to_patch['whitelisted_executable_task_types'] = whitelisted_executable_task_types
response = self._client.session.patch(request_url, data=data_to_patch)
self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK)
return self.response_data_to_model_instance(response.json())
|
Partially updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str, optional): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
|
codesearchnet
|
def _parse_metadata(self, message):
metadata = Metadata(source=self.actor_urn).__dict__
if 'author' in message['d']:
metadata['source_user'] = message['d']['author']['username']
else:
metadata['source_user'] = None
if 'channel_id' in message['d']:
metadata['source_channel'] = message['d']['channel_id']
else:
metadata['source_channel'] = None
metadata['user_id'] = metadata['source_user']
metadata['display_name'] = metadata['source_user']
metadata['source_connector'] = 'discord'
return metadata
|
Sets metadata in Legobot message
Args:
message (dict): Full message from Discord websocket connection
Returns:
Legobot.Metadata
|
juraj-google-style
|
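A small sketch of what the parser in the row above extracts; the gateway payload dict here is hypothetical and the Legobot `Metadata` wrapper is replaced by a plain dict.
# Hypothetical Discord gateway payload; only the 'd' sub-dict is inspected
message = {'d': {'author': {'username': 'alice'}, 'channel_id': '42'}}
metadata = {
    'source_user': message['d'].get('author', {}).get('username'),
    'source_channel': message['d'].get('channel_id'),
    'source_connector': 'discord',
}
metadata['user_id'] = metadata['display_name'] = metadata['source_user']
print(metadata['source_user'], metadata['source_channel'])  # alice 42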
def __init__(self, log_path, ref_path, run_path, output_path):
process_worker.ProcessWorkflow.__init__(
self, log_path, timeout_seconds=FLAGS.pdiff_timeout)
self.ref_path = ref_path
self.run_path = run_path
self.output_path = output_path
|
Initializer.
Args:
log_path: Where to write the verbose logging output.
ref_path: Path to reference screenshot to diff.
run_path: Path to the most recent run screenshot to diff.
output_path: Where the diff image should be written, if any.
|
juraj-google-style
|
def has_value(self, name=None):
raise NotImplementedError('Optional.has_value()')
|
Returns a tensor that evaluates to `True` if this optional has a value.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.bool`.
|
github-repos
|
def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes):
input_mask = [[False] + [True] * (len(shape) - 1) for shape in input_shapes]
output_mask = [[False] + [True] * (len(shape) - 1) if shape else [] for shape in output_shapes]
return self.BuildParamsWithMask(graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, [], [])
|
Build test parameters.
The input_shapes and output_shapes arguments are known (static) shapes that
can be used to generate test data. To define the model, we also specify
corresponding input/output TensorSpecs. These are defined using the shape
arguments. For each input tensor we define:
input_spec = [None] + input_shape[1:]
and similarly for output shapes. This means that we leave the first (batch)
dimension unknown, the rest is just copied from the shapes arg.
Args:
graph_fn: The function to build the graph.
dtype: The element type.
input_shapes: The input shapes.
output_shapes: The output shapes.
Returns:
The test parameters.
|
github-repos
|
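A quick hand-check of the batch-dimension masks built in the row above; the shapes are made up for illustration.
# False marks the (unknown) batch dimension, True keeps the static dims
input_shapes = [[8, 32, 32, 3], [8, 10]]
input_mask = [[False] + [True] * (len(shape) - 1) for shape in input_shapes]
print(input_mask)  # [[False, True, True, True], [False, True]]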
def nr_cases(self, institute_id=None):
query = {}
if institute_id:
query['collaborators'] = institute_id
LOG.debug('Fetch all cases with query {0}'.format(query))
nr_cases = self.case_collection.find(query).count()
return nr_cases
|
Return the number of cases
This function will change when we migrate to 3.7.1
Args:
collaborator(str): Institute id
Returns:
nr_cases(int)
|
codesearchnet
|
def cn_occupation_energy(self, delta_occupation=None):
nn_occupations = self.site_specific_nn_occupation()
if delta_occupation:
for site in delta_occupation:
assert (site in nn_occupations)
nn_occupations[site] += delta_occupation[site]
return sum([self.cn_occupation_energies[s][n] for (s, n) in nn_occupations.items()])
|
The coordination-number dependent energy for this site.
Args:
delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }.
If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None
Returns:
(Float): The coordination-number dependent energy for this site.
|
codesearchnet
|
def str2dict(str_in):
dict_out = safe_eval(str_in)
if (not isinstance(dict_out, dict)):
dict_out = None
return dict_out
|
Extracts a dict from a string.
Args:
str_in (string): String that contains a python dict
Returns:
(dict) or None if no valid dict was found
Raises:
-
|
codesearchnet
|
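The row above depends on a `safe_eval` helper that is not shown; a minimal self-contained sketch of the same idea, using `ast.literal_eval` as a stand-in.
import ast

def str2dict_sketch(str_in):
    # ast.literal_eval only evaluates Python literals, never arbitrary code
    try:
        out = ast.literal_eval(str_in)
    except (ValueError, SyntaxError):
        return None
    return out if isinstance(out, dict) else None

print(str2dict_sketch("{'a': 1, 'b': 2}"))  # {'a': 1, 'b': 2}
print(str2dict_sketch("[1, 2, 3]"))         # None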
def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path='speaker_embeddings_path.json', **kwargs):
if speaker_embeddings_dict_path is not None:
speaker_embeddings_path = cached_file(pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False)
if speaker_embeddings_path is None:
logger.warning(f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.')
speaker_embeddings = None
else:
with open(speaker_embeddings_path) as speaker_embeddings_json:
speaker_embeddings = json.load(speaker_embeddings_json)
else:
speaker_embeddings = None
tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
|
Instantiate a Bark processor associated with a pretrained model.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained [`BarkProcessor`] hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a processor saved using the [`~BarkProcessor.save_pretrained`]
method, e.g., `./my_model_directory/`.
speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
The name of the `.json` file containing the speaker_embeddings dictionary located in
`pretrained_model_name_or_path`. If `None`, no speaker_embeddings is loaded.
**kwargs
Additional keyword arguments passed along to
[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`].
|
github-repos
|
class TFSharedEmbeddings(keras.layers.Layer):
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float]=None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** (-0.5) if initializer_range is None else initializer_range
warnings.warn('`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.', DeprecationWarning)
def build(self, input_shape):
self.weight = self.add_weight('weight', shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range))
super().build(input_shape)
def get_config(self):
config = {'vocab_size': self.vocab_size, 'hidden_size': self.hidden_size, 'initializer_range': self.initializer_range}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str='embedding') -> tf.Tensor:
if mode == 'embedding':
return self._embedding(inputs)
elif mode == 'linear':
return self._linear(inputs)
else:
raise ValueError(f'mode {mode} is not valid.')
def _embedding(self, input_ids):
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
|
Construct shared token embeddings.
The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language
modeling.
Args:
vocab_size (`int`):
The size of the vocabulary, e.g., the number of unique tokens.
hidden_size (`int`):
The size of the embedding vectors.
initializer_range (`float`, *optional*):
The standard deviation to use when initializing the weights. If no value is provided, it will default to
\\(1/\sqrt{hidden\_size}\\).
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.
|
github-repos
|
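A plain-numpy sketch (not the Keras layer itself) of how the "embedding" and "linear" modes above share a single weight matrix.
import numpy as np

vocab_size, hidden_size = 5, 4
rng = np.random.default_rng(0)
weight = rng.normal(scale=hidden_size ** -0.5, size=(vocab_size, hidden_size))

ids = np.array([1, 3])
embeddings = weight[ids]        # "embedding" mode: gather rows by token id
logits = embeddings @ weight.T  # "linear" mode: project back onto the vocabulary
print(embeddings.shape, logits.shape)  # (2, 4) (2, 5)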
def total_seconds(td):
secs = td.seconds + td.days * 24 * 3600
if td.microseconds:
secs += 1
return secs
|
convert a timedelta to seconds.
This is patterned after timedelta.total_seconds, which is only
available in Python 2.7.
Args:
td: a timedelta object.
Returns:
total seconds within a timedelta. Rounded up to seconds.
|
juraj-google-style
|
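A standalone check of the round-up behaviour described in the row above (the function body is copied so the snippet runs on its own).
from datetime import timedelta

def total_seconds(td):  # copy of the row above, for a standalone check
    secs = td.seconds + td.days * 24 * 3600
    if td.microseconds:
        secs += 1
    return secs

print(total_seconds(timedelta(days=1, seconds=30)))         # 86430
print(total_seconds(timedelta(seconds=5, microseconds=1)))  # 6 (any microseconds round up)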
def _check_call_func(self, node):
func = utils.safe_infer(node.func)
types = ("str", "unicode")
methods = ("format",)
if is_method_call(func, types, methods) and not is_complex_format_str(
func.bound
):
self.add_message("logging-format-interpolation", node=node)
|
Checks that function call is not format_string.format().
Args:
node (astroid.node_classes.Call):
Call AST node to be checked.
|
juraj-google-style
|
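For context, an illustrative pair showing the pattern this checker flags versus the lazy interpolation pylint prefers.
import logging

logging.info("user {} logged in".format("alice"))  # flagged: logging-format-interpolation
logging.info("user %s logged in", "alice")         # preferred: logging interpolates lazily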
def log_variable_sizes(var_list=None, tag=None, verbose=False):
if var_list is None:
var_list = tf.trainable_variables()
if tag is None:
tag = "Trainable Variables"
if not var_list:
return
name_to_var = {v.name: v for v in var_list}
total_size = 0
for v_name in sorted(list(name_to_var)):
v = name_to_var[v_name]
v_size = int(np.prod(np.array(v.shape.as_list())))
if verbose:
tf.logging.info("Weight %s\tshape %s\tsize %d",
v.name[:-2].ljust(80),
str(v.shape).ljust(20), v_size)
total_size += v_size
tf.logging.info("%s Total size: %d", tag, total_size)
|
Log the sizes and shapes of variables, and the total size.
Args:
var_list: a list of variables; defaults to trainable_variables
tag: a string; defaults to "Trainable Variables"
verbose: bool, if True, log every weight; otherwise, log total size only.
|
juraj-google-style
|
def local_file(self, filename):
LOG.info('Retrieving "%s" from "%s".', filename, self.runway_dir)
file_contents = ''
file_path = os.path.join(self.runway_dir, filename)
try:
with open(file_path, 'rt') as lookup_file:
file_contents = lookup_file.read()
except FileNotFoundError:
LOG.warning('File missing "%s".', file_path)
raise
LOG.debug('Local file contents:\n%s', file_contents)
return file_contents
|
Read the local file in _self.runway_dir_.
Args:
filename (str): Name of file to retrieve relative to root of
_runway_dir_.
Returns:
str: Contents of local file.
Raises:
FileNotFoundError: Requested file missing.
|
codesearchnet
|
def Process(self, parser_mediator, date_time, syslog_tokens, **kwargs):
body = syslog_tokens.get('body', None)
if (not body):
raise AttributeError('Missing required attribute: body')
for (key, grammar) in iter(self.MESSAGE_GRAMMARS):
try:
tokens = grammar.parseString(body)
syslog_tokens.update(tokens.asDict())
self.ParseMessage(parser_mediator, key, date_time, syslog_tokens)
return
except pyparsing.ParseException:
pass
raise errors.WrongPlugin('Unable to create event from: {0:s}'.format(body))
|
Processes the data structure produced by the parser.
Args:
parser_mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
date_time (dfdatetime.DateTimeValues): date and time values.
syslog_tokens (dict[str, str]): names of the fields extracted by the
syslog parser and the matching grammar, and values are the values of
those fields.
Raises:
AttributeError: If the syslog_tokens do not include a 'body' attribute.
WrongPlugin: If the plugin is unable to parse the syslog tokens.
|
codesearchnet
|
def combine(self, x):
depth = tf.shape(x)[(- 1)]
x *= tf.expand_dims(self._nonpadding, (- 1))
ret = tf.unsorted_segment_sum(x, self._flat_indices, num_segments=(self._batch * self._length))
ret = tf.reshape(ret, [self._batch, self._length, depth])
return ret
|
Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
a `Tensor` with shape `[batch, length, depth]`
|
codesearchnet
|
def scalar_mul(scalar, x, name=None):
base_dtype = dtypes.as_dtype(x.dtype).base_dtype
scalar = ops.convert_to_tensor(scalar, dtype=base_dtype, name='scalar')
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, indexed_slices.IndexedSlices):
return indexed_slices.IndexedSlices(gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError(f'The input scalar must be a 0-D value. Received shape {shape}.')
|
Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
This is a special case of `tf.math.multiply`, where the first value must be a
`scalar`. Unlike the general form of `tf.math.multiply`, this operation is
guaranteed to be efficient for `tf.IndexedSlices`.
>>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = tf.gather(x, [1, 2]) # IndexedSlices
... z = tf.math.scalar_mul(10.0, y)
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
name: A name for the operation (optional).
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
|
github-repos
|
def compute_dtype(self):
return self._dtype_policy.compute_dtype
|
The dtype of the layer's computations.
This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless
mixed precision is used, this is the same as `Layer.dtype`, the dtype of
the weights.
Layers automatically cast their inputs to the compute dtype, which causes
computations and the output to be in the compute dtype as well. This is done
by the base Layer class in `Layer.__call__`, so you do not have to insert
these casts if implementing your own layer.
Layers often perform certain internal computations in higher precision when
`compute_dtype` is float16 or bfloat16 for numeric stability. The output
will still typically be float16 or bfloat16 in such cases.
Returns:
The layer's compute dtype.
|
github-repos
|
def update_video(self, video_id, title='', description='', keywords='', access_control=AccessControl.Unlisted):
if (not self.authenticated):
raise ApiError(_('Authentication is required'))
entry = self.fetch_video(video_id)
extension = self._access_control(access_control)
if extension:
entry.extension_elements = extension
if title:
entry.media.title.text = title
if description:
entry.media.description.text = description
success = Api.yt_service.UpdateVideoEntry(entry)
return success
|
Updates the video
Authentication is required
Params:
video_id: string ID of the video to update (its entry is fetched internally via 'fetch_video()')
title: string
description: string
keywords: string
Returns:
a video entry on success
None otherwise
|
codesearchnet
|
def functions(start=None, end=None):
start, end = fix_addresses(start, end)
for func_t in idautils.Functions(start, end):
yield Function(func_t)
|
Get all functions in range.
Args:
start: Start address of the range. Defaults to IDB start.
end: End address of the range. Defaults to IDB end.
Returns:
This is a generator that iterates over all the functions in the IDB.
|
juraj-google-style
|
def calculate_oobatake_dS(seq, temp):
seq = ssbio.protein.sequence.utils.cast_to_str(seq)
dS = 0
temp += 273.15
T0 = 298.15
dCp_sum = _sum_of_dCp(seq)
for aa in seq:
S0 = oobatake_dictionary[aa]['dS']
dS += S0
return dS + dCp_sum * math.log(temp / T0)
|
Get dS using Oobatake method in units cal/mol.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
float: dS in units cal/mol
|
juraj-google-style
|
def list_autoscale_settings(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/', '/autoscaleSettings?api-version=', INSIGHTS_API])
return do_get(endpoint, access_token)
|
List the autoscale settings in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of autoscale settings.
|
codesearchnet
|
def disconnect_sync(self, conn_id):
done = threading.Event()
result = {}
def disconnect_done(conn_id, adapter_id, status, reason):
result['success'] = status
result['failure_reason'] = reason
done.set()
self.disconnect_async(conn_id, disconnect_done)
done.wait()
return result
|
Synchronously disconnect from a connected device
Args:
conn_id (int): A unique identifier that will refer to this connection
Returns:
dict: A dictionary with two elements
'success': a bool with the result of the connection attempt
'failure_reason': a string with the reason for the failure if we failed
|
codesearchnet
|
class Mamba2Output(ModelOutput):
last_hidden_state: Optional[torch.FloatTensor] = None
cache_params: Optional[Mamba2Cache] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
Class for the MAMBA2 model outputs.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
cache_params (`Mamba2Cache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
Includes both the State space model state matrices after the selective scan, and the Convolutional states
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
github-repos
|
def beta_to_uni(text, strict=False):
param_key = (strict,)
try:
t = _BETA_CONVERSION_TRIES[param_key]
except KeyError:
t = _create_conversion_trie(*param_key)
_BETA_CONVERSION_TRIES[param_key] = t
transform = []
idx = 0
possible_word_boundary = False
while (idx < len(text)):
if (possible_word_boundary and _penultimate_sigma_word_final(transform)):
transform[(- 2)] = _FINAL_LC_SIGMA
step = t.longest_prefix(text[idx:(idx + _MAX_BETA_TOKEN_LEN)])
if step:
possible_word_boundary = (text[idx] in _BETA_PUNCTUATION)
(key, value) = step
transform.append(value)
idx += len(key)
else:
possible_word_boundary = True
transform.append(text[idx])
idx += 1
if (possible_word_boundary and _penultimate_sigma_word_final(transform)):
transform[(- 2)] = _FINAL_LC_SIGMA
elif ((len(transform) > 0) and (transform[(- 1)] == _MEDIAL_LC_SIGMA)):
transform[(- 1)] = _FINAL_LC_SIGMA
converted = ''.join(transform)
return converted
|
Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text.
|
codesearchnet
|
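An illustrative call, assuming `beta_to_uni` from the row above is importable and standard betacode conventions ('/' marks an acute accent); the expected output is an assumption, not taken from the source.
print(beta_to_uni('lo/gos'))  # expected: λόγος (trailing sigma rendered as final sigma)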
def handle_subscribe(self, request, path):
ret = []
if path:
name = path[0]
if (name not in self.children):
self.children[name] = NotifierNode(getattr(self.data, name, None), self)
ret += self.children[name].handle_subscribe(request, path[1:])
else:
serialized = serialize_object(self.data)
if request.delta:
self.delta_requests.append(request)
ret.append(request.delta_response([[[], serialized]]))
else:
self.update_requests.append(request)
ret.append(request.update_response(serialized))
return ret
|
Add to the list of requests to notify, and notify the initial value of
the data held
Args:
request (Subscribe): The subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
|
codesearchnet
|
def evaluate_ising(linear, quad, state):
if _numpy and isinstance(state, np.ndarray):
return evaluate_ising(linear, quad, state.tolist())
energy = 0.0
for index, value in uniform_iterator(linear):
energy += state[index] * value
for (index_a, index_b), value in six.iteritems(quad):
energy += value * state[index_a] * state[index_b]
return energy
|
Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function.
|
juraj-google-style
|
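A hand-worked check of the energy sum in the row above, using plain dicts and a two-spin state.
linear = {0: 1.0, 1: -1.0}   # h_i terms
quad = {(0, 1): 0.5}         # J_ij terms
state = [1, -1]

energy = sum(state[i] * h for i, h in linear.items())
energy += sum(J * state[a] * state[b] for (a, b), J in quad.items())
print(energy)  # 1.0*1 + (-1.0)*(-1) + 0.5*1*(-1) = 1.5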
def __init__(self, nonce_id=None, nonce_value=None):
super(Nonce, self).__init__(tag=enums.Tags.NONCE)
self._nonce_id = None
self._nonce_value = None
self.nonce_id = nonce_id
self.nonce_value = nonce_value
|
Construct a Nonce struct.
Args:
nonce_id (bytes): A binary string representing the ID of the nonce
value. Optional, defaults to None. Required for encoding and
decoding.
nonce_value (bytes): A binary string representing a random value.
Optional, defaults to None. Required for encoding and decoding.
|
juraj-google-style
|
def _empty_dict_pylist_from_row_partitions(row_partitions, nrows):
if not row_partitions:
return [{} for _ in range(nrows)]
else:
values = _empty_dict_pylist_from_row_partitions(row_partitions[1:], row_partitions[0].row_splits()[-1])
splits = row_partitions[0].row_splits()
return [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
|
Returns a python list of empty dicts from the given row partitions.
Args:
row_partitions: The row-partitions describing the ragged shape of the
result.
nrows: The number of rows in the outermost row-partition. (Or if
`len(row_partitions)==0`, then the number of empty dicts to return.)
Returns:
A nested python list whose leaves (if any) are empty python dicts.
|
github-repos
|
def _cursor_pb(cursor_pair):
if cursor_pair is not None:
data, before = cursor_pair
value_pbs = [_helpers.encode_value(value) for value in data]
return query_pb2.Cursor(values=value_pbs, before=before)
|
Convert a cursor pair to a protobuf.
If ``cursor_pair`` is :data:`None`, just returns :data:`None`.
Args:
cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of
* a list of field values.
* a ``before`` flag
Returns:
Optional[google.cloud.firestore_v1beta1.types.Cursor]: A
protobuf cursor corresponding to the values.
|
juraj-google-style
|
def writeCmdMsg(self, msg):
ekm_log(((('(writeCmdMsg | ' + self.getContext()) + ') ') + msg))
self.m_command_msg = msg
|
Internal method to set the command result string.
Args:
msg (str): Message built during command.
|
codesearchnet
|
def _jacobian_both(nodes, degree, dimension):
_, num_nodes = nodes.shape
result = np.empty((2 * dimension, num_nodes - degree - 1), order="F")
result[:dimension, :] = jacobian_s(nodes, degree, dimension)
result[dimension:, :] = jacobian_t(nodes, degree, dimension)
return result
|
r"""Compute :math:`s` and :math:`t` partial of :math:`B`.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Array of nodes in a surface.
degree (int): The degree of the surface.
dimension (int): The dimension the surface lives in.
Returns:
numpy.ndarray: Nodes of the Jacobian surfaces in
Bézier form.
|
juraj-google-style
|
def __get_state_by_id(cls, job_id):
state = model.MapreduceState.get_by_job_id(job_id)
if (state is None):
raise ValueError(('Job state for job %s is missing.' % job_id))
return state
|
Get job state by id.
Args:
job_id: job id.
Returns:
model.MapreduceState for the job.
Raises:
ValueError: if the job state is missing.
|
codesearchnet
|
def build_synchronize_decorator():
lock = threading.Lock()
def lock_decorator(fn):
@functools.wraps(fn)
def lock_decorated(*args, **kwargs):
with lock:
return fn(*args, **kwargs)
return lock_decorated
return lock_decorator
|
Returns a decorator which prevents concurrent calls to functions.
Usage:
synchronized = build_synchronize_decorator()
@synchronized
def read_value():
...
@synchronized
def write_value(x):
...
Returns:
make_threadsafe (fct): The decorator which locks all functions to which it
is applied under the same lock
|
codesearchnet
|
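A usage sketch of the decorator factory above, guarding a shared counter across threads (assumes `build_synchronize_decorator` from the row above is in scope together with its `threading`/`functools` imports).
import threading

synchronized = build_synchronize_decorator()
counter = {'value': 0}

@synchronized
def bump():
    counter['value'] += 1

threads = [threading.Thread(target=lambda: [bump() for _ in range(1000)]) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter['value'])  # 4000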
def get_list(self, obj_class, data, subset):
url = obj_class.get_url(data)
if (obj_class.can_list and obj_class.can_get):
if ((subset and (len(subset) == 1) and (subset[0].upper() == 'BASIC')) and (obj_class is jssobjects.Computer)):
url += '/subset/basic'
result = self.jss.get(url)
if obj_class.container:
result = result.find(obj_class.container)
return self._build_jss_object_list(result, obj_class)
elif obj_class.can_get:
xmldata = self.jss.get(url)
return obj_class(self.jss, xmldata)
else:
raise JSSMethodNotAllowedError(obj_class.__class__.__name__)
|
Get a list of objects as JSSObjectList.
Args:
obj_class: The JSSObject subclass type to search for.
data: None
subset: Some objects support a subset for listing; namely
Computer, with subset="basic".
Returns:
JSSObjectList
|
codesearchnet
|
def jump( self ):
potential_jumps = self.potential_jumps()
if not potential_jumps:
raise BlockedLatticeError('No moves are possible in this lattice')
all_transitions = transitions.Transitions( self.potential_jumps() )
random_jump = all_transitions.random()
delta_t = all_transitions.time_to_jump()
self.time += delta_t
self.update_site_occupation_times( delta_t )
self.update( random_jump )
return( all_transitions.time_to_jump() )
|
Select a jump at random from all potential jumps, then update the lattice state.
Args:
None
Returns:
None
|
juraj-google-style
|
def take_bug_report(self, test_name=None, begin_time=None, timeout=300, destination=None):
prefix = DEFAULT_BUG_REPORT_NAME
if test_name:
prefix = '%s,%s' % (DEFAULT_BUG_REPORT_NAME, test_name)
if begin_time is None:
begin_time = mobly_logger.get_log_file_timestamp()
new_br = True
try:
stdout = self.adb.shell('bugreportz -v').decode('utf-8')
if 'not found' in stdout:
new_br = False
except adb.AdbError:
new_br = False
if destination is None:
destination = os.path.join(self.log_path, 'BugReports')
br_path = utils.abs_path(destination)
utils.create_dir(br_path)
filename = self.generate_filename(prefix, str(begin_time), 'txt')
if new_br:
filename = filename.replace('.txt', '.zip')
full_out_path = os.path.join(br_path, filename)
self.wait_for_boot_completion()
self.log.debug('Start taking bugreport.')
if new_br:
out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')
if not out.startswith('OK'):
raise DeviceError(self, 'Failed to take bugreport: %s' % out)
br_out_path = out.split(':')[1].strip()
self.adb.pull([br_out_path, full_out_path])
self.adb.shell(['rm', br_out_path])
else:
self.adb.bugreport(' > "%s"' % full_out_path, shell=True, timeout=timeout)
self.log.debug('Bugreport taken at %s.', full_out_path)
return full_out_path
|
Takes a bug report on the device and stores it in a file.
Args:
test_name: Name of the test method that triggered this bug report.
begin_time: Timestamp of when the test started. If not set, then
this will default to the current time.
timeout: float, the number of seconds to wait for bugreport to
complete, default is 5min.
destination: string, path to the directory where the bugreport
should be saved.
Returns:
A string that is the absolute path to the bug report on the host.
|
github-repos
|
def space(self, newlines=1):
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self
|
Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
|
juraj-google-style
|
def relative_batch_tokens_ids_to_midi(self, tokens: np.ndarray, beatstep: np.ndarray, beat_offset_idx: int=0, bars_per_batch: int=2, cutoff_time_idx: int=12):
beat_offset_idx = 0 if beat_offset_idx is None else beat_offset_idx
notes = self.relative_batch_tokens_ids_to_notes(tokens=tokens, beat_offset_idx=beat_offset_idx, bars_per_batch=bars_per_batch, cutoff_time_idx=cutoff_time_idx)
midi = self.notes_to_midi(notes, beatstep, offset_sec=beatstep[beat_offset_idx])
return midi
|
Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens
to notes then uses `notes_to_midi` method to convert them to Midi.
Args:
tokens (`numpy.ndarray`):
Denotes tokens which alongside beatstep will be converted to Midi.
beatstep (`np.ndarray`):
We get beatstep from feature extractor which is also used to get Midi.
beat_offset_idx (`int`, *optional*, defaults to 0):
Denotes beat offset index for each note in generated Midi.
bars_per_batch (`int`, *optional*, defaults to 2):
A parameter to control the Midi output generation.
cutoff_time_idx (`int`, *optional*, defaults to 12):
Denotes the cutoff time index for each note in generated Midi.
|
github-repos
|
def compress_mean(x, dim, compression_factor):
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x
|
Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
|
juraj-google-style
|
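The mesh-tensorflow call above boils down to a reshape followed by a mean over the new group axis; a plain-numpy sketch of the same group-mean compression.
import numpy as np

x = np.arange(12, dtype=np.float32).reshape(2, 6)  # dim of size 6
compression_factor = 3
compressed = x.reshape(2, 6 // compression_factor, compression_factor).mean(axis=-1)
print(compressed)  # [[ 1.  4.]
                   #  [ 7. 10.]]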
def deploy(app_id, version, promote, quiet):
gae_app = GaeApp.for_branch(git.current_branch().name)
if ((gae_app is None) and (None in (app_id, version))):
msg = "Can't find an AppEngine app setup for branch <35>{}<32> and--project and --version were not given."
log.err(msg, git.current_branch().name)
sys.exit(1)
if (version is not None):
gae_app.version = version
if (app_id is not None):
gae_app.app_id = app_id
gae_app.deploy(promote, quiet)
|
Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
|
codesearchnet
|
def _broadcast_dynamic_shape_extended_helper(a: DynamicRaggedShape, b: DynamicRaggedShape) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
assert a.rank <= b.rank
assert 2 <= b.rank
assert 1 <= a.rank
a_rps = a._as_row_partitions()
b_rps = b._as_row_partitions()
if len(a_rps) < len(b_rps):
a_nrows = a[0]
a_nrows_static = tensor_util.constant_value(a_nrows)
if a_nrows_static is not None:
a_nrows = a_nrows_static
neg_one_a_rp = RowPartition.from_uniform_row_length(uniform_row_length=a_nrows, nrows=1, nvals=a_nrows)
neg_one_b_rp = b_rps[-(len(a_rps) + 1)]
neg_one_ac, neg_one_bc = _broadcast_dynamic_shape_first_layer(constant_op.constant(1, dtype=b_rps[0].dtype), neg_one_b_rp.nrows())
c_zero, ac_zero, bc_zero = _broadcast_dynamic_shape_next_layer(neg_one_ac, neg_one_bc, neg_one_a_rp, neg_one_b_rp)
b_rps_tail = b_rps[-len(a_rps):] if len(a_rps) >= 1 else []
c_suffix, ac_layers, bc_layers = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, b_rps_tail)
return _broadcast_dynamic_shape_extended_complete(a=a, b=b, b_rps=b_rps, c_suffix=[c_zero] + c_suffix, ac=[ac_zero] + ac_layers, bc_suffix=[neg_one_bc, bc_zero] + bc_layers)
else:
assert len(a_rps) == len(b_rps)
ac_zero, bc_zero = _broadcast_dynamic_shape_first_layer(a_rps[0].nrows(), b_rps[0].nrows())
c_rps, a_layers, b_layers = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, b_rps)
return _broadcast_dynamic_shape_extended_complete(a=a, b=b, b_rps=b_rps, c_suffix=c_rps, ac=[ac_zero] + a_layers, bc_suffix=[bc_zero] + b_layers)
|
Helper for broadcast_dynamic_shape_extended.
Here, we force:
a.rank <= b.rank
2 <= b.rank
1 <= a.rank
Args:
a: a DynamicRaggedShape
b: a DynamicRaggedShape
Returns:
A triple of a shape and two broadcasters.
|
github-repos
|
def read_chunks(self, chunk_size, start, step, count) -> bytes:
return self.mglo.read_chunks(chunk_size, start, step, count)
|
Read the content.
Read and concatenate the chunks of size chunk_size
using offsets calculated from start, step and stop.
Args:
chunk_size (int): The chunk size.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets.
Returns:
bytes
|
juraj-google-style
|
def __init__(self, namespace: Optional[str], name: Optional[str], urn: Optional[str]=None, labels: Optional[Dict[str, str]]=None) -> None:
if not urn:
if not namespace:
raise ValueError('Metric namespace must be non-empty')
if not name:
raise ValueError('Metric name must be non-empty')
self.namespace = namespace
self.name = name
self.urn = urn
self.labels = labels if labels else {}
|
Initializes ``MetricName``.
Note: namespace and name should be set for user metrics,
urn and labels should be set for an arbitrary metric to package into a
MonitoringInfo.
Args:
namespace: A string with the namespace of a metric.
name: A string with the name of a metric.
urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.
labels: Labels to populate on a MonitoringInfo
|
github-repos
|
def update_current_state(self, value: str,
force: bool = False) -> datetime:
value = value.lower()
if not force:
current_state = self.current_state
if current_state == 'unknown':
allowed_transitions = self._allowed_states
else:
allowed_transitions = self._allowed_transitions[current_state]
allowed_transitions.append(current_state)
LOG.debug('Updating current state of %s to %s', self._id, value)
if value not in allowed_transitions:
raise ValueError("Invalid current state update: '{}'. '{}' "
"can be transitioned to states: {}"
.format(value, current_state,
allowed_transitions))
return self._update_state('current', value)
|
Update the current state.
Args:
value (str): New value for sdp state
force (bool): If true, ignore allowed transitions
Returns:
datetime, update timestamp
Raises:
ValueError: If the specified current state is not allowed.
|
juraj-google-style
|
def compute_mask_offsets(shard_id2num_examples):
total_num_examples = sum(shard_id2num_examples)
mask_offsets = []
total_num_examples = 0
for num_examples_in_shard in shard_id2num_examples:
mask_offsets.append(total_num_examples % 100)
total_num_examples += num_examples_in_shard
return mask_offsets
|
Return the list of offsets associated with each shards.
Args:
shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples
Returns:
mask_offsets: `list[int]`, offset to skip for each of the shard
|
juraj-google-style
|
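A hand-worked example of the offsets returned by the row above: each shard's offset is the running example count modulo 100 (the body is copied so the snippet runs on its own).
def compute_mask_offsets(shard_id2num_examples):  # copy of the row above
    mask_offsets = []
    total_num_examples = 0
    for num_examples_in_shard in shard_id2num_examples:
        mask_offsets.append(total_num_examples % 100)
        total_num_examples += num_examples_in_shard
    return mask_offsets

print(compute_mask_offsets([120, 30, 57]))  # [0, 20, 50]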
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
kwargs.update({'id': id})
return self.api_call('files.info', http_verb='GET', params=kwargs)
|
Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
|
codesearchnet
|
def get(self, name):
return self.prepare_model(self.client.api.inspect_plugin(name))
|
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def load(self, label_lookup_path, uid_lookup_path):
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
|
Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
|
juraj-google-style
|
def _ProcessUnknownEnums(message, encoded_message):
if (not encoded_message):
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
for field in message.all_fields():
if (isinstance(field, messages.EnumField) and (field.name in decoded_message) and (message.get_assigned_value(field.name) is None)):
message.set_unrecognized_field(field.name, decoded_message[field.name], messages.Variant.ENUM)
return message
|
Add unknown enum values from encoded_message as unknown fields.
ProtoRPC diverges from the usual protocol buffer behavior here and
doesn't allow unknown fields. Throwing on unknown fields makes it
impossible to let servers add new enum values and stay compatible
with older clients, which isn't reasonable for us. We simply store
unrecognized enum values as unknown fields, and all is well.
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any unknown enums stored as unrecognized fields.
|
codesearchnet
|
def get_modules():
ret = list()
valid_extensions = ('.psd1', '.psm1', '.cdxml', '.xaml', '.dll')
root_paths = []
home_dir = os.environ.get('HOME', os.environ.get('HOMEPATH'))
system_dir = '{0}\\System32'.format(os.environ.get('WINDIR', 'C:\\Windows'))
program_files = os.environ.get('ProgramFiles', 'C:\\Program Files')
default_paths = ['{0}/.local/share/powershell/Modules'.format(home_dir), '/usr/local/share/powershell/Modules', '{0}\\WindowsPowerShell\\v1.0\\Modules\\'.format(system_dir), '{0}\\WindowsPowerShell\\Modules'.format(program_files)]
default_paths = ';'.join(default_paths)
ps_module_path = os.environ.get('PSModulePath', default_paths)
ps_module_path = ps_module_path.split(';')
for item in ps_module_path:
if os.path.exists(item):
root_paths.append(item)
if (not root_paths):
log.error('Default paths not found')
return ret
for root_path in root_paths:
if (not os.path.isdir(root_path)):
continue
for (root_dir, sub_dirs, file_names) in salt.utils.path.os_walk(root_path):
for file_name in file_names:
(base_name, file_extension) = os.path.splitext(file_name)
if (file_extension.lower() in valid_extensions):
dir_name = os.path.basename(os.path.normpath(root_dir))
if ((dir_name not in ret) and (base_name.lower() == dir_name.lower())):
del sub_dirs[:]
ret.append(dir_name)
return ret
|
Get a list of the PowerShell modules which are potentially available to be
imported. The intent is to mimic the functionality of ``Get-Module
-ListAvailable | Select-Object -Expand Name``, without the delay of loading
PowerShell to do so.
Returns:
list: A list of modules available to Powershell
Example:
.. code-block:: python
import salt.utils.powershell
modules = salt.utils.powershell.get_modules()
|
codesearchnet
|
def _ReadRecordHeader(self, file_object, record_header_offset):
data_type_map = self._GetDataTypeMap('keychain_record_header')
(record_header, _) = self._ReadStructureFromFileObject(file_object, record_header_offset, data_type_map)
return record_header
|
Reads the record header.
Args:
file_object (file): file-like object.
record_header_offset (int): offset of the record header relative to
the start of the file.
Returns:
keychain_record_header: record header.
Raises:
ParseError: if the record header cannot be read.
|
codesearchnet
|
def _GetModuleCodeObjects(module):
visit_recorder = _VisitRecorder()
current = [module]
code_objects = set()
while current:
current = _FindCodeObjectsReferents(module, current, visit_recorder)
code_objects |= current
current = [code_object.co_consts for code_object in current]
return code_objects
|
Gets all code objects defined in the specified module.
There are two BFS traversals involved. One in this function and the other in
_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has
a depth limit. This function does not. The motivation is that this function
explores code object of the module and they can have any arbitrary nesting
level. _FindCodeObjectsReferents, on the other hand, traverses through class
definitions and random references. It's much more expensive and will likely
go into unrelated objects.
There is also a limit on how many total objects are going to be traversed in
all. This limit makes sure that if something goes wrong, the lookup doesn't
hang.
Args:
module: module to explore.
Returns:
Set of code objects defined in module.
|
codesearchnet
|
def restore_collection(backup):
for (k, v) in six.iteritems(backup):
del tf.get_collection_ref(k)[:]
tf.get_collection_ref(k).extend(v)
|
Restore from a collection backup.
Args:
backup (dict):
|
codesearchnet
|
def gather(params, indices, validate_indices=None, name=None, axis=None, batch_dims=0):
if name is None:
name = 'gather'
with ops.name_scope(name):
if axis is None:
axis = batch_dims
axis = array_ops.get_positive_axis(axis, params.shape.rank, ndims_name='params.shape.rank')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')
def leaf_op(p):
return array_ops.gather(p, indices, validate_indices=validate_indices, axis=axis, batch_dims=batch_dims, name=None)
return _extend_op_single(params, leaf_op)
|
tf.gather for structured tensors.
Does not support (yet) checks on illegal axis values, et cetera.
Indices must be a ragged or dense tensor.
Args:
params: a structured tensor to be gathered
indices: a ragged tensor or tensor to gather by.
validate_indices: whether to validate the indices
name: the name of the op(s).
axis: the axis in params to gather on.
batch_dims: the number of batch dimensions.
Returns:
the params reorganized according to indices.
|
github-repos
|
def save_screenshot(driver, name):
if hasattr(driver, 'save_screenshot'):
screenshot_dir = os.environ.get('SCREENSHOT_DIR')
if (not screenshot_dir):
LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
return
elif (not os.path.exists(screenshot_dir)):
os.makedirs(screenshot_dir)
image_name = os.path.join(screenshot_dir, (name + '.png'))
driver.save_screenshot(image_name)
else:
msg = u"Browser does not support screenshots. Could not save screenshot '{name}'".format(name=name)
LOGGER.warning(msg)
|
Save a screenshot of the browser.
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in the output file name.
Returns:
None
|
codesearchnet
|
def load(self, *modules):
for module in modules:
if isinstance(module, six.string_types):
try:
module = get_object(module)
except Exception as e:
self.errors[module] = e
continue
self.modules[module.__package__] = module
for (loader, module_name, is_pkg) in pkgutil.walk_packages(module.__path__):
full_name = '{}.{}'.format(_package(module), module_name)
try:
self.modules[full_name] = get_object(full_name)
if is_pkg:
self.load(self.modules[full_name])
except Exception as e:
self.errors[full_name] = e
|
Load one or more modules.
Args:
modules: Either a string full path to a module or an actual module
object.
|
codesearchnet
|
def __init__(self, context_name = 'default'):
if context_name in self.contexts:
raise Error("A context named '%s' already exists" % (context_name,))
self.name = context_name
self.handlers = {}
self.contexts[self.name] = self
|
Create a new Bubbler context
Params:
context_name (string):
Name of this context
Raises:
bubbler.Error:
If this context name already exists
|
juraj-google-style
|
def changes(self, **kwargs):
path = ('%s/%s/changes' % (self.manager.path, self.get_id()))
return self.manager.gitlab.http_get(path, **kwargs)
|
List the merge request changes.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes
|
codesearchnet
|
def parse(self, body):
if isinstance(body, six.string_types):
body = json.loads(body)
version = body['version']
self.version = version
session = body['session']
self.session.new = session['new']
self.session.session_id = session['sessionId']
application_id = session['application']['applicationId']
self.session.application.application_id = application_id
if (('attributes' in session) and session['attributes']):
self.session.attributes = session.get('attributes', {})
else:
self.session.attributes = {}
self.session.user.user_id = session['user']['userId']
self.session.user.access_token = session['user'].get('accessToken', 0)
request = body['request']
if (request['type'] == 'LaunchRequest'):
self.request = LaunchRequest()
elif (request['type'] == 'IntentRequest'):
self.request = IntentRequest()
self.request.intent = Intent()
intent = request['intent']
self.request.intent.name = intent['name']
if (('slots' in intent) and intent['slots']):
for (name, slot) in six.iteritems(intent['slots']):
self.request.intent.slots[name] = Slot()
self.request.intent.slots[name].name = slot['name']
self.request.intent.slots[name].value = slot.get('value')
elif (request['type'] == 'SessionEndedRequest'):
self.request = SessionEndedRequest()
self.request.reason = request['reason']
self.request.type = request['type']
self.request.request_id = request['requestId']
self.request.timestamp = request['timestamp']
return self
|
Parse JSON request, storing content in object attributes.
Args:
body: str. HTTP request body.
Returns:
self
|
codesearchnet
|
def get_student_certificate(self, username, course_id):
resp = self.requester.get(
urljoin(
self.base_url,
'/api/certificates/v0/certificates/{username}/courses/{course_key}/'.format(
username=username,
course_key=course_id
)
)
)
resp.raise_for_status()
return Certificate(resp.json())
|
Returns a Certificate object with the user certificates
Args:
username (str): an edx user's username
course_id (str): an edX course id.
Returns:
Certificate: object representing the student certificate for a course
|
juraj-google-style
|
def read(self, n):
if self._EOF:
return ""
while self._seg_index <= self._last_seg_index:
result = self._read_from_seg(n)
if result != "":
return result
else:
self._next_seg()
self._EOF = True
return ""
|
Read data from file segs.
Args:
n: max bytes to read. Must be positive.
Returns:
some bytes. May be smaller than n bytes. "" when no more data is left.
|
juraj-google-style
|
def console_get_width(con: tcod.console.Console) -> int:
return int(lib.TCOD_console_get_width(_console(con)))
|
Return the width of a console.
Args:
con (Console): Any Console instance.
Returns:
int: The width of a Console.
.. deprecated:: 2.0
Use `Console.width` instead.
|
juraj-google-style
|
def lookup_prefix(self, prefix, timestamp=timestamp_now):
prefix = prefix.strip().upper()
if ((self._lookuptype == 'clublogxml') or (self._lookuptype == 'countryfile')):
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif (self._lookuptype == 'redis'):
(data_dict, index) = self._get_dicts_from_redis('_prefix_', '_prefix_index_', self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
raise KeyError
|
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
|
codesearchnet
|
def _freeze_concrete_function(self):
if len(self._funcs) == 0:
raise ValueError('No ConcreteFunction is specified.')
if len(self._funcs) > 1:
raise ValueError('This converter can only convert a single ConcreteFunction. Converting multiple functions is under development.')
frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)
input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]
output_tensors = frozen_func.outputs
return (graph_def, input_tensors, output_tensors, frozen_func)
|
Convert the given ConcreteFunction to frozen graph.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
frozen_func: The frozen ConcreteFunction.
Raises:
ValueError: none or multiple ConcreteFunctions provided.
|
github-repos
|
def get_changelog(repo_path, from_commit=None):
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
refs = get_refs(repo)
changelog = []
maj_version = 0
feat_version = 0
fix_version = 0
start_including = False
cur_line = ''
if from_commit is None:
start_including = True
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = repo.get_object(commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
)
version = '%s.%s.%s' % (maj_version, feat_version, fix_version)
if (
start_including or commit_sha.startswith(from_commit)
or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
):
cur_line = pretty_commit(
commit,
version,
)
for child in children:
cur_line += pretty_commit(repo.get_object(child), version=None)
start_including = True
changelog.append(cur_line)
return '\n'.join(reversed(changelog))
|
Given a repo path and an optional commit/tag/refspec to start from, will
get the rpm-compatible changelog
Args:
repo_path (str): path to the git repo
from_commit (str): refspec (partial commit hash, tag, branch, full
refspec, partial refspec) to start the changelog from
Returns:
str: Rpm compatible changelog
|
juraj-google-style
|
def get_by_ip_hostname(self, ip_hostname):
resources = self._client.get_all()
resources_filtered = [x for x in resources if (x['credentials']['ip_hostname'] == ip_hostname)]
if resources_filtered:
return resources_filtered[0]
else:
return None
|
Retrieve a storage system by its IP.
Works only with API version <= 300.
Args:
ip_hostname: Storage system IP or hostname.
Returns:
dict
|
codesearchnet
|
def map_defun(fn, elems, output_dtypes, output_shapes, max_intra_op_parallelism=1):
if not isinstance(elems, list):
raise ValueError(f'`elems` must be a list of tensors, but was {elems}.')
if not isinstance(output_dtypes, list):
raise ValueError(f'`output_dtypes` must be a list of `tf.DType` objects, but was {output_dtypes}.')
if not isinstance(output_shapes, list):
raise ValueError(f'`output_shapes` must be a list of `tf.TensorShape` objects, but was {output_shapes}.')
concrete_fn = fn.get_concrete_function()
elems = [ops.convert_to_tensor(e) for e in elems]
output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]
return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs, output_dtypes, output_shapes, concrete_fn, max_intra_op_parallelism)
|
Map a function on the list of tensors unpacked from `elems` on dimension 0.
Args:
fn: A function (`function.defun`) that takes a list of tensors and returns
another list of tensors. The output list has the same types as
output_dtypes. The elements of the output list have the same dimension 0
as `elems`, and the remaining dimensions correspond to those of
`fn_output_shapes`.
elems: A list of tensors.
output_dtypes: A list of dtypes corresponding to the output types of the
function.
output_shapes: A list of `TensorShape`s corresponding to the output shapes
from each invocation of the function on slices of inputs.
max_intra_op_parallelism: An integer. If positive, sets the max parallelism
limit of each function call to this.
Raises:
ValueError: if any of the inputs are malformed.
Returns:
A list of `Tensor` objects with the same types as `output_dtypes`.
|
github-repos
|
def element_wise_op(array, other, op, ty):
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
other_var = other.obj_id
weld_obj.dependencies[other_var] = other
# NOTE: the Weld IR template body was stripped in this dump; the string below is a
# plausible reconstruction that maps the binary op over the zipped inputs (the
# original may also reference %(ty)s for casts).
weld_template = """
   map(
     zip(%(array)s, %(other)s),
     |a| a.$0 %(op)s a.$1
   )"""
weld_obj.weld_code = weld_template % {"array": array_var,
                                      "other": other_var,
                                      "ty": ty, "op": op}
return weld_obj
|
Operation of series and other, element-wise (binary operator add)
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second Input array
op (str): Op string used to compute element-wise operation (+ / *)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
|
juraj-google-style
|
def get_variation_from_id(self, experiment_key, variation_id):
variation_map = self.variation_id_map.get(experiment_key)
if variation_map:
variation = variation_map.get(variation_id)
if variation:
return variation
else:
self.logger.error('Variation ID "%s" is not in datafile.' % variation_id)
self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
return None
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
Get variation given experiment and variation ID.
Args:
experiment_key: Key representing parent experiment of variation.
variation_id: ID representing the variation.
Returns:
Object representing the variation.
|
juraj-google-style
|
def __init__(self, max_str_len: int=100):
self.training_bar = None
self.prediction_bar = None
self.max_str_len = max_str_len
|
Initialize the callback with optional max_str_len parameter to control string truncation length.
Args:
max_str_len (`int`):
Maximum length of strings to display in logs.
Longer strings will be truncated with a message.
|
github-repos
|
def pop_chunk(self, chunk_max_size):
if self._total_length < chunk_max_size:
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
if data_length == chunk_max_size:
return data
elif data_length > chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
chunk_write_buffer = WriteBuffer()
else:
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
|
Pops a chunk of the given max size.
Optimized to avoid unnecessary byte-string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
|
juraj-google-style
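A small usage sketch; `append`/`appendleft` are the buffer methods used above, and the payloads are arbitrary.
buf = WriteBuffer()
buf.append(b"hello ")
buf.append(b"world")
first = buf.pop_chunk(8)   # b"hello wo": stitched from two segments, never over 8 bytes
rest = buf.pop_chunk(8)    # b"rld": fast path, the remaining data fits in one chunk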
|
def listNodes(self, vendorSpecific=None):
response = self.listNodesResponse(vendorSpecific)
return self._read_dataone_type_response(response, 'NodeList')
|
See Also: listNodesResponse()
Args:
vendorSpecific: Optional dictionary of vendor-specific parameters to add to the request.
Returns:
NodeList: The deserialized NodeList returned by the node.
|
juraj-google-style
|
def any_to_datetime(self, time_input, tz=None):
dt_value = self.unix_time_to_datetime(time_input, tz)
if (dt_value is None):
dt_value = self.date_to_datetime(time_input, tz)
if (dt_value is None):
dt_value = self.human_date_to_datetime(time_input, tz)
if (dt_value is None):
raise RuntimeError('Could not format input ({}) to datetime string.'.format(time_input))
return dt_value
|
Return datetime object from multiple formats.
Formats:
#. Human Input (e.g. 30 days ago, last friday)
#. ISO 8601 (e.g. 2017-11-08T16:52:42Z)
#. Loose Date format (e.g. 2017 12 25)
#. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753)
Args:
time_input (string): The time input string (see formats above).
tz (string): The time zone for the returned data.
Returns:
(datetime.datetime): Python datetime.datetime object.
|
codesearchnet
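Illustrative calls, assuming `utils` is the instance exposing this method; the timezone names are standard IANA identifiers.
dt1 = utils.any_to_datetime('2017-11-08T16:52:42Z', 'UTC')        # ISO 8601
dt2 = utils.any_to_datetime(1510686617, 'US/Eastern')             # epoch seconds
dt3 = utils.any_to_datetime('30 days ago')                        # human input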
|
def register_for_auto_class(cls, auto_class='TFAutoModel'):
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f'{auto_class} is not a valid auto class.')
cls._auto_class = auto_class
|
Register this class with a given auto class. This should only be used for custom models as the ones in the
library are already mapped with an auto class.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`):
The auto class to register this new model with.
|
github-repos
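A sketch for a custom model class; `TFMyCustomModel` and `MyCustomConfig` are hypothetical names used only for illustration.
class TFMyCustomModel(TFPreTrainedModel):          # hypothetical custom architecture
    config_class = MyCustomConfig                  # assumed custom config class
    ...

TFMyCustomModel.register_for_auto_class('TFAutoModel')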
|
def remove_redistribution(self, protocol):
protocols = ['bgp', 'rip', 'static', 'connected']
if (protocol not in protocols):
raise ValueError('redistributed protocol must be bgp, connected, rip or static')
cmd = 'no redistribute {}'.format(protocol)
return self.configure_ospf(cmd)
|
Removes a protocol redistribution to OSPF
Args:
protocol (str): protocol to stop redistributing (bgp, rip, static or connected)
Returns:
bool: True if the command completes successfully
Exception:
ValueError: This will be raised if the protocol passed is not one
of the following: [rip, bgp, static, connected]
|
codesearchnet
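A hedged usage sketch in pyeapi style; the node connection setup is assumed to exist already.
ospf = node.api('ospf')                 # node: an established pyeapi Node (assumed)
if ospf.remove_redistribution('bgp'):
    print('no redistribute bgp applied to the OSPF process')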
|
def _get_degree(num_nodes):
d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)
d_int = int(np.round(d_float))
if (d_int + 1) * (d_int + 2) == 2 * num_nodes:
return d_int
else:
raise ValueError(num_nodes, "not a triangular number")
|
Get the degree of the current surface.
Args:
num_nodes (int): The number of control points for a
Bézier surface.
Returns:
int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`
equals ``num_nodes``.
Raises:
ValueError: If ``num_nodes`` isn't a triangular number.
|
juraj-google-style
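A quick check of the triangular-number relationship (d + 1)(d + 2)/2 = num_nodes, using the function above directly.
assert _get_degree(6) == 2    # 3 * 4 / 2 == 6
assert _get_degree(10) == 3   # 4 * 5 / 2 == 10
try:
    _get_degree(7)            # 7 is not a triangular number
except ValueError:
    pass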
|
class MimiDecoderOutput(ModelOutput):
audio_values: Optional[torch.FloatTensor] = None
decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
|
Args:
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Mimi.
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't
have their past key value states given to this model).
|
github-repos
|
def kill_mprocess(process):
if process and proc_alive(process):
process.terminate()
process.communicate()
return not proc_alive(process)
|
Kill a process and wait for it to exit.
Args:
process - Popen object for the process
Returns:
bool - True if the process is no longer alive
|
juraj-google-style
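Usage sketch on a POSIX system; the 'sleep' binary is assumed to be available.
import subprocess

proc = subprocess.Popen(['sleep', '60'])
stopped = kill_mprocess(proc)   # True once the process has terminated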
|
def getargspec(obj):
if isinstance(obj, functools.partial):
return _get_argspec_for_partial(obj)
decorators, target = tf_decorator.unwrap(obj)
spec = next((d.decorator_argspec for d in decorators if d.decorator_argspec is not None), None)
if spec:
return spec
try:
return _getargspec(target)
except TypeError:
pass
if isinstance(target, type):
try:
return _getargspec(target.__init__)
except TypeError:
pass
try:
return _getargspec(target.__new__)
except TypeError:
pass
return _getargspec(type(target).__call__)
|
TFDecorator-aware replacement for `inspect.getargspec`.
Note: `getfullargspec` is recommended as the python 2/3 compatible
replacement for this function.
Args:
obj: A function, partial function, or callable object, possibly decorated.
Returns:
The `ArgSpec` that describes the signature of the outermost decorator that
changes the callable's signature, or the `ArgSpec` that describes
the object if not decorated.
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
TypeError: For objects of unsupported types.
|
github-repos
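A small example of the functools.partial path (the function name is illustrative); the expected spec follows from the partial-handling code shown further below.
import functools

def scale(x, factor=2):
    return x * factor

spec = getargspec(functools.partial(scale, factor=3))
# spec.args -> ['x', 'factor'], spec.defaults -> (3,)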
|
def wait_until_final(self, poll_interval=1, timeout=60):
start_time = time.time()
elapsed = 0
while (self.status != "complete" and
(timeout <= 0 or elapsed < timeout)):
time.sleep(poll_interval)
self.refresh()
elapsed = time.time() - start_time
|
Polls the status URL at the given interval until the status is complete
or the timeout expires.
Args:
poll_interval (int): how often to poll the status service.
timeout (int): how long to poll the URL until giving up. Use <= 0
to wait forever
|
juraj-google-style
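Typical call, assuming `status` is a status resource object exposing this method and the `status` attribute used above.
status.wait_until_final(poll_interval=2, timeout=300)   # poll every 2 s, give up after 5 min
if status.status == 'complete':
    print('status resource reached a final state')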
|
def save_graph_def(file_name, frozen_graph_def):
tf.io.write_graph(frozen_graph_def, os.path.dirname(file_name), os.path.basename(file_name), as_text=False)
tf.compat.v1.logging.info('Saved frozen graph to %s', file_name)
|
Writes a graph def file out to disk.
Args:
file_name: Where to save the file.
frozen_graph_def: GraphDef proto object to save.
|
github-repos
|
def ws_db004(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `ws_db004`'.format(value))
self._ws_db004 = value
|
Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def pretty_dump(fn):
@wraps(fn)
def pretty_dump_wrapper(*args, **kwargs):
response.content_type = 'application/json; charset=utf-8'
return json.dumps(fn(*args, **kwargs), indent=4, separators=(',', ': '))
return pretty_dump_wrapper
|
Decorator used to output prettified JSON.
``response.content_type`` is set to ``application/json; charset=utf-8``.
Args:
fn (fn pointer): Function returning any basic python data structure.
Returns:
str: Data converted to prettified JSON.
|
codesearchnet
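A usage sketch assuming a Bottle-style app (the `response` object used by the decorator presumably comes from that framework); route and payload are placeholders.
from bottle import route

@route('/api/items')
@pretty_dump
def items():
    return {'items': [1, 2, 3], 'count': 3}
# GET /api/items now returns indented JSON with charset set to utf-8.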
|
def __init__(self, key_type=None, value_type=None, min_length=None, max_length=None, empty=True):
super(DictTypeChecker, self).__init__(base_type=dict)
self.key_type = key_type
self.value_type = value_type
self.min_length = min_length
self.max_length = max_length
self.empty = empty
|
Initialization method.
Args:
key_type (type): the type of the dict keys.
value_type (type): the type of the dict values.
min_length (int): minimum length of the dict (included).
max_length (int): maximum length of the dict (included).
empty (bool): whether empty dict is allowed.
|
juraj-google-style
|
def _get_argspec_for_partial(obj):
n_prune_args = len(obj.args)
partial_keywords = obj.keywords or {}
args, varargs, keywords, defaults = getargspec(obj.func)
args = args[n_prune_args:]
no_default = object()
all_defaults = [no_default] * len(args)
if defaults:
all_defaults[-len(defaults):] = defaults
for kw, default in iter(partial_keywords.items()):
if kw in args:
idx = args.index(kw)
all_defaults[idx] = default
elif not keywords:
raise ValueError(f'{obj} does not have a **kwargs parameter, but contains an unknown partial keyword {kw}.')
first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
if first_default is None:
return ArgSpec(args, varargs, keywords, None)
invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]
if invalid_default_values:
raise ValueError(f'{obj} has some keyword-only arguments, which are not supported: {invalid_default_values}.')
return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
|
Implements `getargspec` for `functools.partial` objects.
Args:
obj: The `functools.partial` object
Returns:
An `inspect.ArgSpec`
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
|
github-repos
|
def set_custom_getter_compose(custom_getter):
tf.get_variable_scope().set_custom_getter(_compose_custom_getters(tf.get_variable_scope().custom_getter, custom_getter))
|
Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
|
codesearchnet
|
def __init__(self, resolver_context, encoding='utf-8'):
super(ZipFileSystem, self).__init__(resolver_context)
self._file_object = None
self._zip_file = None
self.encoding = encoding
|
Initializes a file system.
Args:
resolver_context (Context): a resolver context.
encoding (Optional[str]): encoding of the file entry name.
|
juraj-google-style
|
def _publish_to_subscribers(event: Event):
subscribers = get_subscribers(event.object_type)
for sub in subscribers:
DB.prepend_to_list(_keys.published(event.object_type, sub), event.id, pipeline=True)
event_dict = deepcopy(event.config)
event_dict.pop('id')
DB.set_hash_value(_keys.data(event.object_type, sub), event.id, str(event_dict), pipeline=True)
DB.publish(event.object_type, event.id, pipeline=True)
|
Publish an event to all subscribers.
- Adds the event id to the published event list for all subscribers.
- Adds the event data to the published event data for all subscribers.
- Publishes the event id notification to all subscribers.
Args:
event (Event): Event object to publish.
|
codesearchnet
|
def resolve_import(name, is_from, is_star):
if (name.startswith('.') or is_builtin(name)):
return None
ret = _resolve_import(name)
if ((ret is None) and is_from and (not is_star)):
(package, _) = name.rsplit('.', 1)
ret = _resolve_import(package)
return ret
|
Use python to resolve an import.
Args:
name: The fully qualified module name.
is_from: Whether the name comes from a ``from ... import`` statement.
is_star: Whether the import is a star import (``from ... import *``).
Returns:
The path to the module source file or None.
|
codesearchnet
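Illustrative calls; the results depend on the interpreter's search path, so the expected values are only indicative.
path = resolve_import('json.decoder', is_from=False, is_star=False)
# -> path to json/decoder.py on most installs, or None for builtins/relative imports

pkg = resolve_import('collections.abc', is_from=True, is_star=False)
# a from-import falls back to resolving the parent package if the dotted name itself fails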
|
def send_message(msg: 'EFBMsg') -> Optional['EFBMsg']:
global middlewares, master, slaves
if msg is None:
return
for i in middlewares:
m = i.process_message(msg)
if m is None:
return None
assert m is not None
msg = m
msg.verify()
if msg.deliver_to.channel_id == master.channel_id:
return master.send_message(msg)
elif msg.deliver_to.channel_id in slaves:
return slaves[msg.deliver_to.channel_id].send_message(msg)
else:
raise EFBChannelNotFound(msg)
|
Deliver a message to the destination channel.
Args:
msg (EFBMsg): The message
Returns:
The message sent by the destination channel,
includes the updated message ID from there.
Returns ``None`` if the message is not sent.
|
juraj-google-style
|
def enable_sns_notification(self, region, trailName):
ct = self.session.client('cloudtrail', region_name=region)
ct.update_trail(Name=trailName, SnsTopicName=self.topic_name)
auditlog(event='cloudtrail.enable_sns_notification', actor=self.ns, data={'account': self.account.account_name, 'region': region})
self.log.info('Enabled SNS notifications for trail {} in {}/{}'.format(trailName, self.account.account_name, region))
|
Enable SNS notifications for a Trail
Args:
region (`str`): Name of the AWS region
trailName (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
codesearchnet
|
def grepPDF(self, path):
with open(path, 'rb') as pdf_file_obj:
match = set()
text = ''
pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
pages = pdf_reader.numPages
for page in range(pages):
page_obj = pdf_reader.getPage(page)
text += ('\n' + page_obj.extractText())
match.update(set((x.lower() for x in re.findall(self._keywords, text, re.IGNORECASE))))
return match
|
Parse a PDF file's text content for keywords.
Args:
path: PDF file path.
Returns:
match: set of unique, lower-cased keyword matches found in the document.
|
codesearchnet
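A brief usage sketch; `scraper` stands for an instance whose `_keywords` pattern was configured elsewhere, and the path is a placeholder.
matches = scraper.grepPDF('/tmp/report.pdf')
if matches:
    print('keywords found:', ', '.join(sorted(matches)))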
|
def convert_builtin_to_typing(typ):
if getattr(typ, '__origin__', None) in _BUILTINS_TO_TYPING:
args = map(convert_builtin_to_typing, typ.__args__)
typ = _BUILTINS_TO_TYPING[typ.__origin__].copy_with(tuple(args))
return typ
|
Recursively convert a given builtin generic to a typing object.
Args:
typ (`builtins`): builtin object whose origin exists in _BUILTINS_TO_TYPING.
Returns:
type: The given builtin converted to its typing equivalent.
|
github-repos
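For example, on Python 3.9+ where builtin generics expose `__origin__`, and assuming `_BUILTINS_TO_TYPING` maps list/dict to their typing counterparts:
converted = convert_builtin_to_typing(dict[str, list[int]])
# -> typing.Dict[str, typing.List[int]]
plain = convert_builtin_to_typing(int)   # not a parameterized builtin: returned unchanged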
|
def __init__(self, validator_map):
self.validators = dict(validator_map)
v_sorted = sorted(self.validators.items(), key=lambda t: t[0])
self.validator_descriptions = ['{}:<{}>'.format(k, v) for k, v in v_sorted]
self.name = 'dict({})'.format(', '.join(self.validator_descriptions))
self.description = '\nDict options: \n '
self.description += '\n '.join(self.validator_descriptions)
self.kv_regex = re.compile(r'[=:]+')
|
Create a dictionary type from a dictionary of other types
Args:
validator_map -- a mapping from names to types
Examples:
>>> Dict({'a': int, 'b': int})('a:1,b:2')
{'a': 1, 'b': 2}
>>> Dict({'a': str, 'b': int})('a:asdf b=1234')
{'a': 'asdf', 'b': 1234}
>>> Dict({'a': Int() | Keyword('', None), 'b': Int()})('a,b=1')
{'a': None, 'b': 1}
|
juraj-google-style
|
def __call__(self, class_logits, box_regression):
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = F.cross_entropy(class_logits, labels)
sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
if self.cls_agnostic_bbox_reg:
map_inds = torch.tensor([4, 5, 6, 7], device=device)
else:
map_inds = 4 * labels_pos[:, None] + torch.tensor(
[0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
|
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
|
juraj-google-style
|
async def request(context, url, timeout=60, method='get', good=(200,), retry=tuple(range(500, 512)), return_type='text', **kwargs):
session = context.session
loggable_url = get_loggable_url(url)
async with async_timeout.timeout(timeout):
log.debug('{} {}'.format(method.upper(), loggable_url))
async with session.request(method, url, **kwargs) as resp:
log.debug('Status {}'.format(resp.status))
message = 'Bad status {}'.format(resp.status)
if (resp.status in retry):
raise ScriptWorkerRetryException(message)
if (resp.status not in good):
raise ScriptWorkerException(message)
if (return_type == 'text'):
return (await resp.text())
elif (return_type == 'json'):
return (await resp.json())
else:
return resp
|
Async aiohttp request wrapper.
Args:
context (scriptworker.context.Context): the scriptworker context.
url (str): the url to request
timeout (int, optional): timeout after this many seconds. Default is 60.
method (str, optional): The request method to use. Default is 'get'.
good (list, optional): the set of good status codes. Default is (200, )
retry (list, optional): the set of status codes that result in a retry.
Default is tuple(range(500, 512)).
return_type (str, optional): The type of value to return. Takes
'json' or 'text'; other values will return the response object.
Default is text.
**kwargs: the kwargs to send to the aiohttp request function.
Returns:
object: the response text() if return_type is 'text'; the response
json() if return_type is 'json'; the aiohttp request response
object otherwise.
Raises:
ScriptWorkerRetryException: if the status code is in the retry list.
ScriptWorkerException: if the status code is not in the retry list or
good list.
|
codesearchnet
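A hedged usage sketch; the scriptworker context construction is elided and the URL is a placeholder.
async def fetch_manifest(context):
    # 5xx responses raise ScriptWorkerRetryException; other bad statuses raise ScriptWorkerException.
    return await request(context, 'https://example.com/manifest.json',
                         timeout=30, return_type='json')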
|
def CanSplit(self, must_split):
current = self.next_token
previous = current.previous_token
if current.is_pseudo:
return False
if not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and (subtypes.DICTIONARY_KEY not in current.subtypes) and (not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):
return False
if not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and (not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):
return False
if previous and previous.value == '(' and (current.value == ')'):
token = previous.previous_token
while token:
prev = token.previous_token
if not prev or prev.name not in {'NAME', 'DOT'}:
break
token = token.previous_token
if token and subtypes.DICTIONARY_VALUE in token.subtypes:
if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):
return False
if previous and previous.value == '.' and (current.value == '.'):
return False
return current.can_break_before
|
Determine if we can split before the next token.
Arguments:
must_split: (bool) A newline was required before this token.
Returns:
True if the line can be split before the next token.
|
github-repos
|
def assignment_propagation(node):
n_reads = read_counts(node)
to_remove = []
for succ in gast.walk(node):
if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and
len(succ.targets) == 1 and isinstance(succ.targets[0], gast.Name)):
rhs_name = succ.value.id
rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in')
if def_[0] == rhs_name]
if (len(rhs_defs) == 1 and isinstance(rhs_defs[0], gast.Assign) and
n_reads[rhs_defs[0]] == 1 and
isinstance(rhs_defs[0].value, gast.Name) and
isinstance(rhs_defs[0].targets[0], gast.Name)):
to_remove.append(rhs_defs[0])
succ.value = rhs_defs[0].value
transformers.Remove(to_remove).visit(node)
anno.clearanno(node)
return node
|
Perform assignment propagation.
Assignment propagation is not a compiler optimization as much as a
readability optimization. If a variable name is used only once, it gets
renamed when possible e.g. `y = x; z = y` will become `z = x`.
Args:
node: The AST to optimize.
Returns:
The optimized AST.
|
juraj-google-style
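Conceptually, for a function body annotated with reaching definitions (the analysis pass is assumed to have run first), the single-read variable is folded away.
import gast

tree = gast.parse("def f(x):\n    y = x\n    z = y\n    return z")
# After the required definition/read-count annotations are attached:
# tree = assignment_propagation(tree)   # body becomes:  z = x; return z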
|
def foreach_worker(self, fn):
results = ray.get([w.foreach_worker.remote(fn) for w in self.workers])
return results
|
Apply the given function to each remote worker.
Args:
fn: Function applied to each worker.
Returns:
List of results from applying the function on each worker.
|
codesearchnet
|