code | docstring | source
---|---|---|
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = text_outputs[0][:, 0, :]
text_features = self.text_projection(last_hidden_state)
return text_features
|
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AlignTextModel`].
Examples:
```python
>>> from transformers import AutoTokenizer, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```
|
github-repos
|
def __eq__(self, other):
res = False
if type(self) is type(other) and \
self.name == other.name and \
self.size == other.size:
res = True
return res
|
Two Registers are the same if they are of the same type
(i.e. quantum/classical), and have the same name and size.
Args:
other (Register): other Register
Returns:
bool: are self and other equal.
|
juraj-google-style
|
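A minimal usage sketch of the equality rule above, assuming Qiskit is installed and its registers implement this `__eq__` (same type, name and size must all match):

```python
# Sketch assuming qiskit is installed and Register.__eq__ behaves as above.
from qiskit import QuantumRegister, ClassicalRegister

q1 = QuantumRegister(3, 'r')
q2 = QuantumRegister(3, 'r')
c1 = ClassicalRegister(3, 'r')

print(q1 == q2)                       # True: same type, name and size
print(q1 == c1)                       # False: quantum vs classical register
print(q1 == QuantumRegister(4, 'r'))  # False: sizes differ
```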
def label_contains(node, triggers):
for trigger in triggers:
if (trigger.trigger_word in node.label):
(yield TriggerNode(trigger, node))
|
Determine if node contains any of the trigger_words provided.
Args:
node(Node): CFG node to check.
triggers(list[Union[Sink, Source]]): list of trigger words to look for.
Returns:
Iterable of TriggerNodes found. Can be multiple because multiple
trigger_words can be in one node.
|
codesearchnet
|
def __call__(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
out = nn_ops.conv2d(x, self.filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
return {'output': out}
|
Performs a 2D convolution operation.
Args:
x: Input tensor to perform convolution on.
Returns:
A map of: output key -> output result.
|
github-repos
|
def _has_valid_abs_ref(self, i, construction_table):
c_table = construction_table
abs_refs = constants.absolute_refs
A = np.empty((3, 3))
row = c_table.index.get_loc(i)
if (row > 2):
message = 'The index {i} is not from the first three rows'.format
raise ValueError(message(i=i))
for k in range(3):
if (k < row):
A[k] = self.loc[(c_table.iloc[(row, k)], ['x', 'y', 'z'])]
else:
A[k] = abs_refs[c_table.iloc[(row, k)]]
(v1, v2) = ((A[2] - A[1]), (A[1] - A[0]))
K = np.cross(v1, v2)
zero = np.full(3, 0.0)
return (not (np.allclose(K, zero) or np.allclose(v1, zero) or np.allclose(v2, zero)))
|
Checks if ``i`` uses valid absolute references.
For an index in the first three rows of the ``construction_table``,
the references are invalid if they are collinear.
This case has to be treated specially, because the references are not
only atoms (which fix internal degrees of freedom) but also points in
cartesian space, called absolute references, which fix translational
and rotational degrees of freedom.
Args:
i (label): The label has to be in the first three rows.
construction_table (pd.DataFrame):
Returns:
bool:
|
codesearchnet
|
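The collinearity test reduces to a cross product of the two difference vectors; a small self-contained NumPy illustration:

```python
import numpy as np

A = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [2.0, 0.0, 0.0]])   # three collinear reference points
v1, v2 = A[2] - A[1], A[1] - A[0]
print(np.allclose(np.cross(v1, v2), 0.0))  # True -> collinear, references invalid

A[2] = [2.0, 1.0, 0.0]                      # move one point off the line
v1, v2 = A[2] - A[1], A[1] - A[0]
print(np.allclose(np.cross(v1, v2), 0.0))  # False -> references are valid
```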
def score_braycurtis(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return (1 - distance.braycurtis(t1_kde, t2_kde))
|
Compute a weighting score based on the Bray-Curtis distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
codesearchnet
|
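The score is simply one minus SciPy's Bray-Curtis dissimilarity between the two density vectors; a minimal sketch with made-up KDE values:

```python
import numpy as np
from scipy.spatial import distance

# Hypothetical kernel density estimates for two terms over the same grid.
t1_kde = np.array([0.1, 0.4, 0.3, 0.2])
t2_kde = np.array([0.2, 0.3, 0.3, 0.2])

score = 1 - distance.braycurtis(t1_kde, t2_kde)
print(score)  # close to 1.0 for similar distributions, 0.0 for disjoint ones
```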
def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):
nest.assert_shallow_structure(dequeues, dims)
return nest.map_structure_up_to(dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)
|
Tags appropriate XLA sharding attribute to the dequeued tensors.
Args:
dequeues: A list of dequeued tensors on TPU.
dims: A list of integers describing how the tensor is partitioned.
Returns:
The same dequeues with appropriate xla_sharding attribute.
|
github-repos
|
def Delete(self, queue, tasks, mutation_pool=None):
if (queue is None):
return
if (mutation_pool is None):
raise ValueError("Mutation pool can't be none.")
mutation_pool.QueueDeleteTasks(queue, tasks)
|
Removes the tasks from the queue.
Note that tasks can already have been removed. It is not an error
to re-delete an already deleted task.
Args:
queue: A queue to clear.
tasks: A list of tasks to remove. Tasks may be Task() instances or integers
representing the task_id.
mutation_pool: A MutationPool object to schedule deletions on.
Raises:
ValueError: Mutation pool was not passed in.
|
codesearchnet
|
def getcallargs_forhints(func, *type_args, **type_kwargs):
try:
signature = get_signature(func)
except ValueError as e:
logging.warning('Could not get signature for function: %s: %s', func, e)
return {}
try:
bindings = signature.bind(*type_args, **type_kwargs)
except TypeError as e:
raise TypeCheckError(e)
bound_args = bindings.arguments
for param in signature.parameters.values():
if param.name in bound_args:
if param.kind == param.VAR_POSITIONAL:
bound_args[param.name] = _normalize_var_positional_hint(bound_args[param.name])
elif param.kind == param.VAR_KEYWORD:
bound_args[param.name] = _normalize_var_keyword_hint(bound_args[param.name], param.name)
elif param.annotation != param.empty:
bound_args[param.name] = param.annotation
elif param.kind == param.VAR_POSITIONAL:
bound_args[param.name] = _ANY_VAR_POSITIONAL
elif param.kind == param.VAR_KEYWORD:
bound_args[param.name] = _ANY_VAR_KEYWORD
elif param.default is not param.empty:
bound_args[param.name] = typehints.Any
else:
raise ValueError('Unexpected unbound parameter: %s' % param.name)
return dict(bound_args)
|
Bind type_args and type_kwargs to func.
Works like inspect.getcallargs, with some modifications to support type hint
checks.
For unbound args, will use annotations and fall back to Any (or variants of
Any).
Returns:
A mapping from parameter name to argument.
|
github-repos
|
def poll(self, timeout=None):
p = select.poll()
p.register(self._fd, (select.POLLIN | select.POLLPRI))
events = p.poll(int(timeout * 1000) if timeout is not None else None)
if (len(events) > 0):
return True
return False
|
Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
|
codesearchnet
|
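The timeout handling mirrors `select.poll` on POSIX: `None` blocks, `0` polls without blocking, and positive values are converted to milliseconds. A self-contained illustration on a pipe:

```python
import os
import select

r, w = os.pipe()
p = select.poll()
p.register(r, select.POLLIN | select.POLLPRI)

print(p.poll(0))          # [] -> nothing readable yet (non-blocking poll)
os.write(w, b'x')
print(bool(p.poll(500)))  # True -> data became readable within 500 ms

os.close(r)
os.close(w)
```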
def _SetExtractionPreferredTimeZone(self, knowledge_base):
if self._preferred_time_zone:
try:
knowledge_base.SetTimeZone(self._preferred_time_zone)
except ValueError:
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
self._preferred_time_zone, knowledge_base._time_zone.zone))
|
Sets the preferred time zone before extraction.
Args:
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
|
juraj-google-style
|
def __init__(self, breakpoints_func=None):
self._run_key_to_original_graphs = dict()
self._run_key_to_debug_graphs = dict()
if breakpoints_func:
assert callable(breakpoints_func)
self._breakpoints_func = breakpoints_func
|
Constructor of RunStates.
Args:
breakpoints_func: A callable of the signature:
def breakpoint_func():
which returns all the currently activated breakpoints.
|
juraj-google-style
|
def set_s3_bucket(self, region, name, bucketName):
ct = self.session.client('cloudtrail', region_name=region)
ct.update_trail(Name=name, S3BucketName=bucketName)
auditlog(
event='cloudtrail.set_s3_bucket',
actor=self.ns,
data={
'account': self.account.account_name,
'region': region
}
)
self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(
bucketName,
name,
self.account.account_name,
region
))
|
Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None`
|
juraj-google-style
|
def search_variants(self, variant_ids):
query = {'_id': {'$in': variant_ids}}
return self.db.variant.find(query)
|
Make a batch search for variants in the database
Args:
variant_ids(list(str)): List of variant ids
Returns:
res(pymongo.Cursor(variant_obj)): The result
|
juraj-google-style
|
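The batch lookup is a plain `$in` query; an illustrative sketch that assumes a reachable MongoDB instance (database name and `_id` values are made up):

```python
# Illustrative only: requires a running MongoDB instance.
from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017')['scout']   # hypothetical database name
variant_ids = ['1_880086_T_C', '1_880186_G_A']            # made-up _id values
cursor = db.variant.find({'_id': {'$in': variant_ids}})   # same query as above
for variant_obj in cursor:
    print(variant_obj['_id'])
```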
def verify_json(self, json, user_key, user_id, device_id):
try:
signatures = json.pop('signatures')
except KeyError:
return False
key_id = 'ed25519:{}'.format(device_id)
try:
signature_base64 = signatures[user_id][key_id]
except KeyError:
json['signatures'] = signatures
return False
unsigned = json.pop('unsigned', None)
try:
olm.ed25519_verify(user_key, encode_canonical_json(json), signature_base64)
success = True
except olm.utility.OlmVerifyError:
success = False
json['signatures'] = signatures
if unsigned:
json['unsigned'] = unsigned
return success
|
Verifies a signed key object's signature.
The object must have a 'signatures' key associated with an object of the form
`user_id: {key_id: signature}`.
Args:
json (dict): The JSON object to verify.
user_key (str): The public ed25519 key which was used to sign the object.
user_id (str): The user who owns the device.
device_id (str): The device that owns the key.
Returns:
True if the verification was successful, False if not.
|
codesearchnet
|
def _FlushCache(cls, format_categories):
if definitions.FORMAT_CATEGORY_ARCHIVE in format_categories:
cls._archive_remainder_list = None
cls._archive_scanner = None
cls._archive_store = None
if definitions.FORMAT_CATEGORY_COMPRESSED_STREAM in format_categories:
cls._compressed_stream_remainder_list = None
cls._compressed_stream_scanner = None
cls._compressed_stream_store = None
if definitions.FORMAT_CATEGORY_FILE_SYSTEM in format_categories:
cls._file_system_remainder_list = None
cls._file_system_scanner = None
cls._file_system_store = None
if definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE in format_categories:
cls._storage_media_image_remainder_list = None
cls._storage_media_image_scanner = None
cls._storage_media_image_store = None
if definitions.FORMAT_CATEGORY_VOLUME_SYSTEM in format_categories:
cls._volume_system_remainder_list = None
cls._volume_system_scanner = None
cls._volume_system_store = None
|
Flushes the cached objects for the specified format categories.
Args:
format_categories (set[str]): format categories.
|
juraj-google-style
|
def get_country_by_name(self, country_name) -> 'Country':
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
|
juraj-google-style
|
def close(self, virtual_account_id, data={}, **kwargs):
url = '{}/{}'.format(self.base_url, virtual_account_id)
data['status'] = 'closed'
return self.patch_url(url, data, **kwargs)
|
Close Virtual Account from given Id
Args:
virtual_account_id:
Id of the Virtual Account that has to be closed
|
codesearchnet
|
def check_subword_sampling(tokenizer: PreTrainedTokenizer, text: Optional[str]=None, test_sentencepiece_ignore_case: bool=True) -> None:
text = 'This is a test for subword regularization.' if text is None else text
if test_sentencepiece_ignore_case:
text = text.lower()
tokens_list = []
for _ in range(5):
tokens_list.append(tokenizer.tokenize(text))
combinations = itertools.combinations(tokens_list, 2)
subword_sampling_found = False
for combination in combinations:
if combination[0] != combination[1]:
subword_sampling_found = True
unittest.TestCase().assertTrue(subword_sampling_found)
for tokens in tokens_list:
if test_sentencepiece_ignore_case:
unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())
else:
unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens))
|
Check if the tokenizer generates different results when subword regularization is enabled.
Subword regularization augments training data with subword sampling.
This has a random component.
Args:
tokenizer: The tokenizer to check.
text: The text to use for the checks.
test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.
|
github-repos
|
def __init__(self, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store'):
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
self.event_store = event_store
|
Initializes a :class:`WriteUserEvent` transform.
Args:
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
event_store (str): Optional. Name of the event store.
Default: 'default_event_store'
|
github-repos
|
def get_path_from_query_string(req):
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path')
|
Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
|
juraj-google-style
|
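A convenient way to exercise this kind of handler without a running server is Flask's test request context; a sketch assuming Flask is installed:

```python
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context('/?path=/data/file.csv'):
    print(request.args.get('path'))   # '/data/file.csv'

with app.test_request_context('/'):
    print(request.args.get('path'))   # None -> the helper above raises UserError
```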
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
|
Checks for invalid increment *count++.
For example, the following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
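cpplint flags this with a small regular expression; a comparable pattern (an approximation, not necessarily the exact upstream `_RE_PATTERN_INVALID_INCREMENT`) behaves like this:

```python
import re

# Approximation of the invalid-increment pattern used by cpplint.
RE_INVALID_INCREMENT = re.compile(r'^\s*\*\w+(\+\+|--);')

print(bool(RE_INVALID_INCREMENT.match('*count++;')))    # True  -> flagged
print(bool(RE_INVALID_INCREMENT.match('(*count)++;')))  # False -> fine
print(bool(RE_INVALID_INCREMENT.match('++*count;')))    # False -> fine
```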
def update_panel(self, panel_obj, version=None, date_obj=None):
LOG.info('Updating panel %s', panel_obj['panel_name'])
date = panel_obj['date']
if version:
LOG.info('Updating version from {0} to version {1}'.format(panel_obj['version'], version))
panel_obj['version'] = version
if date_obj:
date = date_obj
else:
date = (date_obj or dt.datetime.now())
panel_obj['date'] = date
updated_panel = self.panel_collection.find_one_and_replace({'_id': panel_obj['_id']}, panel_obj, return_document=pymongo.ReturnDocument.AFTER)
return updated_panel
|
Replace an existing gene panel with a new one
Keeps the object id
Args:
panel_obj(dict)
version(float)
date_obj(datetime.datetime)
Returns:
updated_panel(dict)
|
codesearchnet
|
def list(sandbox_name, results=15, start=0):
result = util.callm("%s/%s" % ('sandbox', 'list'), {'sandbox':sandbox_name, 'results': results, 'start': start})
assets = result['response']['assets']
start = result['response']['start']
total = result['response']['total']
return ResultList(assets, start, total)
|
Returns a list of all assets available in this sandbox
Args:
sandbox_name (str): A string representing the name of the sandbox
Kwargs:
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of asset dictionaries
Example:
>>> sandbox.list('bluenote')
[{}, {}]
>>>
|
juraj-google-style
|
def get_contract_data(self, contract_name):
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, 'r') as contract_data_file:
contract_data = json.load(contract_data_file)
abi = contract_data['abi']
bytecode = contract_data['evm']['bytecode']['object']
return abi, bytecode
|
Returns the contract data for a given contract
Args:
contract_name (str): Name of the contract to return.
Returns:
str, str: ABI and bytecode of the contract
|
juraj-google-style
|
def __init__(self, batch_env):
self._batch_env = batch_env
batch_dims = (len(self._batch_env),)
observ_shape = self._parse_shape(self._batch_env.observation_space)
observ_dtype = self._parse_dtype(self._batch_env.observation_space)
action_shape = self._parse_shape(self._batch_env.action_space)
action_dtype = self._parse_dtype(self._batch_env.action_space)
with tf.variable_scope('env_temporary'):
self._observ = tf.Variable(
lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),
name='observ', trainable=False)
self._action = tf.Variable(
lambda: tf.zeros(batch_dims + action_shape, action_dtype),
name='action', trainable=False)
self._reward = tf.Variable(
lambda: tf.zeros(batch_dims, tf.float32),
name='reward', trainable=False)
self._done = tf.Variable(
lambda: tf.cast(tf.ones(batch_dims), tf.bool),
name='done', trainable=False)
|
Batch of environments inside the TensorFlow graph.
Args:
batch_env: Batch environment.
|
juraj-google-style
|
def create_temp_grad(node, namer, tangent=False):
if not isinstance(node, (gast.Subscript, gast.Name)):
raise TypeError
def _name_temp_grad(node):
name = namer.temp_grad(node.id, tangent)
temp_node = gast.Name(id=name, annotation=None, ctx=None)
return temp_node
if isinstance(node, gast.Subscript):
temp_node = _name_temp_grad(node.value)
else:
temp_node = _name_temp_grad(node)
anno.setanno(temp_node, 'temp_adjoint_var', node)
return temp_node
|
Create a variable to store partial gradients.
Args:
node: See `create_grad`.
namer: See `create_grad`.
tangent: See `create_grad`.
Returns:
node: See `create_grad`. Returns a node representing the partial gradient.
Note that this is always a simple variable e.g. the temporary partial
of `x[i]` can be something like `_dxi`.
Nodes are given an annotation `temp_adjoint_var`.
|
juraj-google-style
|
def SetConfiguredUsers(self, users):
prefix = self.logger.name + '-'
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix, delete=True) as updated_users:
updated_users_file = updated_users.name
for user in users:
updated_users.write(user + '\n')
updated_users.flush()
if not os.path.exists(self.google_users_dir):
os.makedirs(self.google_users_dir)
shutil.copy(updated_users_file, self.google_users_file)
file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0)
|
Set the list of configured Google user accounts.
Args:
users: list, the username strings of the Linux accounts.
|
juraj-google-style
|
def call_remoteckan(self, *args, **kwargs):
requests_kwargs = kwargs.get('requests_kwargs', dict())
credentials = self._get_credentials()
if credentials:
requests_kwargs['auth'] = credentials
kwargs['requests_kwargs'] = requests_kwargs
apikey = kwargs.get('apikey', self.get_api_key())
kwargs['apikey'] = apikey
return self.remoteckan().call_action(*args, **kwargs)
|
Calls the remote CKAN
Args:
*args: Arguments to pass to remote CKAN call_action method
**kwargs: Keyword arguments to pass to remote CKAN call_action method
Returns:
Dict: The response from the remote CKAN call_action method
|
codesearchnet
|
def filepaths_in_dir(path):
filepaths = []
for root, directories, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(root, filename)
filepath = filepath.replace(path, '').lstrip('/')
filepaths.append(filepath)
return filepaths
|
Find all files in a directory, and return the relative paths to those files.
Args:
path (str): the directory path to walk
Returns:
list: the list of relative paths to all files inside of ``path`` or its
subdirectories.
|
juraj-google-style
|
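A self-contained demonstration of the relative-path behaviour, using `os.path.relpath` as an alternative to the replace/lstrip approach above:

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'sub'))
    for name in ('a.txt', os.path.join('sub', 'b.txt')):
        open(os.path.join(root, name), 'w').close()

    filepaths = []
    for dirpath, _, filenames in os.walk(root):
        for filename in filenames:
            # relpath gives the path relative to the walked directory.
            filepaths.append(os.path.relpath(os.path.join(dirpath, filename), root))

    print(sorted(filepaths))  # ['a.txt', 'sub/b.txt'] (separator varies by OS)
```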
def DeregisterCredentials(cls, credentials):
if credentials.type_indicator not in cls._credentials:
raise KeyError(
'Credential object not set for type indicator: {0:s}.'.format(
credentials.type_indicator))
del cls._credentials[credentials.type_indicator]
|
Deregisters a path specification credentials.
Args:
credentials (Credentials): credentials.
Raises:
KeyError: if credential object is not set for the corresponding
type indicator.
|
juraj-google-style
|
def get_unscaled_gradients(self, grads):
loss_scale_reciprocal = 1.0 / self.loss_scale
return [_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None for g in grads]
|
Unscales the gradients by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to unscale the gradients
after computing them with `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_scaled_loss` should also be called. See
the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an
example.
Args:
grads: A list of tensors, each which will be divided by the loss scale.
Can have None values, which are ignored.
Returns:
A new list the same size as `grads`, where every non-None value in `grads`
is divided by `LossScaleOptimizer.loss_scale`.
|
github-repos
|
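A sketch of the manual-gradient workflow these methods support, assuming a TF 2.x `tf.keras.mixed_precision.LossScaleOptimizer` as described in the docstring:

```python
import tensorflow as tf

opt = tf.keras.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.SGD(0.1))
var = tf.Variable(2.0)

with tf.GradientTape() as tape:
    loss = var ** 2
    scaled_loss = opt.get_scaled_loss(loss)       # scale before differentiating
scaled_grads = tape.gradient(scaled_loss, [var])
grads = opt.get_unscaled_gradients(scaled_grads)  # undo the loss scaling
opt.apply_gradients(zip(grads, [var]))
```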
def get_fieldset_index(fieldsets, index_or_name):
if isinstance(index_or_name, six.integer_types):
return index_or_name
for (key, value) in enumerate(fieldsets):
if (value[0] == index_or_name):
return key
raise KeyError("Key not found: '{}'.".format(index_or_name))
|
Return the index of a fieldset in the ``fieldsets`` list.
Args:
fieldsets (list): The original ``fieldsets`` list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the fieldset in the ``fieldsets`` list.
|
codesearchnet
|
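Usage with a Django-admin-style ``fieldsets`` structure (illustrative data; assumes the helper above is importable):

```python
fieldsets = [
    ('General', {'fields': ['name', 'slug']}),
    ('Advanced', {'fields': ['registration_required']}),
]

print(get_fieldset_index(fieldsets, 1))           # 1 (integers pass through)
print(get_fieldset_index(fieldsets, 'Advanced'))  # 1 (looked up by title)
# get_fieldset_index(fieldsets, 'Missing') raises KeyError
```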
def make_ordered_column_names(include_label=True):
result = ['clicked'] if include_label else []
for name in _INTEGER_COLUMN_NAMES:
result.append(name)
for name in _CATEGORICAL_COLUMN_NAMES:
result.append(name)
return result
|
Returns the column names in the dataset in the order as they appear.
Args:
include_label: Indicates whether the label feature should be included.
Returns:
A list of column names in the dataset.
|
github-repos
|
def input_defs(self, transitive: bool=True) -> List['SymbolDefinition']:
parent_func = self.parent_func()
var_producers: Dict[str, Set[SymbolDefinition]] = {arg: set() for arg in parent_func.args}
var_producers[parent_func.name] = set()
def analyze_var_producers(k: pg.KeyPath, v: Any, p: pg.Symbolic):
del k, p
if v is self:
return pg.TraverseAction.STOP
if isinstance(v, SymbolDefinition):
var_entry = set([v])
if transitive:
for var_name in v.input_vars():
var_entry.update(var_producers[var_name])
var_producers[v.name] = var_entry
if v is not parent_func and isinstance(v, Function):
return pg.TraverseAction.CONTINUE
return pg.TraverseAction.ENTER
pg.traverse(parent_func, analyze_var_producers)
dependencies: Set[SymbolDefinition] = set()
for var_name in self.input_vars():
if var_name not in var_producers:
raise ValueError(f"Undefined variable {repr(var_name)} found in function '{parent_func.name}' line
dependencies.update(var_producers[var_name])
return sorted(dependencies, key=lambda x: x.line_number())
|
Returns the symbol definitions for the inputs of this code entity.
Args:
transitive: If True, transitive inputs will be included.
Otherwise, only the direct dependencies will be included.
Returns:
A list of `SymbolDefinition` in their declaration order that produce
the inputs required for current code entity.
|
github-repos
|
def replace_case(self, case_obj):
LOG.info('Saving case %s', case_obj['_id'])
case_obj['updated_at'] = datetime.datetime.now()
updated_case = self.case_collection.find_one_and_replace({'_id': case_obj['_id']}, case_obj, return_document=pymongo.ReturnDocument.AFTER)
return updated_case
|
Replace an existing case with a new one
Keeps the object id
Args:
case_obj(dict)
Returns:
updated_case(dict)
|
codesearchnet
|
def register_extension(self, group, name, extension):
if isinstance(extension, str):
name, extension = self.load_extension(extension)[0]
if group not in self._registered_extensions:
self._registered_extensions[group] = []
self._registered_extensions[group].append((name, extension))
|
Register an extension.
Args:
group (str): The type of the extension
name (str): A name for the extension
extension (str or class): If this is a string, then it will be
interpreted as a path to import and load. Otherwise it
will be treated as the extension object itself.
|
juraj-google-style
|
def serve(self, server=None):
if server is None:
from wsgiref.simple_server import make_server
httpd = make_server('', 8000, self)
print('Listening on 0.0.0.0:8000')
try:
httpd.serve_forever()
finally:
httpd.socket.close()
else:
server(self)
|
Serve app using wsgiref or provided server.
Args:
- server (callable): A callable that takes the WSGI app and serves it.
|
juraj-google-style
|
def _log_submission(submission, student_item):
logger.info(
u"Created submission uuid={submission_uuid} for "
u"(course_id={course_id}, item_id={item_id}, "
u"anonymous_student_id={anonymous_student_id})"
.format(
submission_uuid=submission["uuid"],
course_id=student_item["course_id"],
item_id=student_item["item_id"],
anonymous_student_id=student_item["student_id"]
)
)
|
Log the creation of a submission.
Args:
submission (dict): The serialized submission model.
student_item (dict): The serialized student item model.
Returns:
None
|
juraj-google-style
|
def db_en020(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `db_en020`'.format(value))
self._db_en020 = value
|
Corresponds to IDD Field `db_en020`
Mean coincident dry-bulb temperature corresponding to the enthalpy
at the 2.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_en020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def total_covariance_fn(self):
return self._total_covariance_fn
|
The total covariance of the process between two times.
Returns:
A Python callable returning the integrated covariances between two times.
The callable accepts two real `Tensor` arguments. The first argument
is the left end point and the second is the right end point of the time
interval for which the total covariance is needed.
The shape of the two input arguments and their dtypes must match.
The output of the callable is a `Tensor` of shape
`times_shape + [dim, dim]` containing the integrated covariance matrix
between the start times and end times.
|
github-repos
|
def run(self, dag):
num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])
if (num_dag_qubits > self.coupling_map.size()):
raise TranspilerError('Number of qubits greater than device.')
self.property_set['layout'] = Layout.generate_trivial_layout(*dag.qregs.values())
|
Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1.
Args:
dag (DAGCircuit): DAG to find layout for.
Raises:
TranspilerError: if the dag is wider than self.coupling_map
|
codesearchnet
|
def __init__(self,
input_file="mol.qin",
output_file="mol.qout",
rca_gdm_thresh=1.0E-3,
scf_max_cycles=200):
self.input_file = input_file
self.output_file = output_file
self.scf_max_cycles = scf_max_cycles
self.rca_gdm_thresh = rca_gdm_thresh
self.qcinp = QCInput.from_file(self.input_file)
self.outdata = None
self.errors = None
|
Initializes the error handler from a set of input and output files.
Args:
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
rca_gdm_thresh (float): The threshold for the prior scf algorithm.
If last deltaE is larger than the threshold try RCA_DIIS
first, else, try DIIS_GDM first.
scf_max_cycles (int): The max iterations to set to fix SCF failure.
|
juraj-google-style
|
def from_file(filename, file_format="xyz"):
mols = list(pb.readfile(str(file_format), str(filename)))
return BabelMolAdaptor(mols[0].OBMol)
|
Uses OpenBabel to read a molecule from a file in all supported formats.
Args:
filename: Filename of input file
file_format: String specifying any OpenBabel supported formats.
Returns:
BabelMolAdaptor object
|
juraj-google-style
|
def output(self, value):
return super(Source, self).output(self.stream, value)
|
SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid in the context of this operator.
|
juraj-google-style
|
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--name', '--timeline_name', '--timeline-name',
dest='timeline_name', type=str, action='store',
default=cls._DEFAULT_NAME, required=False, help=(
'The name of the timeline in Timesketch. Default: '
'hostname if present in the storage file. If no hostname '
'is found then manual input is used.'))
argument_group.add_argument(
'--index', dest='index', type=str, action='store',
default=cls._DEFAULT_UUID, required=False, help=(
'The name of the Elasticsearch index. Default: Generate a random '
'UUID'))
argument_group.add_argument(
'--flush_interval', '--flush-interval', dest='flush_interval',
type=int, action='store', default=cls._DEFAULT_FLUSH_INTERVAL,
required=False, help=(
'The number of events to queue up before sent in bulk '
'to Elasticsearch.'))
argument_group.add_argument(
'--doc_type', dest='document_type', type=str,
action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=(
'Name of the document type that will be used in ElasticSearch.'))
argument_group.add_argument(
'--username', dest='username', type=str,
action='store', default=cls._DEFAULT_USERNAME, help=(
'Username of a Timesketch user that will own the timeline.'))
|
Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
|
juraj-google-style
|
def get(self, vrf=None):
match = '^router ospf .*'
if vrf:
match += (' vrf %s' % vrf)
config = self.get_block(match)
if (not config):
return None
response = dict()
response.update(self._parse_router_id(config))
response.update(self._parse_vrf(config))
response.update(self._parse_networks(config))
response.update(self._parse_ospf_process_id(config))
response.update(self._parse_redistribution(config))
response.update(self._parse_shutdown(config))
return response
|
Returns the OSPF routing configuration
Args:
vrf (str): VRF name to return OSPF routing config for
Returns:
dict:
keys: router_id (int): OSPF router-id
vrf (str): VRF of the OSPF process
networks (dict): All networks that are advertised in OSPF
ospf_process_id (int): OSPF process id
redistribution (dict): All protocols that are configured to be
redistributed in OSPF
shutdown (bool): Gives the current shutdown state of the process
|
codesearchnet
|
def MapFile(self, key_path_prefix, registry_file):
self._registry_files[key_path_prefix.upper()] = registry_file
registry_file.SetKeyPathPrefix(key_path_prefix)
|
Maps the Windows Registry file to a specific key path prefix.
Args:
key_path_prefix (str): key path prefix.
registry_file (WinRegistryFile): Windows Registry file.
|
codesearchnet
|
def _shape_union(shapes):
return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
|
A shape containing the union of all dimensions in the input shapes.
Args:
shapes: a list of Shapes
Returns:
a Shape
|
codesearchnet
|
def get_problem_name(base_name, was_reversed=False, was_copy=False):
if any(base_name.endswith(suffix) for suffix in ("_rev", "_copy")):
raise ValueError("`base_name` cannot end in '_rev' or '_copy'")
name = base_name
if was_copy:
name = "%s_copy" % name
if was_reversed:
name = "%s_rev" % name
return name
|
Construct a problem name from base and reversed/copy options.
Inverse of `parse_problem_name`.
Args:
base_name: base problem name. Should not end in "_rev" or "_copy"
was_reversed: if the problem is to be reversed
was_copy: if the problem is to be copied
Returns:
string name consistent with use with `parse_problem_name`.
Raises:
ValueError: if `base_name` ends with "_rev" or "_copy".
|
juraj-google-style
|
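Expected behaviour of the name construction, assuming the function above is importable:

```python
print(get_problem_name('translate_ende_wmt32k'))                     # 'translate_ende_wmt32k'
print(get_problem_name('translate_ende_wmt32k', was_reversed=True))  # 'translate_ende_wmt32k_rev'
print(get_problem_name('translate_ende_wmt32k', was_copy=True))      # 'translate_ende_wmt32k_copy'
# get_problem_name('translate_ende_wmt32k_rev') raises ValueError
```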
def raw_dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
val = row[idx]
name = col[0]
if name == Field.Time_Stamp or name == Field.Meter_Address:
d[name] = str(val)
continue
if name == "Raw_A" or name == "Raw_B":
d[name] = str(val)
continue
d[name] = val  # pass remaining columns through unchanged
return d
|
Sqlite callback accepting the cursor and the original row as a tuple.
Simple return of JSON safe types, including raw read hex strings.
Args:
cursor (sqlite cursor): Original cursory
row (sqlite row tuple): Original row.
Returns:
dict: modified row.
|
juraj-google-style
|
def fetch(args: List[str], env: Dict[str, str] = None,
encoding: str = sys.getdefaultencoding()) -> str:
stdout, _ = run(args, env=env, capture_stdout=True,
echo_stdout=False, encoding=encoding)
log.debug(stdout)
return stdout
|
Run a command and returns its stdout.
Args:
args: the command-line arguments
env: the operating system environment to use
encoding: the encoding to use for ``stdout``
Returns:
the command's ``stdout`` output
|
juraj-google-style
|
def SetUpdateTimestamp(self, value):
if value is None or isinstance(value, int):
self._last_update_timestamp = value
else:
raise TypeError('timestamp can only be int or None, not %r' % value)
|
Set the last update timestamp of this map.
Args:
value: An int containing seconds since epoch, or None.
Raises:
TypeError: The argument is not an int or None.
|
github-repos
|
def repl_member_add(self, params):
repl_config = self.config
member_id = max([member['_id'] for member in repl_config['members']]) + 1
member_config = self.member_create(params, member_id)
repl_config['members'].append(member_config)
if not self.repl_update(repl_config):
self.member_del(member_id, reconfig=True)
raise ReplicaSetError("Could not add member to ReplicaSet.")
return member_id
|
Create a new mongod instance and add it to the replica set.
Args:
params - mongod params
Returns:
member_id of the newly added member
Raises:
ReplicaSetError: if the member could not be added to the replica set
|
juraj-google-style
|
async def pull(self, from_image: str, *, auth: Optional[Union[(MutableMapping, str, bytes)]]=None, tag: str=None, repo: str=None, stream: bool=False) -> Mapping:
image = from_image
params = {'fromImage': image}
headers = {}
if repo:
params['repo'] = repo
if tag:
params['tag'] = tag
if (auth is not None):
(registry, has_registry_host, _) = image.partition('/')
if (not has_registry_host):
raise ValueError('Image should have registry host when auth information is provided')
headers['X-Registry-Auth'] = compose_auth_header(auth, registry)
response = (await self.docker._query('images/create', 'POST', params=params, headers=headers))
return (await json_stream_result(response, stream=stream))
|
Similar to `docker pull`, pull an image locally
Args:
from_image: name of the image to pull
repo: repository name given to an image when it is imported
tag: tag to pull; if empty, all tags for the given image are pulled
auth: special {'auth': base64} credentials for pulling from a private repo
stream: if True, stream the response instead of returning it at once
|
codesearchnet
|
def from_filenames(filenames, transformations=None, primitive=True, extend_collection=False):
allcifs = []
for fname in filenames:
with open(fname, 'r') as f:
allcifs.append(f.read())
return CifTransmuter('\n'.join(allcifs), transformations, primitive=primitive, extend_collection=extend_collection)
|
Generates a TransformedStructureCollection from a cif, possibly
containing multiple structures.
Args:
filenames: List of strings of the cif files
transformations: New transformations to be applied to all
structures
primitive: Same meaning as in __init__.
extend_collection: Same meaning as in __init__.
|
codesearchnet
|
async def run_commentator(video_mode: str) -> None:
pya = pyaudio.PyAudio()
video_mode_enum = video.VideoMode(video_mode)
input_processor = video.VideoIn(video_mode=video_mode_enum) + audio_io.PyAudioIn(pya)
async def input_stream():
try:
while True:
await asyncio.sleep(1)
finally:
yield content_api.ProcessorPart('Ending the stream')
commentator_processor = commentator.create_live_commentator(API_KEY)
consume_output = audio_io.PyAudioOut(pya)
live_commentary_agent = input_processor + commentator_processor + consume_output
async for _ in live_commentary_agent(input_stream()):
pass
|
Runs a live commentator in a CLI environment.
The commentator is run from a CLI environment. The audio and video input and
output are connected to the local machine's default input and output devices.
Args:
video_mode: The video mode to use for the video. Can be CAMERA or SCREEN.
|
github-repos
|
def _CheckIsFile(self, file_entry):
if (definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types):
return False
return file_entry.IsFile()
|
Checks the is_file find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
|
codesearchnet
|
def attribute_string(self):
escape_map = {ord('='): '%3D', ord(','): '%2C', ord(';'): '%3B', ord('&'): '%26', ord('\t'): '%09'}
list_type = type(list())
attrs = self.attributes
if (type(attrs) is OrderedDict):
reserved_attrs = []
other_attrs = []
for (name, value) in attrs.items():
name = name.translate(escape_map)
if (type(value) == list_type):
value = ','.join([i.translate(escape_map) for i in value])
else:
value = value.translate(escape_map)
out_attr = '{0}={1}'.format(name, value)
if name[0].isupper():
reserved_attrs.append(out_attr)
else:
other_attrs.append(out_attr)
out_attrs = ';'.join((reserved_attrs + other_attrs))
else:
out_attrs = attrs
return out_attrs
|
Restore an entry's attributes in their original format, escaping reserved
characters when necessary.
Returns:
str: escaped attributes as tag=value pairs, separated by semi-colon
|
codesearchnet
|
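The escaping relies on `str.translate` with a map from code points to replacement strings; a stdlib-only illustration:

```python
escape_map = {ord('='): '%3D', ord(','): '%2C', ord(';'): '%3B',
              ord('&'): '%26', ord('\t'): '%09'}

print('Note=a,b;c'.translate(escape_map))                          # 'Note%3Da%2Cb%3Bc'
print(','.join(v.translate(escape_map) for v in ['x=1', 'y;2']))   # 'x%3D1,y%3B2'
```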
def _deserialization_helper(self, state, ray_forking):
worker = ray.worker.get_global_worker()
worker.check_connected()
if state["ray_forking"]:
actor_handle_id = state["actor_handle_id"]
else:
actor_handle_id = compute_actor_handle_id_non_forked(
state["actor_handle_id"], worker.current_task_id)
self.__init__(
state["actor_id"],
state["module_name"],
state["class_name"],
state["actor_cursor"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
state["actor_creation_dummy_object_id"],
state["actor_method_cpus"],
state["actor_driver_id"],
actor_handle_id=actor_handle_id)
|
This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
|
juraj-google-style
|
def samefile(path1, path2):
path1, path1_is_storage = format_and_is_storage(path1)
path2, path2_is_storage = format_and_is_storage(path2)
if not path1_is_storage and not path2_is_storage:
return os_path_samefile(path1, path2)
if not path1_is_storage or not path2_is_storage:
return False
with handle_os_exceptions():
system = get_instance(path1)
if system is not get_instance(path2):
return False
elif system.relpath(path1) != system.relpath(path2):
return False
return True
|
Return True if both pathname arguments refer to the same file or directory.
Equivalent to "os.path.samefile".
Args:
path1 (path-like object): Path or URL.
path2 (path-like object): Path or URL.
Returns:
bool: True if same file or directory.
|
juraj-google-style
|
def main(jlink_serial, device):
buf = StringIO.StringIO()
jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
jlink.open(serial_no=jlink_serial)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(device, verbose=True)
sys.stdout.write(('ARM Id: %d\n' % jlink.core_id()))
sys.stdout.write(('CPU Id: %d\n' % jlink.core_cpu()))
sys.stdout.write(('Core Name: %s\n' % jlink.core_name()))
sys.stdout.write(('Device Family: %d\n' % jlink.device_family()))
return 0
|
Prints the core's information.
Args:
jlink_serial (str): the J-Link serial number
device (str): the target CPU
Returns:
Always returns ``0``.
Raises:
JLinkException: on error
|
codesearchnet
|
def from_graph(cls, graph, linear_energy_ranges, quadratic_energy_ranges):
get_env().enable_infix_notation = True
theta = cls.empty(dimod.SPIN)
theta.add_offset(Symbol('offset', REAL))
def Linear(v):
'Create a Symbol for the linear bias including the energy range\n constraints.'
bias = Symbol('h_{}'.format(v), REAL)
(min_, max_) = linear_energy_ranges[v]
theta.assertions.add(LE(bias, limitReal(max_)))
theta.assertions.add(GE(bias, limitReal(min_)))
return bias
def Quadratic(u, v):
'Create a Symbol for the quadratic bias including the energy range\n constraints.'
bias = Symbol('J_{},{}'.format(u, v), REAL)
if ((v, u) in quadratic_energy_ranges):
(min_, max_) = quadratic_energy_ranges[(v, u)]
else:
(min_, max_) = quadratic_energy_ranges[(u, v)]
theta.assertions.add(LE(bias, limitReal(max_)))
theta.assertions.add(GE(bias, limitReal(min_)))
return bias
for v in graph.nodes:
theta.add_variable(v, Linear(v))
for (u, v) in graph.edges:
theta.add_interaction(u, v, Quadratic(u, v))
return theta
|
Create Theta from a graph and energy ranges.
Args:
graph (:obj:`networkx.Graph`):
Provides the structure for Theta.
linear_energy_ranges (dict):
A dict of the form {v: (min, max), ...} where min and max are the
range of values allowed to v.
quadratic_energy_ranges (dict):
A dict of the form {(u, v): (min, max), ...} where min and max
are the range of values allowed to (u, v).
Returns:
:obj:`.Theta`
|
codesearchnet
|
def from_dict(cls, pwinput_dict):
pwinput = cls(structure=Structure.from_dict(pwinput_dict['structure']),
pseudo=pwinput_dict['pseudo'],
control=pwinput_dict['sections']['control'],
system=pwinput_dict['sections']['system'],
electrons=pwinput_dict['sections']['electrons'],
ions=pwinput_dict['sections']['ions'],
cell=pwinput_dict['sections']['cell'],
kpoints_mode=pwinput_dict['kpoints_mode'],
kpoints_grid=pwinput_dict['kpoints_grid'],
kpoints_shift=pwinput_dict['kpoints_shift'])
return pwinput
|
Load a PWInput object from a dictionary.
Args:
pwinput_dict (dict): dictionary with PWInput data
Returns:
PWInput object
|
juraj-google-style
|
def _check_positional_parameters(method_signature, base_signature, is_subtype, ctx):
check_types = True
for base_param_pos, base_param_name in enumerate(base_signature.param_names):
if base_param_pos == 0 or base_param_pos < base_signature.posonly_count:
continue
if base_param_name == '_':
continue
if base_param_pos < method_signature.posonly_count:
return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_COUNT_MISMATCH, 'Too many positional-only parameters in overriding method.')
elif base_param_pos < len(method_signature.param_names):
method_param_name = method_signature.param_names[base_param_pos]
else:
if method_signature.varargs_name:
break
return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_COUNT_MISMATCH, 'Not enough positional parameters in overriding method.')
method_param_name = method_signature.param_names[base_param_pos]
if method_param_name not in (base_param_name, '_'):
log.warning('Name mismatch for parameter %r.', base_param_name)
if not ctx.options.overriding_renamed_parameter_count_checks:
return None
check_types = False
remaining_method_params = method_signature.param_names[len(base_signature.param_names):] if not base_signature.varargs_name else []
for method_param_name in remaining_method_params:
if method_param_name in base_signature.kwonly_params:
continue
if method_param_name not in method_signature.defaults:
return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f"Parameter '{method_param_name}' must have a default value.")
if not check_types:
return None
return _check_positional_parameter_annotations(method_signature, base_signature, is_subtype)
|
Checks that the positional parameters of the overriding method match.
Args:
method_signature: signature of the overriding method.
base_signature: signature of the overridden method.
is_subtype: a binary function to compare types.
ctx: Context
Returns:
SignatureError if a mismatch is detected. Otherwise returns None.
|
github-repos
|
def new_space(self, name=None, bases=None, formula=None, refs=None):
space = self._impl.model.currentspace = self._impl.new_space(name=name, bases=get_impls(bases), formula=formula, refs=refs)
return space.interface
|
Create a child space.
Args:
name (str, optional): Name of the space. Defaults to ``SpaceN``,
where ``N`` is a number determined automatically.
bases (optional): A space or a sequence of spaces to be the base
space(s) of the created space.
formula (optional): Function to specify the parameters of
dynamic child spaces. The signature of this function is used
for setting parameters for dynamic child spaces.
This function should return a mapping of keyword arguments
to be passed to this method when the dynamic child spaces
are created.
Returns:
The new child space.
|
codesearchnet
|
def stream_matching(self, address, name):
matching = [x for x in self.entries if x.valid and x.target.matches(address, name)]
rpc_list = []
for var in matching:
rpc_list.extend(var.generate_rpcs(address))
return rpc_list
|
Return the RPCs needed to stream matching config variables to the given tile.
This function will return a list of tuples suitable for passing to
EmulatedDevice.deferred_rpc.
Args:
address (int): The address of the tile that we wish to stream to
name (str or bytes): The 6 character name of the target tile.
Returns:
list of tuple: The list of RPCs to send to stream these variables to a tile.
|
juraj-google-style
|
def DeleteFeaturesFromFeatureLayer(self, url, sql, chunksize=0):
fl = None
try:
fl = FeatureLayer(url=url, securityHandler=self._securityHandler)
totalDeleted = 0
if (chunksize > 0):
qRes = fl.query(where=sql, returnIDsOnly=True)
if ('error' in qRes):
print(qRes)
return qRes
elif ('objectIds' in qRes):
oids = qRes['objectIds']
total = len(oids)
if (total == 0):
return {'success': True, 'message': 'No features matched the query'}
i = 0
print(('%s features to be deleted' % total))
while (i <= len(oids)):
oidsDelete = ','.join((str(e) for e in oids[i:(i + chunksize)]))
if (oidsDelete == ''):
break
else:
results = fl.deleteFeatures(objectIds=oidsDelete)
if ('deleteResults' in results):
totalDeleted += len(results['deleteResults'])
print(('%s%% Completed: %s/%s ' % (int(((totalDeleted / float(total)) * 100)), totalDeleted, total)))
i += chunksize
else:
print(results)
return {'success': True, 'message': ('%s deleted' % totalDeleted)}
qRes = fl.query(where=sql, returnIDsOnly=True)
if ('objectIds' in qRes):
oids = qRes['objectIds']
if (len(oids) > 0):
print(('%s features to be deleted' % len(oids)))
results = fl.deleteFeatures(where=sql)
if ('deleteResults' in results):
totalDeleted += len(results['deleteResults'])
return {'success': True, 'message': ('%s deleted' % totalDeleted)}
else:
return results
return {'success': True, 'message': ('%s deleted' % totalDeleted)}
else:
print(qRes)
else:
results = fl.deleteFeatures(where=sql)
if (results is not None):
if ('deleteResults' in results):
return {'success': True, 'message': ('%s deleted' % (totalDeleted + len(results['deleteResults'])))}
else:
return results
except:
(line, filename, synerror) = trace()
raise common.ArcRestHelperError({'function': 'DeleteFeaturesFromFeatureLayer', 'line': line, 'filename': filename, 'synerror': synerror})
finally:
fl = None
del fl
gc.collect()
|
Removes features from a hosted feature service layer by SQL query.
Args:
url (str): The URL of the feature service layer.
sql (str): The SQL query to apply against the feature service.
Those features that satisfy the query will be deleted.
chunksize (int): The maximum number of features to remove at a time. Defaults to 0.
Returns:
The result from :py:func:`arcrest.agol.services.FeatureLayer.deleteFeatures`.
Notes:
If you want to delete all features, it is suggested to use the SQL query ``"1=1"``.
|
codesearchnet
|
def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting upsample...')
if (names == 'short'):
tf_name = ('UPSL' + random_string(4))
elif (names == 'keep'):
tf_name = w_name
else:
tf_name = (w_name + str(random.random()))
output_size = params['output_size']
align_corners = (params['align_corners'] > 0)
def target_layer(x, size=output_size, align_corners=align_corners):
import tensorflow as tf
x = tf.transpose(x, [0, 2, 3, 1])
x = tf.image.resize_images(x, size, align_corners=align_corners)
x = tf.transpose(x, [0, 3, 1, 2])
return x
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]])
|
Convert upsample_bilinear2d layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
codesearchnet
|
def xmlstring(self, pretty_print=False):
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if (sys.version < '3'):
if isinstance(s, str):
s = unicode(s, 'utf-8')
elif isinstance(s, bytes):
s = str(s, 'utf-8')
s = s.replace('ns0:', '')
s = s.replace(':ns0', '')
return s
|
Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children
|
codesearchnet
|
def from_json(cls, json, image_config=None):
cls.image_config = image_config
return cls(**{
attr: json.get(attr if key is None else key)
for attr, key in cls.JSON_MAPPING.items()
})
|
Create a model instance
Arguments:
json (:py:class:`dict`): The parsed JSON data.
image_config (:py:class:`dict`): The API image configuration
data.
Returns:
:py:class:`BaseModel`: The model instance.
|
juraj-google-style
|
def _update_context_field_binary_composition(present_locations, expression):
if (not any((isinstance(expression.left, ContextField), isinstance(expression.right, ContextField)))):
raise AssertionError(u'Received a BinaryComposition {} without any ContextField operands. This should never happen.'.format(expression))
if isinstance(expression.left, ContextField):
context_field = expression.left
(location_name, _) = context_field.location.get_location_name()
if (location_name not in present_locations):
return TrueLiteral
if isinstance(expression.right, ContextField):
context_field = expression.right
(location_name, _) = context_field.location.get_location_name()
if (location_name not in present_locations):
return TrueLiteral
return expression
|
Lower BinaryCompositions involving non-existent ContextFields to True.
Args:
present_locations: set of all locations in the current MatchQuery that have not been pruned
expression: BinaryComposition with at least one ContextField operand
Returns:
TrueLiteral iff either ContextField operand is not in `present_locations`,
and the original expression otherwise
|
codesearchnet
|
def wind_speed(self, value=999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `wind_speed`'.format(value))
if (value < 0.0):
raise ValueError('value need to be greater or equal 0.0 for field `wind_speed`')
if (value > 40.0):
raise ValueError('value need to be smaller 40.0 for field `wind_speed`')
self._wind_speed = value
|
Corresponds to IDD Field `wind_speed`
Args:
value (float): value for IDD Field `wind_speed`
Unit: m/s
value >= 0.0
value <= 40.0
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def __new__(cls, name, bases, attrs):
new_class = super(PipelineFormMediaMetaClass, cls).__new__(
cls, name, bases, attrs)
if 'css_packages' in attrs:
new_class.css = PipelineFormMediaProperty(
cls._get_css_files, new_class, attrs.get('css') or {})
if 'js_packages' in attrs:
new_class.js = PipelineFormMediaProperty(
cls._get_js_files, new_class, attrs.get('js') or [])
return new_class
|
Construct the class.
Args:
name (bytes):
The name of the class.
bases (tuple):
The base classes for the class.
attrs (dict):
The attributes going into the class.
Returns:
type:
The new class.
|
juraj-google-style
|
def google_api_build_errors(config, auth, api_call, errors):
if 'bigquery' in errors:
errors['bigquery']['schema'] = ERROR_SCHEMA
errors['bigquery']['format'] = 'JSON'
errors['bigquery']['skip_rows'] = 0
errors['bigquery']['disposition'] = 'WRITE_TRUNCATE'
table_create(config, errors['bigquery'].get('auth', auth), config.project, errors['bigquery']['dataset'], errors['bigquery']['table'], errors['bigquery']['schema'], overwrite=False)
return errors
|
Builds the BigQuery table to house the Google API call errors.
Optional piece of the recipe, will create a BigQuery table for errors.
Takes errors, which defines a bigquery endpoint, and adds fields.
Args:
auth (string): either "user" or "service" to make the BigQuery call.
api_call (dict): the JSON for the API call as defined in recipe.
errors (dict): defines where the data will be written
Returns (dict):
The modified errors JSON with BigQuery schema and load settings added.
Raises:
ValueError: If a required key in the recipe is missing.
|
github-repos
|
def logical_and(x1, x2):
if any_symbolic_tensors((x1, x2)):
return LogicalAnd().symbolic_call(x1, x2)
return backend.numpy.logical_and(x1, x2)
|
Computes the element-wise logical AND of the given input tensors.
Zeros are treated as `False` and non-zeros are treated as `True`.
Args:
x1: Input tensor.
x2: Input tensor.
Returns:
Output tensor, element-wise logical AND of the inputs.
|
github-repos
|
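Typical usage through the public ops namespace, assuming Keras 3 is installed:

```python
import numpy as np
from keras import ops

x1 = np.array([True, True, False, False])
x2 = np.array([True, False, True, False])
print(ops.logical_and(x1, x2))  # elementwise AND: [True, False, False, False]
```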
def _update_state_from_shard_states(self, state, shard_states, control):
state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0
total_shards = 0
processed_counts = []
processed_status = []
state.counters_map.clear()
for s in shard_states:
total_shards += 1
status = 'unknown'
if s.active:
state.active_shards += 1
status = 'running'
if s.result_status == model.ShardState.RESULT_SUCCESS:
status = 'success'
elif s.result_status == model.ShardState.RESULT_ABORTED:
state.aborted_shards += 1
status = 'aborted'
elif s.result_status == model.ShardState.RESULT_FAILED:
state.failed_shards += 1
status = 'failed'
state.counters_map.add_map(s.counters_map)
processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))
processed_status.append(status)
state.set_processed_counts(processed_counts, processed_status)
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
spec = state.mapreduce_spec
if total_shards != spec.mapper.shard_count:
logging.error("Found %d shard states. Expect %d. "
"Issuing abort command to job '%s'",
total_shards, spec.mapper.shard_count,
spec.mapreduce_id)
model.MapreduceControl.abort(spec.mapreduce_id)
state.active = bool(state.active_shards)
if not control and (state.failed_shards or state.aborted_shards):
model.MapreduceControl.abort(spec.mapreduce_id)
if not state.active:
if state.failed_shards or not total_shards:
state.result_status = model.MapreduceState.RESULT_FAILED
elif state.aborted_shards:
state.result_status = model.MapreduceState.RESULT_ABORTED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
self._finalize_outputs(spec, state)
self._finalize_job(spec, state)
else:
@db.transactional(retries=5)
def _put_state():
fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping controller task.", spec.mapreduce_id)
return
config = util.create_datastore_write_config(spec)
state.put(config=config)
_put_state()
|
Update mapreduce state by examining shard states.
Args:
state: current mapreduce state as MapreduceState.
shard_states: an iterator over shard states.
control: model.MapreduceControl entity.
|
juraj-google-style
|
def check_and_update_resources(num_cpus, num_gpus, resources):
if resources is None:
resources = {}
resources = resources.copy()
assert "CPU" not in resources
assert "GPU" not in resources
if num_cpus is not None:
resources["CPU"] = num_cpus
if num_gpus is not None:
resources["GPU"] = num_gpus
if "CPU" not in resources:
resources["CPU"] = multiprocessing.cpu_count()
gpu_ids = ray.utils.get_cuda_visible_devices()
if ("GPU" in resources and gpu_ids is not None
and resources["GPU"] > len(gpu_ids)):
raise Exception("Attempting to start raylet with {} GPUs, "
"but CUDA_VISIBLE_DEVICES contains {}.".format(
resources["GPU"], gpu_ids))
if "GPU" not in resources:
resources["GPU"] = _autodetect_num_gpus()
if gpu_ids is not None:
resources["GPU"] = min(resources["GPU"], len(gpu_ids))
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity != 0
}
for _, resource_quantity in resources.items():
assert (isinstance(resource_quantity, int)
or isinstance(resource_quantity, float))
if (isinstance(resource_quantity, float)
and not resource_quantity.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers. Received {}.".
format(resources))
if resource_quantity < 0:
raise ValueError(
"Resource quantities must be nonnegative. Received {}.".format(
resources))
if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:
raise ValueError("Resource quantities must be at most {}.".format(
ray_constants.MAX_RESOURCE_QUANTITY))
return resources
|
Sanity check a resource dictionary and add sensible defaults.
Args:
num_cpus: The number of CPUs.
num_gpus: The number of GPUs.
resources: A dictionary mapping resource names to resource quantities.
Returns:
A new resource dictionary.
|
juraj-google-style
|
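A standalone sketch of the defaulting behaviour (no Ray import); the CPU count comes from `multiprocessing` and GPU auto-detection is stubbed to zero for illustration.
```python
import multiprocessing

def fill_resource_defaults(num_cpus=None, num_gpus=None, resources=None):
    # Mirrors the defaulting above: explicit arguments win, then sensible defaults.
    resources = dict(resources or {})
    if num_cpus is not None:
        resources["CPU"] = num_cpus
    if num_gpus is not None:
        resources["GPU"] = num_gpus
    resources.setdefault("CPU", multiprocessing.cpu_count())
    resources.setdefault("GPU", 0)  # stand-in for _autodetect_num_gpus()
    return {k: v for k, v in resources.items() if v != 0}

print(fill_resource_defaults(num_cpus=4, num_gpus=1, resources={"memory": 8}))
# -> {'CPU': 4, 'GPU': 1, 'memory': 8}
```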
def _UpdateSudoer(self, user, sudoer=False):
if sudoer:
self.logger.info('Adding user %s to the Google sudoers group.', user)
command = self.gpasswd_add_cmd.format(user=user, group=self.google_sudoers_group)
else:
self.logger.info('Removing user %s from the Google sudoers group.', user)
command = self.gpasswd_remove_cmd.format(user=user, group=self.google_sudoers_group)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not update user %s. %s.', user, str(e))
return False
else:
self.logger.debug('Updated user %s in the Google sudoers group.', user)
return True
|
Update sudoer group membership for a Linux user account.
Args:
user: string, the name of the Linux user account.
sudoer: bool, True if the user should be a sudoer.
Returns:
bool, True if user update succeeded.
|
codesearchnet
|
def stage_tc_batch(self, owner, staging_data):
batch = self.tcex.batch(owner)
for group in staging_data.get('group') or []:
variable = group.pop('variable', None)
path = group.pop('path', None)
data = self.path_data(group, path)
if group.get('xid') is None:
group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)
group['ownerName'] = owner
batch.add_group(group)
if variable is not None and data is not None:
self.stage_redis(variable, self.stage_tc_group_entity(data))
for indicator in staging_data.get('indicator') or []:
variable = indicator.pop('variable', None)
path = indicator.pop('path', None)
if indicator.get('xid') is None:
indicator['xid'] = self.stage_tc_batch_xid(
indicator.get('type'), indicator.get('summary'), owner
)
indicator['ownerName'] = owner
batch.add_indicator(indicator)
data = self.path_data(dict(indicator), path)
if variable is not None and data is not None:
self.stage_redis(variable, self.stage_tc_indicator_entity(data))
batch_results = batch.submit()
self.log.debug('[stage] Batch Results: {}'.format(batch_results))
for error in batch_results.get('errors') or []:
self.log.error('[stage] {}'.format(error))
|
Stage data in ThreatConnect Platform using batch API.
Args:
owner (str): The ThreatConnect owner to submit batch job.
staging_data (dict): A dict of ThreatConnect batch data.
|
juraj-google-style
|
def __new__(mcls, name, parents, attributes):
return type.__new__(mcls, name, parents, attributes)
|
Creates a new Type object (an instance of TypeMetaclass).
Args:
name (str): the name of the new type.
parents (tuple): the superclasses of the new type.
attributes (dict): a map from name to value of the "parameters" defining
the new type.
|
juraj-google-style
|
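A small self-contained sketch of how a metaclass with this `__new__` is used; the `TypeMetaclass` name comes from the docstring and the attribute values are made up.
```python
class TypeMetaclass(type):
    def __new__(mcls, name, parents, attributes):
        return type.__new__(mcls, name, parents, attributes)

# Classes can be created programmatically by calling the metaclass directly.
Point = TypeMetaclass('Point', (object,), {'x': 0, 'y': 0})
print(Point, Point.x, Point.y)  # <class '__main__.Point'> 0 0
```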
def get_video_transcript_url(video_id, language_code):
video_transcript = VideoTranscript.get_or_none(video_id, language_code)
if video_transcript:
return video_transcript.url()
|
Returns the course video transcript url, or None if no transcript exists.
Arguments:
video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component.
language_code: language code of a video transcript
|
juraj-google-style
|
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
return IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id)
|
Returns a new `IGraphNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node.
|
juraj-google-style
|
def sym_jsonify(self, compact: bool=True, type_info: bool=True, **kwargs) -> Any:
if not compact:
json_value = super().sym_jsonify(**kwargs)
assert isinstance(json_value, dict), json_value
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(self._cloneable_metadata_keys)
return json_value
if self.children:
child_nodes = [c.sym_jsonify(compact, type_info=False, **kwargs) for c in self.children]
if self.value is not None:
if len(child_nodes) == 1:
single_choice = child_nodes[0]
if isinstance(single_choice, tuple):
value = (self.value,) + single_choice
else:
value = (self.value, single_choice)
else:
value = (self.value, child_nodes)
else:
value = child_nodes
else:
value = self.value
if type_info:
json_value = {utils.JSONConvertible.TYPE_NAME_KEY: self.__class__.__serialization_key__, 'format': 'compact', 'value': symbolic.to_json(value)}
if self.metadata:
json_value['metadata'] = symbolic.to_json(self.metadata)
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(self._cloneable_metadata_keys)
return json_value
else:
return value
|
Convert DNA to JSON object.
Args:
compact: Whether use compact form. If compact, the nested number structure
in DNA.parse will be used, otherwise members will be rendered out as
regular symbolic Object.
type_info: If True, type information will be included in output, otherwise
type information will not be included. Applicable when compact is set
to True.
**kwargs: Keyword arguments that will be passed to symbolic.Object if
compact is False.
Returns:
JSON representation of DNA.
|
github-repos
|
def make_graph_def_with_constant_nodes(node_sizes: Sequence[int], dtype: Optional[dtypes.DType]=None, **function_node_sizes) -> graph_pb2.GraphDef:
dtype = dtype or dtypes.float32
graph_def = graph_pb2.GraphDef()
n = 0
def add_nodes(node_list, sizes):
nonlocal n
for s in sizes:
node = node_list.add(name=f'Const_{n}', op='Const')
node.attr['value'].tensor.MergeFrom(tensor_util.make_tensor_proto(np.ones([]), dtype=dtype))
remaining_size = s - node.ByteSize()
if remaining_size < 0:
raise ValueError(f'Unable to create node of size {s} bytes.')
constant_size = [math.ceil(remaining_size / dtype.size)]
node.attr['value'].tensor.Clear()
node.attr['value'].tensor.MergeFrom(tensor_util.make_tensor_proto(np.random.random_sample(constant_size), dtype=dtype))
n += 1
add_nodes(graph_def.node, node_sizes)
for fn_name, fn_sizes in function_node_sizes.items():
fn = graph_def.library.function.add()
fn.signature.name = fn_name
add_nodes(fn.node_def, fn_sizes)
return graph_def
|
Creates a GraphDef with approximate node sizes.
Args:
node_sizes: list of ints, the approximate desired sizes of the nodes in the
GraphDef.
dtype: Dtype of encoded constant values (float32 or float64).
**function_node_sizes: Map of function name to FunctionDef node sizes (see
`node_sizes`).
Returns:
A GraphDef proto.
|
github-repos
|
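Usage sketch (assumes TensorFlow is installed and the helper above is in scope); the `my_fn` name and the sizes are arbitrary.
```python
graph_def = make_graph_def_with_constant_nodes(
    [1000, 2000],   # two top-level Const nodes of roughly 1 KB and 2 KB
    dtype=None,     # falls back to float32
    my_fn=[500],    # one ~500-byte Const node inside a FunctionDef named "my_fn"
)
print(len(graph_def.node), len(graph_def.library.function))  # 2 1
```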
def split(self, amount):
split_objs = list(self.all())
if (not split_objs):
raise NoSplitsFoundForRecurringCost()
portions = [split_obj.portion for split_obj in split_objs]
split_amounts = ratio_split(amount, portions)
return [(split_objs[i], split_amount) for (i, split_amount) in enumerate(split_amounts)]
|
Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
|
codesearchnet
|
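The proportional arithmetic is delegated to `ratio_split`; below is a minimal standalone sketch of that idea (not the project's actual helper), keeping the shares summing exactly to the input amount.
```python
from decimal import Decimal, ROUND_DOWN

def ratio_split_sketch(amount, portions):
    # Split `amount` proportionally to `portions`; the rounding remainder
    # is assigned to the last share so the total is preserved.
    total = sum(portions)
    shares = [(amount * p / total).quantize(Decimal('0.01'), rounding=ROUND_DOWN)
              for p in portions]
    shares[-1] += amount - sum(shares)
    return shares

print(ratio_split_sketch(Decimal('100.00'), [Decimal('1'), Decimal('2')]))
# -> [Decimal('33.33'), Decimal('66.67')]
```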
def save_model(self, fname, pretty=False):
with open(fname, 'w') as f:
xml_str = ET.tostring(self.root, encoding='unicode')
if pretty:
parsed_xml = xml.dom.minidom.parseString(xml_str)
xml_str = parsed_xml.toprettyxml(newl='')
f.write(xml_str)
|
Saves the xml to file.
Args:
fname: output file location
pretty: attempts (best effort) to pretty-print the output
|
codesearchnet
|
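The pretty-printing step can be exercised on its own; a self-contained sketch with a throwaway element tree (element names are made up).
```python
import xml.dom.minidom
import xml.etree.ElementTree as ET

root = ET.Element('model')
ET.SubElement(root, 'body', name='base')
xml_str = ET.tostring(root, encoding='unicode')
print(xml.dom.minidom.parseString(xml_str).toprettyxml(newl=''))
```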
def add_execution_event(self, context_id, event):
if context_id not in self._contexts:
LOGGER.warning("Context_id not in contexts, %s", context_id)
return False
context = self._contexts.get(context_id)
context.add_execution_event(event)
return True
|
Within a context, append data to the execution result.
Args:
context_id (str): the context id returned by create_context
event: the execution event to append to the context's result
Returns:
(bool): True if the operation is successful, False if
the context_id doesn't reference a known context.
|
juraj-google-style
|
class EosTokenCriteria(StoppingCriteria):
def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
if not isinstance(eos_token_id, torch.Tensor):
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id = torch.tensor(eos_token_id)
self.eos_token_id = eos_token_id
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
self.eos_token_id = self.eos_token_id.to(input_ids.device)
is_done = isin_mps_friendly(input_ids[:, -1], self.eos_token_id)
return is_done
|
This class can be used to stop generation whenever the "end-of-sequence" token is generated.
By default, it uses the `model.generation_config.eos_token_id`.
Args:
eos_token_id (`Union[int, List[int], torch.Tensor]`):
The id(s) of the *end-of-sequence* token.
|
github-repos
|
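A hedged usage sketch with dummy tensors; it assumes a recent `transformers` release where `EosTokenCriteria` lives in `transformers.generation.stopping_criteria` (import path assumed).
```python
import torch
from transformers import StoppingCriteriaList
from transformers.generation.stopping_criteria import EosTokenCriteria  # assumed path

criteria = StoppingCriteriaList([EosTokenCriteria(eos_token_id=2)])
input_ids = torch.tensor([[5, 7, 2], [5, 7, 9]])  # last tokens: 2 (EOS) and 9
scores = torch.zeros(2, 10)                       # unused by this criterion
print(criteria[0](input_ids, scores))             # tensor([ True, False])
```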
def AssertIterableType(iterable, expected_item_type):
if isinstance(iterable, collections.Iterator):
message = "Expected iterable container but got iterator `%s` instead"
message %= iterable
raise TypeError(message)
AssertType(iterable, collections.Iterable)
for item in iterable:
AssertType(item, expected_item_type)
|
Ensures that given iterable container has certain type.
Args:
iterable: An iterable container to assert the type for.
expected_item_type: An expected type of the container items.
Raises:
TypeError: If the given container is not an iterable or its items do not
have the expected type.
|
juraj-google-style
|
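Usage sketch; it assumes `AssertIterableType` (and the `AssertType` helper it calls) are imported from the module above.
```python
AssertIterableType([1, 2, 3], int)            # passes silently
AssertIterableType({"a", "b"}, str)           # any non-iterator iterable is accepted
try:
    AssertIterableType(iter([1, 2, 3]), int)  # one-shot iterators are rejected
except TypeError as e:
    print(e)
```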
def filecmp(filename_a, filename_b):
size_a = FileIO(filename_a, 'rb').size()
size_b = FileIO(filename_b, 'rb').size()
if size_a != size_b:
return False
crc_a = file_crc32(filename_a)
crc_b = file_crc32(filename_b)
return crc_a == crc_b
|
Compare two files, returning True if they are the same, False otherwise.
We check size first and return False quickly if the files are different sizes.
If they are the same size, we continue by generating a CRC for the whole file.
You might wonder: why not use Python's `filecmp.cmp()` instead? The answer is
that the builtin library is not robust to the many different filesystems
TensorFlow runs on, and so we here perform a similar comparison with
the more robust FileIO.
Args:
filename_a: string path to the first file.
filename_b: string path to the second file.
Returns:
True if the files are the same, False otherwise.
|
github-repos
|
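The same size-then-checksum strategy in plain Python, as a sketch that avoids TensorFlow's `FileIO` by using `os.path.getsize` and `zlib.crc32`.
```python
import os
import zlib

def filecmp_sketch(path_a, path_b, chunk_size=1 << 20):
    # Cheap size check first, then a streaming CRC32 over each file.
    if os.path.getsize(path_a) != os.path.getsize(path_b):
        return False

    def crc32_of(path):
        crc = 0
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                crc = zlib.crc32(chunk, crc)
        return crc

    return crc32_of(path_a) == crc32_of(path_b)
```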
def registration_info_request(self, registration_id):
return self.requests_session.get(
self.INFO_END_POINT + registration_id,
params={'details': 'true'}
)
|
Makes a request for registration info and returns the response object
Args:
registration_id: id to be checked
Returns:
response of registration info request
|
juraj-google-style
|
def json(self) -> dict:
content = {}
content['name'] = self.name
content['callback'] = self.callback
self.control_json['content'] = content
return self.control_json
|
Returns json compatible state of the Button instance.
Returns:
control_json: Json representation of Button state.
|
codesearchnet
|
def __init__(self, dns_ip):
self._dns_ip = dns_ip
self._resolver = ProxyResolver()
try:
self._resolver.set_proxies([self._dns_ip])
except async_dns.address.InvalidHost as e:
msg = f'RecordChecker got invalid DNS server IP: {e}.'
raise exceptions.InvalidDNSHost(msg)
|
Setup RecordChecker object.
Args:
dns_ip: DNS server IP to query.
|
juraj-google-style
|
def extract_object_files(archive_file: io.BufferedIOBase, dest_dir: str) -> None:
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
_check_archive_signature(archive_file)
extracted_files = dict()
for name, file_content in _extract_next_file(archive_file):
digest = hashlib.md5(file_content).digest()
for final_name in _generate_modified_filenames(name):
if final_name not in extracted_files:
extracted_files[final_name] = digest
with open(os.path.join(dest_dir, final_name), 'wb') as object_file:
object_file.write(file_content)
break
elif extracted_files[final_name] == digest:
break
|
Extracts object files from the archive path to the destination directory.
Extracts object files from the given BSD variant archive file. The extracted
files are written to the destination directory, which will be created if the
directory does not exist.
Colliding object file names are automatically renamed upon extraction in order
to avoid unintended overwriting.
Args:
archive_file: The archive file object pointing at its beginning.
dest_dir: The destination directory path in which the extracted object files
will be written. The directory will be created if it does not exist.
|
github-repos
|
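Typical call pattern, assuming the function above is importable; the archive and output paths are placeholders.
```python
with open('libfoo.a', 'rb') as archive_file:   # placeholder BSD-style archive
    extract_object_files(archive_file, 'extracted_objects')
```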
def check_valid_values(function):
def decorated(self, X, *args, **kwargs):
if isinstance(X, pd.DataFrame):
W = X.values
else:
W = X
if not len(W):
raise ValueError('Your dataset is empty.')
if W.dtype not in [np.dtype('float64'), np.dtype('int64')]:
raise ValueError('There are non-numerical values in your data.')
if np.isnan(W).any().any():
raise ValueError('There are nan values in your data.')
return function(self, X, *args, **kwargs)
return decorated
|
Raises an exception if the given values are not supported.
Args:
function(callable): Method whose unique argument is a numpy.array-like object.
Returns:
callable: Decorated function
Raises:
ValueError: If there are missing or invalid values or if the dataset is empty.
|
juraj-google-style
|
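Usage sketch for the decorator (assumes `check_valid_values` above is in scope, plus numpy and pandas).
```python
import numpy as np
import pandas as pd

class Model:
    @check_valid_values
    def fit(self, X):
        return X.shape

m = Model()
print(m.fit(pd.DataFrame({'a': [1.0, 2.0]})))    # (2, 1)
try:
    m.fit(pd.DataFrame({'a': [1.0, np.nan]}))
except ValueError as e:
    print(e)                                     # There are nan values in your data.
```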
def _convert_as_saved_model(self):
temp_dir = tempfile.mkdtemp()
try:
self._freeze_keras_model(temp_dir)
if self.saved_model_dir:
return super(TFLiteKerasModelConverter, self).convert()
finally:
shutil.rmtree(temp_dir, True)
|
Converts a Keras model as a saved model.
Returns:
The converted data in serialized format.
|
github-repos
|
def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, swap_memory, name):
if not isinstance(parallel_iterations, int) or parallel_iterations <= 0:
raise ValueError("'parallel_iterations' must be a positive integer: %s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._maximum_iterations = maximum_iterations
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
self._pivot_for_pred = None
self._pivot_for_body = None
self._pivot = None
self._loop_exits = []
self._loop_enters = []
self._graph = ops.get_default_graph()
|
Creates a new `WhileContext` from arguments.
Args:
maximum_iterations: Optional upper bound on number of loop iterations.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
|
github-repos
|
def setup(app):
for (name, (default, rebuild, _)) in ref.CONFIG_VALUES.iteritems():
app.add_config_value(name, default, rebuild)
app.add_directive('javaimport', ref.JavarefImportDirective)
app.add_role('javaref', ref.JavarefRole(app))
app.connect('builder-inited', initialize_env)
app.connect('env-purge-doc', ref.purge_imports)
app.connect('env-merge-info', ref.merge_imports)
app.connect('build-finished', ref.cleanup)
|
Register the extension with Sphinx.
Args:
app: The Sphinx application.
|
codesearchnet
|
def average(self, selector=identity):
if self.closed():
raise ValueError('Attempt to call average() on a closed Queryable.')
if (not is_callable(selector)):
raise TypeError('average() parameter selector={0} is not callable'.format(repr(selector)))
total = 0
count = 0
for item in self.select(selector):
total += item
count += 1
if (count == 0):
raise ValueError('Cannot compute average() of an empty sequence.')
return (total / count)
|
Return the arithmetic mean of the values in the sequence.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
selector: An optional single argument function which will be used
to project the elements of the sequence. If omitted, the
identity function is used.
Returns:
The arithmetic mean value of the projected sequence.
Raises:
ValueError: If the Queryable has been closed.
ValueError: If the source sequence is empty.
|
codesearchnet
|
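A hedged usage sketch, assuming this `Queryable` comes from the asq library and using its `query` initiator (an assumption; the import path may differ).
```python
from asq.initiators import query  # assumed import path

print(query([1, 2, 3, 4]).average())           # 2.5
print(query(['a', 'bb', 'ccc']).average(len))  # 2.0
```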