code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes)
---|---|---|
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
num_image_tokens = [self.image_seq_length] * len(image_sizes)
num_image_patches = [1] * len(image_sizes)
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
|
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
|
github-repos
|
def PreparePairedSequenceBatch(source, target_in, pad=0):
    target = target_in[:, :-1]
    target_y = target_in[:, 1:]
    source_mask = np.reshape(source != pad, (source.shape[0], 1, 1, source.shape[-1]))
    target_mask = MakeTargetMask(target, pad)
    memory_mask = np.reshape(np.arange(target.shape[-1]) < source.shape[-1], [-1, 1])
    ntokens = np.sum(target_y != pad)
    return (source, target, target_y, source_mask, target_mask, memory_mask, ntokens)
|
Build masks for this batch.
Args:
source: (batch, source_len) array of integer-coded symbols for inputs
target_in: (batch, batch_len) array of integer-coded symbols for targets
pad: int: the padding symbol used to pad the above
Returns:
Prepared batch of tuple of arrays: source, input-target, shifted-target,
source mask, target mask, source-target "memory" mask, minibatch token count
|
codesearchnet
|
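Example (PreparePairedSequenceBatch): a minimal usage sketch on a toy padded batch. MakeTargetMask is not shown in this entry, so a simple padding-plus-causal mask is assumed here; the shapes follow the docstring.
import numpy as np

def MakeTargetMask(target, pad=0):
    # Assumed helper: padding mask combined with a causal (lower-triangular) mask.
    pad_mask = np.reshape(target != pad, (target.shape[0], 1, 1, target.shape[-1]))
    causal = np.tril(np.ones((1, 1, target.shape[-1], target.shape[-1]), dtype=bool))
    return pad_mask & causal

source = np.array([[4, 5, 6, 0]])        # (batch=1, source_len=4), 0 is padding
target_in = np.array([[1, 7, 8, 9, 0]])  # (batch=1, target_len=5)
target = target_in[:, :-1]               # decoder input
target_y = target_in[:, 1:]              # shifted target
source_mask = np.reshape(source != 0, (source.shape[0], 1, 1, source.shape[-1]))
target_mask = MakeTargetMask(target)
print(target.shape, target_y.shape, source_mask.shape, target_mask.shape)
# (1, 4) (1, 4) (1, 1, 1, 4) (1, 1, 4, 4)
print(int(np.sum(target_y != 0)))        # 3 non-padding target tokens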
def list_dir(self, context):
doc = inspect.getdoc(context)
listing = ""
listing += "\n"
listing += annotate.context_name(context) + "\n"
if doc is not None:
doc = inspect.cleandoc(doc)
listing += doc + "\n"
listing += "\nDefined Functions:\n"
is_dict = False
if isinstance(context, dict):
funs = context.keys()
is_dict = True
else:
funs = utils.find_all(context)
for fun in sorted(funs):
override_name = None
if is_dict:
override_name = fun
fun = self.find_function(context, fun)
if isinstance(fun, dict):
if is_dict:
listing += " - " + override_name + '\n'
else:
listing += " - " + fun.metadata.name + '\n'
else:
listing += " - " + fun.metadata.signature(name=override_name) + '\n'
if annotate.short_description(fun) != "":
listing += " " + annotate.short_description(fun) + '\n'
listing += "\nBuiltin Functions\n"
for bif in sorted(self.builtins.keys()):
listing += ' - ' + bif + '\n'
listing += '\n'
return listing
|
Return a listing of all of the functions in this context including builtins.
Args:
context (object): The context to print a directory for.
Returns:
str
|
juraj-google-style
|
def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
config_dict = {}
config_dict['text_config'] = text_config
config_dict['vision_config'] = vision_config
return cls.from_dict(config_dict, **kwargs)
|
Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision
model configuration.
Returns:
[`OwlViTConfig`]: An instance of a configuration object
|
github-repos
|
def set_token(self, token):
self.token = token
self.set_header(
'Authorization',
"Bearer {}".format(token)
)
|
Set the token for the v20 context
Args:
token: The token used to access the v20 REST api
|
juraj-google-style
|
def _print_choice_field(self, field_name: str, field: descriptor.FieldDescriptor, choice_container: message.Message) -> None:
if len(choice_container.DESCRIPTOR.oneofs) != 1:
raise ValueError(f'Invalid value for choice field {field_name}: {choice_container}.')
oneof_group = choice_container.DESCRIPTOR.oneofs[0]
set_oneof_name = choice_container.WhichOneof(oneof_group.name)
if set_oneof_name is None:
raise ValueError(f'Oneof not set on choice type: {choice_container.DESCRIPTOR.full_name}.')
value_field = choice_container.DESCRIPTOR.fields_by_name[set_oneof_name]
oneof_field_name = proto_utils.json_field_name(value_field)
oneof_field_name = oneof_field_name[0].upper() + oneof_field_name[1:]
value = proto_utils.get_value_at_field(choice_container, value_field)
if annotation_utils.is_primitive_type(value_field.message_type):
self._print_primitive_field(field_name + oneof_field_name, value_field, value)
else:
self._print_message_field(field_name + oneof_field_name, value_field, value)
|
Prints a FHIR choice field.
This field is expected to have one valid oneof set.
Args:
field_name: The name of the field.
field: The FieldDescriptor whose contents to print.
choice_container: The value present at field, which should be a oneof with
a single value set.
|
github-repos
|
def _ParseCachedEntry2003(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(
value_data[cached_entry_offset:], cached_entry_offset,
self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse cached entry value with error: {0!s}'.format(
exception))
path_size = cached_entry.path_size
maximum_path_size = cached_entry.maximum_path_size
path_offset = cached_entry.path_offset
if path_offset > 0 and path_size > 0:
path_size += path_offset
maximum_path_size += path_offset
try:
path = value_data[path_offset:path_size].decode('utf-16-le')
except UnicodeDecodeError:
raise errors.ParseError('Unable to decode cached entry path to string')
cached_entry_object = AppCompatCacheCachedEntry()
cached_entry_object.cached_entry_size = (
self._cached_entry_data_type_map.GetByteSize())
cached_entry_object.file_size = getattr(cached_entry, 'file_size', None)
cached_entry_object.last_modification_time = (
cached_entry.last_modification_time)
cached_entry_object.path = path
return cached_entry_object
|
Parses a Windows 2003 cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed.
|
juraj-google-style
|
def _fluent_size(self, fluents, ordering) -> Sequence[Sequence[int]]:
shapes = []
for name in ordering:
fluent = fluents[name]
shape = self._param_types_to_shape(fluent.param_types)
shapes.append(shape)
return tuple(shapes)
|
Returns the sizes of `fluents` following the given `ordering`.
Returns:
Sequence[Sequence[int]]: A tuple of tuple of integers
representing the shape and size of each fluent.
|
codesearchnet
|
def get(self, attr, value=None, resolve=True):
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
|
Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
|
codesearchnet
|
def _GetParentModificationTime(self, gzip_file_entry):
parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(gzip_file_entry.path_spec.parent)
if (not parent_file_entry):
return None
return parent_file_entry.modification_time
|
Retrieves the modification time of the file entry's parent file.
Note that this retrieves the time from the file entry of the parent of the
gzip file entry's path spec, which is different from trying to retrieve it
from the gzip file entry's parent file entry.
It would be preferable to retrieve the modification time from the metadata
in the gzip file itself, but it appears to not be set when the file is
written by fseventsd.
Args:
gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing
the fseventsd data.
Returns:
dfdatetime.DateTimeValues: parent modification time, or None if not
available.
|
codesearchnet
|
def label_count(self):
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
|
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
{'a': 3, 'b': 2}
|
codesearchnet
|
def getRetinas(self, retina_name=None):
resourcePath = '/retinas'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [retina.Retina(**r) for r in response.json()]
|
Information about retinas
Args:
retina_name, str: The retina name (optional)
Returns: Array[Retina]
|
juraj-google-style
|
def from_hising(cls, h, J, offset=None):
poly = {(k,): v for k, v in h.items()}
poly.update(J)
if offset is not None:
poly[frozenset([])] = offset
return cls(poly, Vartype.SPIN)
|
Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
|
juraj-google-style
|
def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
tokenizer_files_map = {}
for file_name in tokenization_files:
search = _re_tokenizer_file.search(file_name)
if search is not None:
v = search.groups()[0]
tokenizer_files_map[v] = file_name
available_versions = sorted(tokenizer_files_map.keys())
tokenizer_file = FULL_TOKENIZER_FILE
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
tokenizer_file = tokenizer_files_map[v]
else:
break
return tokenizer_file
|
Get the tokenization file to use for this version of transformers.
Args:
tokenization_files (`List[str]`): The list of available configuration files.
Returns:
`str`: The tokenization file to use.
|
github-repos
|
def p40baro(msg):
d = hex2bin(data(msg))
if (d[26] == '0'):
return None
p = ((bin2int(d[27:39]) * 0.1) + 800)
return p
|
Barometric pressure setting
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
float: pressure in millibar
|
codesearchnet
|
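Example (p40baro): a self-contained sketch of the same bit arithmetic. The bin2int helper from the original decoder is re-implemented locally by assumption, and the 56-bit data field is passed in directly rather than extracted from the 28-character hex message.
def bin2int(binstr):
    # Assumed helper matching the original decoder's bin2int.
    return int(binstr, 2)

def p40baro_sketch(d):
    # d is the 56-bit data field as a bit string; bit 27 is the status flag,
    # bits 28-39 encode the setting in 0.1 mb steps above 800 mb.
    if d[26] == '0':
        return None
    return bin2int(d[27:39]) * 0.1 + 800

bits = '0' * 26 + '1' + format(2132, '012b') + '0' * 17   # 56 bits, encoded value 2132
print(p40baro_sketch(bits))   # ~1013.2 (standard pressure in millibar)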
def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:
    print('\n')
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
if not diff_obj.b_path.endswith('.py') and (not diff_obj.b_path.endswith('.md')):
continue
if diff_obj.change_type in ['A']:
code_diff.append(diff_obj.b_path)
elif diff_obj.change_type in ['M', 'R']:
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
elif diff_contains_doc_examples(repo, commit, diff_obj.b_path):
code_diff.append(diff_obj.a_path)
else:
print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.")
return code_diff
|
Get the diff in doc examples between a base commit and one or several commits.
Args:
repo (`git.Repo`):
A git repository (for instance the Transformers repo).
base_commit (`str`):
The commit reference of where to compare for the diff. This is the current commit, not the branching point!
commits (`List[str]`):
The list of commits with which to compare the repo at `base_commit` (so the branching point).
Returns:
`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files
modified are returned if the diff in the file is only in doctest examples).
|
github-repos
|
def _make_3d(field, twod):
shp = list(field.shape)
if (twod and ('X' in twod)):
shp.insert(1, 1)
elif twod:
shp.insert(0, 1)
return field.reshape(shp)
|
Add a dimension to field if necessary.
Args:
field (numpy.array): the field that need to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field.
|
codesearchnet
|
def _CreatePlacemark(self, parent, name, style_id=None, visible=True, description=None):
placemark = ET.SubElement(parent, 'Placemark')
placemark_name = ET.SubElement(placemark, 'name')
placemark_name.text = name
if (description is not None):
desc_tag = ET.SubElement(placemark, 'description')
desc_tag.text = description
if (style_id is not None):
styleurl = ET.SubElement(placemark, 'styleUrl')
styleurl.text = ('#' + style_id)
if (not visible):
visibility = ET.SubElement(placemark, 'visibility')
visibility.text = '0'
return placemark
|
Create a KML Placemark element.
Args:
parent: The parent ElementTree.Element instance.
name: The placemark name as a string.
style_id: If not None, the id of a style to use for the placemark.
visible: Whether the placemark is initially visible or not.
description: A description string or None.
Returns:
The placemark ElementTree.Element instance.
|
codesearchnet
|
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
|
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
github-repos
|
def unbind(self, binding):
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
pass
self.backend.storage.remove(binding)
|
Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
|
juraj-google-style
|
def mean_pooling(self, model_output, attention_mask):
token_embeddings = model_output[0]
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-09)
|
Calculates the mean of token embeddings
Args:
model_output: The output of the model.
attention_mask: This is a tensor that contains 1s for all input tokens and
0s for all padding tokens.
Returns:
The mean of the token embeddings.
|
github-repos
|
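Example (mean_pooling): a small runnable sketch of the masked mean pooling above on random tensors, with assumed toy shapes (batch=2, seq_len=4, hidden=3).
import torch

token_embeddings = torch.randn(2, 4, 3)            # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])       # 0 marks padding positions
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
pooled = torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-09)
print(pooled.shape)   # torch.Size([2, 3]) -- one mean embedding per sequence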
def write(self, data):
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._flushsize:
self._flush()
|
Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
|
juraj-google-style
|
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--fields', dest='fields', type=str, action='store',
default=cls._DEFAULT_FIELDS, help=(
'Defines which fields should be included in the output.'))
argument_group.add_argument(
'--additional_fields', dest='additional_fields', type=str,
action='store', default='', help=(
'Defines extra fields to be included in the output, in addition to'
' the default fields, which are {0:s}.'.format(
cls._DEFAULT_FIELDS)))
argument_group.add_argument(
'--timestamp_format', dest='timestamp_format', type=str,
action='store', default=cls._DEFAULT_TIMESTAMP_FORMAT, help=(
'Set the timestamp format that will be used in the datetime '
'column of the XLSX spreadsheet.'))
|
Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
|
juraj-google-style
|
def delta_E( reactants, products, check_balance=True ):
if check_balance:
if delta_stoichiometry( reactants, products ) != {}:
raise ValueError( "reaction is not balanced: {}".format( delta_stoichiometry( reactants, products) ) )
return sum( [ r.energy for r in products ] ) - sum( [ r.energy for r in reactants ] )
|
Calculate the change in energy for reactants --> products.
Args:
reactants (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The initial state.
products (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The final state.
check_balance (bool, optional): Check that the reaction stoichiometry is balanced. Default: True.
Returns:
(float) The change in energy.
|
juraj-google-style
|
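Example (delta_E): a worked sketch of the energy difference with stand-in objects; only the .energy attribute the function reads is modelled, and the balance check is skipped because delta_stoichiometry is not shown in this entry.
from collections import namedtuple

Calc = namedtuple('Calc', ['energy'])   # stand-in for vasppy.Calculation

reactants = [Calc(energy=-10.0), Calc(energy=-5.5)]
products = [Calc(energy=-16.0)]

dE = sum(p.energy for p in products) - sum(r.energy for r in reactants)
print(dE)   # -0.5 -- the reaction lowers the total energy by 0.5 (units of the calculations)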
def modify_ack_deadline(self, items):
ack_ids = [item.ack_id for item in items]
seconds = [item.seconds for item in items]
request = types.StreamingPullRequest(modify_deadline_ack_ids=ack_ids, modify_deadline_seconds=seconds)
self._manager.send(request)
|
Modify the ack deadline for the given messages.
Args:
items(Sequence[ModAckRequest]): The items to modify.
|
codesearchnet
|
def pick(self, connections):
if len(connections) == 1:
return connections[0]
def key(conn):
return (datetime.min
if conn.backoff_time is None
else conn.backoff_time)
return min(*connections, key=key)
|
Picks a connection with the earliest backoff time.
As a result, the first connection is picked
for as long as it has no backoff time.
Otherwise, the connections are tried in a round robin fashion.
Args:
connections (:obj:list): List of
:class:`~bigchaindb_driver.connection.Connection` instances.
|
juraj-google-style
|
def now_playing(self, **kwargs):
path = self._get_path('now_playing')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the list of movies playing in theatres. This list refreshes
every day. The maximum number of items this list will include is 100.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def init_app(self, app, context=DEFAULT_DICT):
if context is not _CONTEXT_MISSING:
self.update_context(context, app=app)
if (app not in _CONTEXT_CALLBACK_MAP
and context is not _CONTEXT_MISSING):
key = self._get_context_name(app=app)
self._context_callbacks(app, key, original_context=context)
|
Lazy constructor for the :class:`Component` class.
This method will allow the component to be used like a Flask
extension/singleton.
Args:
app (flask.Flask): The Application to base this Component upon.
Useful for app wide singletons.
Keyword Args:
context (dict, optional): The contextual information to supply to
this component.
|
juraj-google-style
|
def filter_paragraph(p):
tokens = p.split()
if len(tokens) < 6:
return True
if not re.search(_SOME_ALPHA_RE, p):
return True
last = 0
found_sentence = False
num_alpha = 0
for i, x in enumerate(tokens):
if x == '.':
if i - last > 3 and num_alpha >= 3:
found_sentence = True
break
last = i
num_alpha = 0
if re.match(_ONLY_ALPHA_RE, x):
num_alpha += 1
if not found_sentence:
return True
return False
|
Simple filter to remove obviously bad paragraphs (bad text extraction).
Note this needs to run very quickly as it is applied to every paragraph
in the corpus, so nothing fancy! This whole method should be linear
expected time in len(p).
Args:
p: string, paragraph
Returns:
True if we should remove the paragraph.
|
juraj-google-style
|
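Example (filter_paragraph): the two regexes are module-level constants not shown in this entry; a plausible assumption is "at least one letter" and "purely alphabetic token". With those filled in, the filter behaves as below (the body is repeated from the entry above so the sketch runs standalone).
import re

_SOME_ALPHA_RE = re.compile('[A-Za-z]')        # assumed: any alphabetic character
_ONLY_ALPHA_RE = re.compile('^[A-Za-z]+$')     # assumed: token is letters only

def filter_paragraph(p):
    tokens = p.split()
    if len(tokens) < 6:
        return True
    if not re.search(_SOME_ALPHA_RE, p):
        return True
    last = 0
    found_sentence = False
    num_alpha = 0
    for i, x in enumerate(tokens):
        if x == '.':
            if i - last > 3 and num_alpha >= 3:
                found_sentence = True
                break
            last = i
            num_alpha = 0
        if re.match(_ONLY_ALPHA_RE, x):
            num_alpha += 1
    return not found_sentence

print(filter_paragraph('12 34 56 78 90 11'))                        # True: drop, no real sentence
print(filter_paragraph('The quick brown fox jumps high . indeed'))  # False: keep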
def _copy_non_source(op, graph, op_map, base_graph):
input_mutations = []
control_mutations = []
copied_inputs = []
for input_index, original_input in enumerate(op.inputs):
copied_input = op_map.get(original_input, None)
if copied_input is None:
copied_input = array_ops.placeholder(name='unused_control_flow_input', shape=original_input.shape, dtype=original_input.dtype)
input_mutations.append(_InputMutation(copied_op=None, input_index=input_index, old_graph_tensor=original_input))
copied_inputs.append(copied_input)
copied_control_inputs = []
for original_control_input in op.control_inputs:
copied_control_input = op_map.get(original_control_input, None)
if copied_control_input is None:
control_mutations.append(_ControlMutation(copied_op=None, old_graph_op=original_control_input))
else:
copied_control_inputs.append(copied_control_input)
with ops.control_dependencies(copied_control_inputs), ops.device(op.device):
f = base_graph._functions.get(op.type, None)
if f is not None and compat.as_str(f.name) not in graph._functions:
f.add_to_graph(graph)
copied_op = graph.create_op(op_type=op.type, inputs=copied_inputs, dtypes=[x.dtype for x in op.outputs], attrs={key: value for key, value in op.node_def.attr.items() if not key.startswith('_class') and (not key.startswith('_tpu_replicate'))}, name=op.name)
op_map[op] = copied_op
for i, o in enumerate(op.outputs):
op_map[o] = copied_op.outputs[i]
return ([mutation._replace(copied_op=copied_op) for mutation in input_mutations], [mutation._replace(copied_op=copied_op) for mutation in control_mutations])
|
Copy an op directly to a given graph.
Generally `op`'s inputs should already have been copied. If this is not the
case, for example with v1 while_loops, then `_copy_non_source` inserts
placeholders for the unavailable Tensors and returns a list of required
mutations.
Args:
op: The op to be copied.
graph: The destination graph.
op_map: A dict mapping ops and tensors in the old graph to the new one.
base_graph: The graph we're copying from, for any necessary functions.
Returns:
A tuple of (required_inputs, required_control_inputs):
required_inputs:
A list of `_InputMutation` tuples containing inputs to `copied_op` which
must be updated once `old_graph_tensor` has been copied.
required_control_inputs:
A list of `_ControlMutation` tuples containing control inputs to
`copied_op` which must be added once `old_graph_op` has been copied.
|
github-repos
|
def replace_pyof_version(module_fullname, version):
module_version = MetaStruct.get_pyof_version(module_fullname)
if ((not module_version) or (module_version == version)):
return None
return module_fullname.replace(module_version, version)
|
Replace the OF Version of a module fullname.
Gets a module name (eg. 'pyof.v0x01.common.header') and returns it with
a new 'version' (eg. 'pyof.v0x02.common.header').
Args:
module_fullname (str): The fullname of the module
(e.g.: pyof.v0x01.common.header)
version (str): The version to be 'inserted' on the module fullname.
Returns:
str: module fullname
The new module fullname, with the replaced version,
on the format "pyof.v0x01.common.header". If the requested
version is the same as the one of the module_fullname or if
the module_fullname is not a 'OF version' specific module,
returns None.
|
codesearchnet
|
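Example (replace_pyof_version): a standalone sketch of the version swap; MetaStruct.get_pyof_version is not shown here, so a regex that pulls out the 'v0xNN' component is assumed in its place.
import re

def get_pyof_version(module_fullname):
    # Assumed stand-in for MetaStruct.get_pyof_version.
    match = re.search(r'v0x\d{2}', module_fullname)
    return match.group(0) if match else None

def replace_pyof_version(module_fullname, version):
    module_version = get_pyof_version(module_fullname)
    if not module_version or module_version == version:
        return None
    return module_fullname.replace(module_version, version)

print(replace_pyof_version('pyof.v0x01.common.header', 'v0x04'))  # pyof.v0x04.common.header
print(replace_pyof_version('pyof.foundation.base', 'v0x04'))      # None (not version-specific)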
def set_from_json(self, name, json, models=None, setter=None):
if (name in self.properties()):
log.trace('Patching attribute %r of %r with %r', name, self, json)
descriptor = self.lookup(name)
descriptor.set_from_json(self, json, models, setter)
else:
log.warning("JSON had attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", name, self)
|
Set a property value on this object from JSON.
Args:
name: (str) : name of the attribute to set
json: (JSON-value) : value to set to the attribute to
models (dict or None, optional) :
Mapping of model ids to models (default: None)
This is needed in cases where the attributes to update also
have values that have references.
setter(ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
|
codesearchnet
|
def find_rule(condition):
final_condition = re.sub('{{.*}}', '42', condition)
ast_tokens = Condition.get_tokens(final_condition)
ast_compressed_tokens = Condition.compress_tokens(ast_tokens)
name = 'undefined'
function = (lambda tokens: False)
if (len(ast_compressed_tokens) > 0):
for rule in Condition.RULES:
if Condition.match_tokens(ast_compressed_tokens, rule['types']):
name = rule['name']
function = rule['evaluate']
break
return (name, ast_tokens, function)
|
Find rule for given condition.
Args:
condition (str): Python condition as string.
Returns:
str, list, function: found rule name, list of AST tokens for condition
and verification function.
|
codesearchnet
|
def _convert(self, value, dtype):
if isinstance(value, resource_variable_ops.ResourceVariable):
raise RuntimeError(f'Attempting to return a variable from an eagerly executed py_func. Only numeric data structures like Tensors or NumPy arrays should be returned; to return the value of a variable, make sure to obtain the Tensor backing it by calling `.read_value()` on the variable in question: {value}')
if value is None and self._is_grad_func:
return constant_op.constant(0.0, dtype=dtype)
return ops.convert_to_tensor(value, dtype=dtype)
|
Converts `value` to a tensor of type `dtype`, with error checking.
Args:
value: The tensor to convert.
dtype: The desired dtype.
Returns:
A tensor of type `dtype`, or a zeros tensor if value is None and
this function is in fact a gradient function.
Raises:
RuntimeError: if `value` is a variable.
|
github-repos
|
def save_graph(graph_str, dest_file, fmt=None, image_ratio=None):
g = pydot.graph_from_dot_data(graph_str)
if fmt is None:
fmt = os.path.splitext(dest_file)[1].lower().strip('.') or "png"
if hasattr(g, "write_" + fmt):
write_fn = getattr(g, "write_" + fmt)
else:
raise Exception("Unsupported graph format: '%s'" % fmt)
if image_ratio:
g.set_ratio(str(image_ratio))
write_fn(dest_file)
return fmt
|
Render a graph to an image file.
Args:
graph_str (str): Dot-language graph string.
dest_file (str): Filepath to save the graph to.
fmt (str): Format, eg "png", "jpg".
image_ratio (float): Image ratio.
Returns:
String representing format that was written, such as 'png'.
|
juraj-google-style
|
def buckets_insert(self, bucket, project_id=None):
args = {'project': project_id if project_id else self._project_id}
data = {'name': bucket}
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
|
Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
juraj-google-style
|
def delete(self, path):
self.__validate_storage_path(path, projects_allowed=False)
entity = self.api_client.get_entity_by_query(path=path)
if (entity['entity_type'] in self.__BROWSABLE_TYPES):
contents = self.api_client.list_folder_content(entity['uuid'])
if (contents['count'] > 0):
raise StorageArgumentException('This method cannot delete non-empty folder. Please empty the folder first.')
self.api_client.delete_folder(entity['uuid'])
elif (entity['entity_type'] == 'file'):
self.api_client.delete_file(entity['uuid'])
|
Delete an entity from the storage service using its path.
Args:
path(str): The path of the entity to be deleted
Returns:
The uuid of created file entity as string
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def _update_flags(compiler_flags, remove_flags=()):
for flag in GFORTRAN_SHARED_FLAGS:
if (flag not in compiler_flags):
compiler_flags.append(flag)
if (DEBUG_ENV in os.environ):
to_add = GFORTRAN_DEBUG_FLAGS
to_remove = GFORTRAN_OPTIMIZE_FLAGS
else:
to_add = GFORTRAN_OPTIMIZE_FLAGS
if (os.environ.get(WHEEL_ENV) is None):
to_add += (GFORTRAN_NATIVE_FLAG,)
to_remove = GFORTRAN_DEBUG_FLAGS
for flag in to_add:
if (flag not in compiler_flags):
compiler_flags.append(flag)
return [flag for flag in compiler_flags if (not ((flag in to_remove) or (flag in remove_flags)))]
|
Update a given set of compiler flags.
Args:
compiler_flags (List[str]): Existing flags associated with a compiler.
remove_flags (Optional[Container[str]]): A container of flags to remove
that will override any of the defaults.
Returns:
List[str]: The modified list (i.e. some flags added and some removed).
|
codesearchnet
|
def __init__(self, callback):
self._callback = callback
self._interface = brocade_interface(
callback=pynos.utilities.return_xml
)
self._rbridge = brocade_rbridge(
callback=pynos.utilities.return_xml
)
self._mac_address_table = brocade_mac_address_table(
callback=pynos.utilities.return_xml
)
self._tunnels = brocade_tunnels(
callback=pynos.utilities.return_xml
)
|
Interface init function.
Args:
callback: Callback function that will be called for each action.
Returns:
Interface Object
Raises:
None
|
juraj-google-style
|
def tangent(f):
node = annotate.resolve_calls(f)
RemoveWith().visit(node)
wrapped = functools.wraps(f)(compile_.compile_function(node))
wrapped.tangent = f
return wrapped
|
A decorator which removes the `with insert_grad_of` statement.
This allows the function to be called as usual.
Args:
f: A function
Returns:
A function with any `with insert_grad_of` context managers removed.
|
codesearchnet
|
def make_parser():
parser = argparse.ArgumentParser(usage='%(prog)s [options] input')
parser.add_argument('--output-cfg', type=str, action='store', dest='output_cfg', default=None, help='Output control flow graph as SVG.')
parser.add_argument('--output-typegraph', type=str, action='store', dest='output_typegraph', default=None, help='Output typegraph as SVG.')
parser.add_argument('--visualize', type=str, action='store', dest='visualize_typegraph', default=None, help='Generate an HTML visualization of the typegraph.')
parser.add_argument('--visualize-blocks', type=str, action='store', dest='visualize_block_graph', default=None, help='Generate an HTML visualization of the blockgraph.')
wrapper = datatypes.ParserWrapper(parser)
pytype_config.add_all_pytype_options(wrapper)
return arg_parser.Parser(parser, pytype_single_args=wrapper.actions)
|
Make parser for command line args.
Returns:
A Parser object.
|
github-repos
|
def __init__(self, max_workers=None):
_remove_dead_thread_references()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
self._shutdown_thread = False
self._shutdown_process_event = multiprocessing.Event()
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
|
Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
|
juraj-google-style
|
def __init__(self, packet_count=None, byte_count=None):
super().__init__()
self.packet_count = packet_count
self.byte_count = byte_count
|
Create BucketCounter with the optional parameters below.
Args:
packet_count (int): Number of packets processed by bucket.
byte_count (int): Number of bytes processed by bucket.
|
juraj-google-style
|
def get_package_from_handle(package_handle):
if isinstance(package_handle, dict):
package_handle = ResourceHandle.from_dict(package_handle)
package_resource = package_repository_manager.get_resource_from_handle(package_handle)
package = Package(package_resource)
return package
|
Create a package given its handle (or serialized dict equivalent)
Args:
package_handle (`ResourceHandle` or dict): Resource handle, or
equivalent serialized dict representation from
ResourceHandle.to_dict
Returns:
`Package`.
|
juraj-google-style
|
def WriteGraphOpCreation(self, graph_op_creation):
debug_event = debug_event_pb2.DebugEvent(graph_op_creation=graph_op_creation)
self._EnsureTimestampAdded(debug_event)
_pywrap_debug_events_writer.WriteGraphOpCreation(self._dump_root, debug_event)
|
Write a GraphOpCreation proto with the writer.
Args:
graph_op_creation: A GraphOpCreation proto, describing the details of the
creation of an op inside a TensorFlow Graph.
|
github-repos
|
def test_sample_paths_2d(self, use_time_grid, supply_normal_draws):
dtype = tf.float64
mu = np.array([0.2, 0.7])
a = np.array([[0.4, 0.1], [0.3, 0.2]])
b = np.array([[0.33, -0.03], [0.21, 0.5]])
def drift_fn(t, x):
return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
def vol_fn(t, x):
del x
return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)
process = tff.models.GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn)
times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
x0 = np.array([0.1, -1.1])
if use_time_grid:
times_grid = tf.linspace(tf.constant(0.0, dtype=dtype), 0.55, 56)
time_step = None
else:
times_grid = None
time_step = 0.01
if supply_normal_draws:
num_samples = 1
normal_draws = tf.random.normal(shape=[5000, times_grid.shape[0] - 1, 2], dtype=dtype)
normal_draws = tf.concat([normal_draws, -normal_draws], axis=0)
else:
num_samples = 10000
normal_draws = None
paths = self.evaluate(process.sample_paths(times, num_samples=num_samples, initial_state=x0, time_step=time_step, times_grid=times_grid, normal_draws=normal_draws, seed=12134))
num_samples = 10000
self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
means = np.mean(paths, axis=0)
times = np.reshape(times, [-1, 1])
expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)
self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)
|
Tests path properties for a 2-dimensional Ito process.
We construct the following Ito processes.
dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2
mu_1, mu_2 are constants.
s_ij = a_ij t + b_ij
For this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.
Args:
use_time_grid: A boolean to indicate whether `times_grid` is supplied.
supply_normal_draws: A boolean to indicate whether `normal_draws` is
supplied.
|
github-repos
|
def getGUA(self, filterByPrefix=None):
print '%s call getGUA' % self.port
print filterByPrefix
globalAddrs = []
try:
globalAddrs = self.getGlobal()
if filterByPrefix is None:
return globalAddrs[0]
else:
for line in globalAddrs:
fullIp = ModuleHelper.GetFullIpv6Address(line)
if fullIp.startswith(filterByPrefix):
return fullIp
print 'no global address matched'
return str(globalAddrs[0])
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("getGUA() Error: " + str(e))
|
get expected global unicast IPv6 address of Thread device
Args:
filterByPrefix: a given expected global IPv6 prefix to be matched
Returns:
a global IPv6 address
|
juraj-google-style
|
def stop_capture_handler(self, name):
empty_capturers_indeces = []
for (k, sc) in self._stream_capturers.iteritems():
stream_capturer = sc[0]
stream_capturer.remove_handler(name)
if (stream_capturer.handler_count == 0):
self._pool.killone(sc[1])
empty_capturers_indeces.append(k)
for i in empty_capturers_indeces:
del self._stream_capturers[i]
|
Remove all handlers with a given name
Args:
name:
The name of the handler(s) to remove.
|
codesearchnet
|
def handle_unsubscribe(self, request, path):
ret = []
if path:
name = path[0]
child = self.children[name]
ret += child.handle_unsubscribe(request, path[1:])
if ((not child.children) and (not child.update_requests) and (not child.delta_requests)):
del self.children[name]
else:
if (request in self.update_requests):
self.update_requests.remove(request)
else:
self.delta_requests.remove(request)
ret.append(request.return_response())
return ret
|
Remove from the notifier list and send a return
Args:
request (Subscribe): The original subscribe request
path (list): The relative path from ourself
Returns:
list: [(callback, Response)] that need to be called
|
codesearchnet
|
def gvd(self, wavelength):
g = (wavelength*1.e-9)**3./(2.*spc.pi*spc.c**2.) * self.nDer2(wavelength)
return g
|
The group velocity dispersion (GVD) with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the GVD will
be evaluated at.
Returns:
float, list: The GVD at the target wavelength(s).
|
juraj-google-style
|
def end(self: EventSetOrNode) -> EventSetOrNode:
from temporian.core.operators.end import end
return end(self)
|
Generates a single timestamp at the end of an
[`EventSet`][temporian.EventSet], per index key.
Usage example:
```python
>>> a = tp.event_set(
... timestamps=[5, 6, 7, 1],
... features={"f": [50, 60, 70, 10], "idx": [1, 1, 1, 2]},
... indexes=["idx"]
... )
>>> a_end = a.end()
>>> a_end
indexes: [('idx', int64)]
features: []
events:
idx=1 (1 events):
timestamps: [7.]
idx=2 (1 events):
timestamps: [1.]
...
```
Returns:
A feature-less EventSet with a single timestamp per index group.
|
github-repos
|
def for_document(cls, document_ref, snapshot_callback, snapshot_class_instance, reference_class_instance):
return cls(document_ref, document_ref._client, {'documents': {'documents': [document_ref._document_path]}, 'target_id': WATCH_TARGET_ID}, document_watch_comparator, snapshot_callback, snapshot_class_instance, reference_class_instance)
|
Creates a watch snapshot listener for a document. snapshot_callback
receives a DocumentChange object, but may also start to get
targetChange and such soon
Args:
document_ref: Reference to Document
snapshot_callback: callback to be called on snapshot
snapshot_class_instance: instance of DocumentSnapshot to make
snapshots with to pass to snapshot_callback
reference_class_instance: instance of DocumentReference to make
references
|
codesearchnet
|
def delete_detector(self, detector_id, **kwargs):
resp = self._delete(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id), **kwargs)
resp.raise_for_status()
return resp
|
Remove a detector.
Args:
detector_id (string): the ID of the detector.
|
codesearchnet
|
def __init__(self, servers, debug=False):
self.servers = [servers] if isinstance(servers, basestring) else servers
self.key_hasher = self._debug_key_hash if debug else self._key_hash
self._client = None
self.debug = debug
self.current = ''
|
Create a memcached client.
Args:
servers (str or list of str): Server URI(s), eg '127.0.0.1:11211'.
debug (bool): If True, quasi human readable keys are used. This helps
debugging - run 'memcached -vv' in the foreground to see the keys
being get/set/stored.
|
juraj-google-style
|
def abs_url(self, url):
parsed_url = urllib.parse.urlparse(url)
if not parsed_url.scheme and not parsed_url.netloc:
return urllib.parse.urljoin(str(self.base_url), str(url))
else:
return url
|
Given a relative or absolute URL; return an absolute URL.
Args:
url(basestring): A relative or absolute URL.
Returns:
str: An absolute URL.
|
juraj-google-style
|
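Example (abs_url): a standalone sketch of the same resolution logic using urllib.parse directly, with an assumed base_url.
import urllib.parse

base_url = 'https://api.example.com/v1/'   # assumed base URL for illustration

def abs_url(url):
    parsed = urllib.parse.urlparse(url)
    if not parsed.scheme and not parsed.netloc:
        return urllib.parse.urljoin(base_url, url)
    return url

print(abs_url('rooms/42'))                     # https://api.example.com/v1/rooms/42
print(abs_url('https://other.example.com/x'))  # absolute URLs are returned unchanged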
def variance(numbers, type='population'):
mean = average(numbers)
variance = 0
for number in numbers:
variance += ((mean - number) ** 2)
if (type == 'population'):
return (variance / len(numbers))
else:
return (variance / (len(numbers) - 1))
|
Calculates the population or sample variance of a list of numbers.
A large number means the results are all over the place, while a
small number means the results are comparatively close to the average.
Args:
numbers: a list of integers or floating point numbers to compare.
type: string, 'population' or 'sample', the kind of variance to be computed.
Returns:
The computed population or sample variance.
Defaults to population variance.
Requires:
The math module, average()
|
codesearchnet
|
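Example (variance): a worked example, assuming the usual definition of average() (sum divided by count). For [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5 and the squared deviations sum to 32, so the population variance is 32/8 = 4 and the sample variance is 32/7 (about 4.571).
def average(numbers):
    # Assumed helper: arithmetic mean.
    return sum(numbers) / len(numbers)

def variance(numbers, type='population'):
    mean = average(numbers)
    total = sum((mean - n) ** 2 for n in numbers)
    if type == 'population':
        return total / len(numbers)
    return total / (len(numbers) - 1)

data = [2, 4, 4, 4, 5, 5, 7, 9]
print(variance(data))                   # 4.0  (divide by n)
print(variance(data, type='sample'))    # 4.571428...  (divide by n - 1)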
def file_to_list(file_name, file_location):
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
|
Function to import a text file to a list
Args:
file_name: The name of the file to be imported
file_location: The location of the file, derive from the os module
Returns: returns a list
|
juraj-google-style
|
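Example (file_to_list): a usage sketch with a throwaway file (the __os name in the entry above is just a module-level alias for os).
import os
import tempfile

def file_to_list(file_name, file_location):
    with open(os.path.join(file_location, file_name), 'r') as read_file:
        return read_file.read().splitlines()

tmp_dir = tempfile.mkdtemp()
with open(os.path.join(tmp_dir, 'hosts.txt'), 'w') as fh:
    fh.write('router1\nrouter2\nswitch1\n')

print(file_to_list('hosts.txt', tmp_dir))   # ['router1', 'router2', 'switch1']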
def add(self, index):
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{{}}}={}[sec/{}iter] {}[sec]".format(
index, self.name, elapsed, it, elapsed_total))
if self.fd is not None:
print("{} {} {} {}".format(index, elapsed,
it, elapsed_total), file=self.fd)
self.flush_at = index
|
Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals.
|
juraj-google-style
|
def GetFileEntryByPathSpec(self, path_spec):
if not self.FileEntryExistsByPathSpec(path_spec):
return None
location = getattr(path_spec, 'location', None)
if len(location) == 1:
return tar_file_entry.TARFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
kwargs = {}
try:
kwargs['tar_info'] = self._tar_file.getmember(location[1:])
except KeyError:
kwargs['is_virtual'] = True
return tar_file_entry.TARFileEntry(
self._resolver_context, self, path_spec, **kwargs)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
TARFileEntry: file entry or None.
|
juraj-google-style
|
def tf_retrieve_indices(self, buffer_elements, priority_indices):
states = dict()
buffer_start = self.buffer_index - buffer_elements
buffer_end = self.buffer_index
for name in sorted(self.states_memory):
buffer_state_memory = self.states_buffer[name]
buffer_states = buffer_state_memory[buffer_start:buffer_end]
memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices)
states[name] = tf.concat(values=(buffer_states, memory_states), axis=0)
internals = dict()
for name in sorted(self.internals_memory):
internal_buffer_memory = self.internals_buffer[name]
buffer_internals = internal_buffer_memory[buffer_start:buffer_end]
memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices)
internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=0)
actions = dict()
for name in sorted(self.actions_memory):
action_buffer_memory = self.actions_buffer[name]
buffer_action = action_buffer_memory[buffer_start:buffer_end]
memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices)
actions[name] = tf.concat(values=(buffer_action, memory_action), axis=0)
buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]
priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)
terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=0)
buffer_reward = self.reward_buffer[buffer_start:buffer_end]
priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)
reward = tf.concat(values=(buffer_reward, priority_reward), axis=0)
if self.include_next_states:
assert util.rank(priority_indices) == 1
next_priority_indices = (priority_indices + 1) % self.capacity
next_buffer_start = (buffer_start + 1) % self.buffer_size
next_buffer_end = (buffer_end + 1) % self.buffer_size
next_states = dict()
for name in sorted(self.states_memory):
buffer_state_memory = self.states_buffer[name]
buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]
memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices)
next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=0)
next_internals = dict()
for name in sorted(self.internals_memory):
buffer_internal_memory = self.internals_buffer[name]
buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]
memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices)
next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=0)
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
else:
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
|
Fetches experiences for given indices by combining entries from buffer
which have no priorities, and entries from priority memory.
Args:
buffer_elements: Number of buffer elements to retrieve
priority_indices: Index tensor for priority memory
Returns: Batch of experiences
|
juraj-google-style
|
def system(self, error: str) -> None:
log = self._build_system_message(error)
self.queue_log_message(log)
|
Adds system error information to base log message and
sends it to the logger for writing.
Args:
* error: error that occurred
Returns:
* None
|
github-repos
|
def write(self):
if self.description:
return '@{0} {1}{4}{2}{4}+{4}{3}{4}'.format(self.id, self.description, self.sequence, self.quality, os.linesep)
else:
return '@{0}{3}{1}{3}+{3}{2}{3}'.format(self.id, self.sequence, self.quality, os.linesep)
|
Return FASTQ formatted string
Returns:
str: FASTQ formatted string containing entire FASTQ entry
|
codesearchnet
|
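Example (FASTQ write): a minimal stand-in class showing the two output shapes; only the four attributes the method reads (id, description, sequence, quality) are modelled here.
import os

class FastqEntry(object):
    def __init__(self, id, sequence, quality, description=''):
        self.id, self.sequence, self.quality = id, sequence, quality
        self.description = description

    def write(self):
        if self.description:
            return '@{0} {1}{4}{2}{4}+{4}{3}{4}'.format(
                self.id, self.description, self.sequence, self.quality, os.linesep)
        return '@{0}{3}{1}{3}+{3}{2}{3}'.format(
            self.id, self.sequence, self.quality, os.linesep)

entry = FastqEntry('read1', 'ACGT', 'IIII', description='sample A')
print(entry.write(), end='')
# @read1 sample A
# ACGT
# +
# IIII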
def unpack(self, buff=None, offset=0):
property_type = UBInt16(enum_ref=TableFeaturePropType)
property_type.unpack(buff, offset)
self.__class__ = TableFeaturePropType(property_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset=offset)
|
Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
|
juraj-google-style
|
def create(self, resource):
uri = self.URI + self.RESOURCES_PATH
return self._client.create(resource=resource, uri=uri)
|
Set all the labels for a resource.
Args:
resource: The object containing the resource URI and a list of labels
Returns:
dict: Resource Labels
|
juraj-google-style
|
def _GetFrameCodeObjectName(frame):
if ((frame.f_code.co_argcount >= 1) and ('self' == frame.f_code.co_varnames[0])):
return ((frame.f_locals['self'].__class__.__name__ + '.') + frame.f_code.co_name)
else:
return frame.f_code.co_name
|
Gets the code object name for the frame.
Args:
frame: the frame to get the name from
Returns:
The function name if the code is a static function or the class name with
the method name if it is an member function.
|
codesearchnet
|
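Example (_GetFrameCodeObjectName): a standalone demonstration using inspect.currentframe, showing that a bound method's frame is reported as Class.method while a plain function keeps its bare name.
import inspect

def _GetFrameCodeObjectName(frame):
    if frame.f_code.co_argcount >= 1 and 'self' == frame.f_code.co_varnames[0]:
        return frame.f_locals['self'].__class__.__name__ + '.' + frame.f_code.co_name
    return frame.f_code.co_name

class Worker(object):
    def run(self):
        return _GetFrameCodeObjectName(inspect.currentframe())

def standalone():
    return _GetFrameCodeObjectName(inspect.currentframe())

print(Worker().run())   # Worker.run
print(standalone())     # standalone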
def kron(*matrices: np.ndarray) -> np.ndarray:
product = np.eye(1)
for m in matrices:
product = np.kron(product, m)
return np.array(product)
|
Computes the kronecker product of a sequence of matrices.
A *args version of lambda args: functools.reduce(np.kron, args).
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
|
juraj-google-style
|
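Example (kron): building a two-qubit operator from single-qubit matrices and checking it against numpy's own np.kron.
import numpy as np

def kron(*matrices):
    product = np.eye(1)
    for m in matrices:
        product = np.kron(product, m)
    return np.array(product)

I = np.eye(2)
X = np.array([[0.0, 1.0], [1.0, 0.0]])         # Pauli-X
print(kron(X, I).shape)                        # (4, 4)
print(np.allclose(kron(X, I), np.kron(X, I)))  # True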
def fetch(self, settlement_id, data={}, **kwargs):
return super(Settlement, self).fetch(settlement_id, data, **kwargs)
|
Fetch Settlement data for given Id
Args:
settlement_id : Id for which settlement object has to be retrieved
Returns:
settlement dict for given settlement id
|
juraj-google-style
|
def RegisterMountPoint(cls, mount_point, path_spec):
if mount_point in cls._mount_points:
raise KeyError('Mount point: {0:s} already set.'.format(mount_point))
cls._mount_points[mount_point] = path_spec
|
Registers a path specification mount point.
Args:
mount_point (str): mount point identifier.
path_spec (PathSpec): path specification of the mount point.
Raises:
KeyError: if the corresponding mount point is already set.
|
juraj-google-style
|
def add_arguments(cls, parser):
parser.add_argument(
'-i', '--issue',
action='store',
nargs='?',
const='',
dest='issue',
help="[pr] issue
)
parser.add_argument(
'-br', '--branch',
action='store',
nargs='?',
const='',
dest='branch',
help="[pr] branch",
)
parser.add_argument(
'-tbr', '--target-branch',
action='store',
nargs='?',
const='',
default='master',
dest='target_branch',
help="[pr] name of branch to pull changes into\n(defaults to: master)",
)
|
Add arguments to the parser for collection in app.args.
Args:
parser:
`argparse.ArgumentParser`. Parser.
Arguments added here are server on
self.args.
|
juraj-google-style
|
def get_lowest_decomposition(self, composition):
entries_list = []
elements = [e.symbol for e in composition.elements]
for i in range(len(elements)):
for combi in itertools.combinations(elements, (i + 1)):
chemsys = [Element(e) for e in combi]
x = self.costdb.get_entries(chemsys)
entries_list.extend(x)
try:
pd = PhaseDiagram(entries_list)
return pd.get_decomposition(composition)
except IndexError:
raise ValueError('Error during PD building; most likely, cost data does not exist!')
|
Get the decomposition leading to lowest cost
Args:
composition:
Composition as a pymatgen.core.structure.Composition
Returns:
Decomposition as a dict of {Entry: amount}
|
codesearchnet
|
def create_multispan_plots(tag_ids):
import matplotlib.gridspec as gridspec
fig = plt.figure()
nrows = 1
if len(tag_ids) > 1:
nrows = 2
fig.set_size_inches(10, 5*nrows)
gs = gridspec.GridSpec(nrows, len(tag_ids))
ax_list = [fig.add_subplot(g) for g in gs]
ax_dict = {}
for i, tag_dict in enumerate(tag_ids):
ax_dict[tag_dict['id']] = ax_list[i]
ax_dict[tag_dict['id']].set_title(
'System {} (id {})'.format(tag_dict['name'], tag_dict['id']))
if nrows > 1:
ax_total = plt.subplot(gs[1, :])
title = 'Combined {}'.format(tag_ids[0]['name'])
for i in range(1, len(tag_ids)):
title = title + ' and {}'.format(tag_ids[i]['name'])
ax_total.set_title(title)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
return fig, ax_dict, ax_total
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
return fig, ax_dict, None
|
Create detail plots (first row) and total block(second row) of experiments.
Args:
tag_ids: list of tag-dictionaries, where the dictionaries must have fields 'name' (used for naming)
and 'id' (used for numbering axis_dict)
Returns:
Figure element fig, ax_dict containing the first row plots (accessed via id) and ax_total containing the
second row block.
|
juraj-google-style
|
def post_url(self, url, token='', json=None, data=None, headers=None):
if (token == ''):
token = self._user_token
if headers:
headers.update({'Authorization': 'Token {}'.format(token)})
else:
headers = {'Authorization': 'Token {}'.format(token)}
if json:
return requests.post(url,
headers=headers,
json=json,
verify=False)
if data:
return requests.post(url,
headers=headers,
data=data,
verify=False)
return requests.post(url,
headers=headers,
verify=False)
|
Returns a post request object, taking in a url, user token, and
possible json information.
Arguments:
url (str): The url to make post to
token (str): The authentication token
json (dict): json info to send
Returns:
obj: Post request object
|
juraj-google-style
|
def _unbind_topics(self, topics):
self.client.unsubscribe(topics.status)
self.client.unsubscribe(topics.tracing)
self.client.unsubscribe(topics.streaming)
self.client.unsubscribe(topics.response)
|
Unsubscribe from all of the topics we needed for communication with the device.
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
|
juraj-google-style
|
def decode(data):
dom = None
try:
dom = dhtmlparser.parseString(data)
except Exception, e:
raise MetaParsingException("Can't parse your XML data: %s" % e.message)
root = dom.find("root")
if not root:
raise MetaParsingException("All elements have to be inside <root>.")
if len(root) > 1:
raise MetaParsingException("Too many <root> elements in your XML!")
items = root[0].find("item")
if not items:
raise MetaParsingException("There are no <items> in your XML <root>!")
decoded = []
for item in items:
if "key" not in item.params:
raise MetaParsingException(
"There is no 'key' parameter in %s." % str(item)
)
decoded.append([
item.params["key"],
item.getContent().strip()
])
decoded = validator.check_structure(decoded)
return decoded
|
Handles decoding of the XML `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
|
juraj-google-style
|
def _control_flow_post_processing(self, input_tensors=None) -> None:
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
|
Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
|
github-repos
|
def __init__(self,
descriptors=None,
descriptor_loader=import_descriptor_loader):
self.__descriptor_loader = descriptor_loader
self.__descriptors = descriptors or {}
|
Constructor.
Args:
descriptors: A dictionary or dictionary-like object that can be used
to store and cache descriptors by definition name.
descriptor_loader: A function used for resolving missing descriptors.
The function takes a definition name as its parameter and returns
an appropriate descriptor. It may raise DefinitionNotFoundError.
|
juraj-google-style
|
def sort_request(request: Dict[(str, Any)]) -> OrderedDict:
sort_order = ['jsonrpc', 'method', 'params', 'id']
return OrderedDict(sorted(request.items(), key=(lambda k: sort_order.index(k[0]))))
|
Sort a JSON-RPC request dict.
This has no effect other than making the request nicer to read.
>>> json.dumps(sort_request(
... {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))
'{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}'
Args:
request: JSON-RPC request in dict format.
|
codesearchnet
|
def string_join(inputs, separator='', name=None):
return gen_string_ops.string_join(inputs, separator=separator, name=name)
|
Perform element-wise concatenation of a list of string tensors.
Given a list of string tensors of same shape, performs element-wise
concatenation of the strings of the same index in all tensors.
>>> tf.strings.join(['abc','def']).numpy()
b'abcdef'
>>> tf.strings.join([['abc','123'],
... ['def','456'],
... ['ghi','789']]).numpy()
array([b'abcdefghi', b'123456789'], dtype=object)
>>> tf.strings.join([['abc','123'],
... ['def','456']],
... separator=" ").numpy()
array([b'abc def', b'123 456'], dtype=object)
The reduction version of this elementwise operation is
`tf.strings.reduce_join`
Args:
inputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype.
separator: A string added between each string being joined.
name: A name for the operation (optional).
Returns:
A `tf.string` tensor.
|
github-repos
|
def _PrintSessionsDetails(self, storage_reader):
for (session_number, session) in enumerate(storage_reader.GetSessions()):
session_identifier = uuid.UUID(hex=session.identifier)
session_identifier = '{0!s}'.format(session_identifier)
start_time = 'N/A'
if (session.start_time is not None):
start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)
completion_time = 'N/A'
if (session.completion_time is not None):
completion_time = timelib.Timestamp.CopyToIsoFormat(session.completion_time)
enabled_parser_names = 'N/A'
if session.enabled_parser_names:
enabled_parser_names = ', '.join(sorted(session.enabled_parser_names))
command_line_arguments = (session.command_line_arguments or 'N/A')
parser_filter_expression = (session.parser_filter_expression or 'N/A')
preferred_encoding = (session.preferred_encoding or 'N/A')
if isinstance(preferred_encoding, py2to3.BYTES_TYPE):
preferred_encoding = preferred_encoding.decode('utf-8')
if session.artifact_filters:
artifact_filters_string = ', '.join(session.artifact_filters)
else:
artifact_filters_string = 'N/A'
filter_file = (session.filter_file or 'N/A')
title = 'Session: {0:s}'.format(session_identifier)
table_view = views.ViewsFactory.GetTableView(self._views_format_type, title=title)
table_view.AddRow(['Start time', start_time])
table_view.AddRow(['Completion time', completion_time])
table_view.AddRow(['Product name', session.product_name])
table_view.AddRow(['Product version', session.product_version])
table_view.AddRow(['Command line arguments', command_line_arguments])
table_view.AddRow(['Parser filter expression', parser_filter_expression])
table_view.AddRow(['Enabled parser and plugins', enabled_parser_names])
table_view.AddRow(['Preferred encoding', preferred_encoding])
table_view.AddRow(['Debug mode', session.debug_mode])
table_view.AddRow(['Artifact filters', artifact_filters_string])
table_view.AddRow(['Filter file', filter_file])
table_view.Write(self._output_writer)
if self._verbose:
self._PrintPreprocessingInformation(storage_reader, (session_number + 1))
self._PrintParsersCounter(session.parsers_counter, session_identifier=session_identifier)
self._PrintAnalysisReportCounter(session.analysis_reports_counter, session_identifier=session_identifier)
self._PrintEventLabelsCounter(session.event_labels_counter, session_identifier=session_identifier)
|
Prints the details of the sessions.
Args:
storage_reader (BaseStore): storage reader to read the sessions from.
|
codesearchnet
|
def _run_pytype(self, pytype_args_dict):
with self._create_pytype_subprocess(pytype_args_dict) as p:
self.stdout, self.stderr = (s.decode('utf-8') for s in p.communicate())
self.returncode = p.returncode
|
A single command-line call to the pytype binary.
Typically you'll want to use _CheckTypesAndErrors or
_InferTypesAndCheckErrors, which will set up the command-line arguments
properly and check that the errors file is in the right state after the
call. (The errors check is bundled in to avoid the user forgetting to call
assertHasErrors() with no arguments when expecting no errors.)
Args:
pytype_args_dict: A dictionary of the arguments to pass to pytype, minus
the binary name. For example, to run pytype simple.py --output=- the
arguments should be {"simple.py": self.INCLUDE, "--output": "-"}
|
github-repos
|
def __init__(self, api_key=None, endpoint=None, dtype=None, verbose=None, debug=None):
self.api_key = api_key if api_key else os.environ['MPDS_KEY']
self.network = httplib2.Http()
self.endpoint = endpoint or self.endpoint
self.dtype = dtype or MPDSDataTypes.PEER_REVIEWED
self.verbose = verbose if verbose is not None else self.verbose
self.debug = debug or self.debug
|
MPDS API consumer constructor
Args:
api_key: (str) The MPDS API key, or None if the MPDS_KEY envvar is set
endpoint: (str) MPDS API gateway URL
Returns: None
|
juraj-google-style
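A hedged construction sketch for the MPDS consumer above; the MPDSDataRetrieval class name and the API key value are assumptions for illustration.

import os

os.environ.setdefault('MPDS_KEY', 'your-api-key')   # placeholder key, assumption
client = MPDSDataRetrieval(dtype=MPDSDataTypes.PEER_REVIEWED, verbose=True)   # class name assumed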
|
def backward_propagation(parameters, cache, X, Y):
m = X.shape[1]
W1 = parameters['W1']
W2 = parameters['W2']
A1 = cache['A1']
A2 = cache['A2']
dZ2 = A2 - Y
dW2 = (1.0 / m) * np.dot(dZ2, A1.T)
db2 = (1.0 / m) * np.sum(dZ2, axis=1, keepdims=True)
dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
dW1 = (1.0 / m) * np.dot(dZ1, X.T)
db1 = (1.0 / m) * np.sum(dZ1, axis=1, keepdims=True)
grads = {'dW1': dW1, 'db1': db1, 'dW2': dW2, 'db2': db2}
return grads
|
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
|
codesearchnet
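A minimal usage sketch for backward_propagation above, assuming numpy is imported as np; the parameters and cached activations are random stand-ins for a real forward pass.

import numpy as np

np.random.seed(0)
m, n_x, n_h = 5, 2, 4                                  # examples, input size, hidden size
X = np.random.randn(n_x, m)                            # shape (2, m), as the docstring requires
Y = (np.random.rand(1, m) > 0.5).astype(float)         # shape (1, m)
parameters = {'W1': np.random.randn(n_h, n_x) * 0.01,
              'W2': np.random.randn(1, n_h) * 0.01}
cache = {'A1': np.tanh(np.random.randn(n_h, m)),       # stand-ins for cached activations
         'A2': 1 / (1 + np.exp(-np.random.randn(1, m)))}

grads = backward_propagation(parameters, cache, X, Y)
print(grads['dW1'].shape, grads['db1'].shape)          # (4, 2) (4, 1)
print(grads['dW2'].shape, grads['db2'].shape)          # (1, 4) (1, 1)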
|
def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):
weights = RandomVar(LSTMCellWeightsShape(num_inputs, num_nodes), name='weights')
m = array_ops.zeros([batch_size, num_nodes], name='init_m')
c = array_ops.zeros([batch_size, num_nodes], name='init_c')
x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)
out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)
return (out_seq, [weights])
|
Builds a single LSTM layer with random weights and inputs.
Args:
batch_size: Inputs are fed in batches of this size.
seq_length: The sequence length to unroll the LSTM layer.
num_inputs: Dimension of inputs that are fed into each LSTM cell.
num_nodes: The number of nodes in each LSTM cell.
Returns:
(out_seq, weights) pair. The out_seq is a list of per-sequence-step
outputs, each with shape [batch_size, num_nodes]. The weights are a list of
weight variables that may be trained.
|
github-repos
|
def __init__(self, image_processor, tokenizer, chat_template=None, image_seq_length=256, policy_definitions=None, **kwargs):
super().__init__(image_processor, tokenizer, chat_template, image_seq_length, **kwargs)
if policy_definitions is None:
self.policy_definitions = DEFAULT_SHIELDGEMMA2_POLICIES
else:
self.policy_definitions = policy_definitions
|
A processor for the ShieldGemma 2 model.
Args:
image_processor: The image processor to use, typically a `Gemma3ImageProcessorFast` instance.
tokenizer: The tokenizer to use, typically a `GemmaTokenizerFast` instance.
chat_template: The chat template to use with this processor. Typically, this is unset as the processor
configuration on Hugging Face Hub includes this value already.
image_seq_length: The number of soft tokens per image. Typically, this is unset as the processor
configuration on Hugging Face Hub includes this value already.
policy_definitions: A mapping from policy name to its description in text used as the default policies to
classify images against. The policy descriptions are included in the text of the prompts generated by
this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes
the base policies ShieldGemma was trained on.
|
github-repos
|
def build_chunk(oscillators):
step_random_processes(oscillators)
subchunks = []
for osc in oscillators:
osc.amplitude.step_amp()
osc_chunk = osc.get_samples(config.CHUNK_SIZE)
if osc_chunk is not None:
subchunks.append(osc_chunk)
if len(subchunks):
new_chunk = sum(subchunks)
else:
new_chunk = numpy.zeros(config.CHUNK_SIZE)
chunk_amplitude = amplitude.find_amplitude(new_chunk)
if chunk_amplitude > config.MAX_AMPLITUDE:
new_chunk = amplitude.normalize_amplitude(new_chunk,
config.MAX_AMPLITUDE)
avg_amp = (sum(osc.amplitude.value for osc in oscillators) /
len(oscillators))
for osc in oscillators:
if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or
rand.prob_bool(0.01)):
osc.amplitude.drift_target = rand.weighted_rand(
[(-5, 1), (0, 10)])
osc.amplitude.change_rate = rand.weighted_rand(
osc.amplitude.change_rate_weights)
return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
|
Build an audio chunk and progress the oscillator states.
Args:
oscillators (list): A list of oscillator.Oscillator objects
to build chunks from
Returns:
str: a string of audio sample bytes ready to be written to a wave file
|
juraj-google-style
|
def daylight_saving_end_day(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `daylight_saving_end_day`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `daylight_saving_end_day`')
self._daylight_saving_end_day = value
|
Corresponds to IDD Field `daylight_saving_end_day`
Args:
value (str): value for IDD Field `daylight_saving_end_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def Deserialize(self, reader):
super(ContractState, self).Deserialize(reader)
code = FunctionCode()
code.Deserialize(reader)
self.Code = code
self.ContractProperties = reader.ReadUInt8()
self.Name = reader.ReadVarString(max=252)
self.CodeVersion = reader.ReadVarString(max=252)
self.Author = reader.ReadVarString(max=252)
self.Email = reader.ReadVarString(max=252)
self.Description = reader.ReadVarString(max=65536)
|
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader): binary reader to deserialize the object from.
|
juraj-google-style
|
def from_json(json):
return Point(
lat=json['lat'],
lon=json['lon'],
time=isostr_to_datetime(json['time'])
)
|
Creates Point instance from JSON representation
Args:
json (:obj:`dict`): Must have at least the following keys: lat (float), lon (float),
time (string in iso format). Example,
{
"lat": 9.3470298,
"lon": 3.79274,
"time": "2016-07-15T15:27:53.574110"
}
json: map representation of Point instance
Returns:
:obj:`Point`
|
juraj-google-style
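A usage sketch for from_json above, assuming it is exposed on the Point class (for example as a static method) and that isostr_to_datetime is available to it.

from datetime import datetime   # only needed to inspect the parsed value

point_json = {
    "lat": 9.3470298,
    "lon": 3.79274,
    "time": "2016-07-15T15:27:53.574110",
}
p = Point.from_json(point_json)          # assumes from_json is attached to Point as a static method
assert isinstance(p.time, datetime)      # isostr_to_datetime is expected to return a datetime
print(p.lat, p.lon, p.time.isoformat())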
|
def initialize():
global __initialized
if __initialized:
return
try:
for data in DEFAULT_CONFIG_OPTIONS:
nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])
for opt in data['options']:
_register_default_option(nsobj, opt)
db.session.add(nsobj)
for (ns, info) in CINQ_PLUGINS.items():
if (info['name'] == 'commands'):
continue
for entry_point in info['plugins']:
_cls = entry_point.load()
if hasattr(_cls, 'ns'):
ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)
if (not isinstance(_cls.options, abstractproperty)):
nsobj = _get_config_namespace(_cls.ns, ns_name)
if _cls.options:
for opt in _cls.options:
_register_default_option(nsobj, opt)
db.session.add(nsobj)
_add_default_roles()
_import_templates()
db.session.commit()
dbconfig.reload_data()
__initialized = True
except ProgrammingError as ex:
if (str(ex).find('1146') != (- 1)):
logging.getLogger('cloud_inquisitor').error('Missing required tables, please make sure you run `cloud-inquisitor db upgrade`')
|
Initialize the application configuration, adding any missing default configuration or roles
Returns:
`None`
|
codesearchnet
|
def bessel_i0e(x, name=None):
with ops.name_scope(name, 'bessel_i0e', [x]):
return gen_special_math_ops.bessel_i0e(x)
|
Computes the Bessel i0e function of `x` element-wise.
Modified Bessel function of order 0.
>>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy()
array([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32)
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.i0e
@end_compatibility
|
github-repos
|
def _get_unique_function_name(function_type, functions):
function_name = function_name_base = function_type
count = 2
while (function_name in functions):
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name
|
Get a unique function name.
Args:
function_type (str): Name of the function. Ex) Convolution, Affine
functions (OrderedDict of (str, Function)): Existing functions, keyed by name.
Returns: str
A unique function name
|
codesearchnet
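A usage sketch for _get_unique_function_name above with a hand-built registry; only membership of the keys matters, so placeholder values are used.

from collections import OrderedDict

functions = OrderedDict([('Convolution', None), ('Convolution_2', None)])  # values are placeholders
name = _get_unique_function_name('Convolution', functions)
print(name)              # 'Convolution_3'
functions[name] = None   # register it so the next call keeps counting upward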
|
def Factory(cls, name, min_threads, max_threads=None):
with cls.factory_lock:
result = cls.POOLS.get(name)
if (result is None):
cls.POOLS[name] = result = cls(name, min_threads, max_threads=max_threads)
return result
|
Creates a new thread pool with the given name.
If the thread pool of this name already exist, we just return the existing
one. This allows us to have different pools with different characteristics
used by different parts of the code, at the same time.
Args:
name: The name of the required pool.
min_threads: The number of threads in the pool.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Returns:
A threadpool instance.
|
codesearchnet
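A usage sketch for the Factory classmethod above; the ThreadPool class name is an assumption for illustration, the point being that repeated calls with the same name return the same pool.

# The ThreadPool class name below is an assumption; Factory is the classmethod shown above.
pool_a = ThreadPool.Factory('workers', min_threads=2, max_threads=10)
pool_b = ThreadPool.Factory('workers', min_threads=2)   # same name, so the cached pool is returned
assert pool_a is pool_b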
|
def _DepthwiseConv2dNumpyBasic(x1, x2, strides):
n, h, w, c = x1.shape
fh, fw, c2, o = x2.shape
assert c == c2
_, sh, sw, _ = strides
out_rows = (h - fh + sh) 
out_cols = (w - fw + sw) 
out = np.zeros([n, out_rows, out_cols, c * o])
for i in range(out_rows):
for j in range(out_cols):
for k in range(c):
start_height = i * sh
end_height = start_height + fh
start_width = j * sw
end_width = start_width + fw
multiplied_slice = x1[:, start_height:end_height, start_width:end_width, k, np.newaxis] * x2[:, :, k, :]
out[:, i, j, k * o:(k + 1) * o] = np.sum(multiplied_slice, axis=(1, 2))
return out
|
Compute depthwise_conv2d using Numpy.
This allows us to test TensorFlow's depthwise_conv2d by comparing to the
Numpy version.
Args:
x1: The input Numpy array, in NHWC format.
x2: The filter Numpy array.
strides: A Python list of 4 elements representing the strides.
Returns:
The depthwise conv2d output as a Numpy array.
|
github-repos
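A small sanity-check sketch for _DepthwiseConv2dNumpyBasic above, assuming numpy is imported as np; the shapes are arbitrary.

import numpy as np

x1 = np.random.rand(1, 5, 5, 3)      # NHWC input: batch 1, 5x5, 3 channels
x2 = np.random.rand(2, 2, 3, 2)      # filter: 2x2, 3 in-channels, channel multiplier 2
out = _DepthwiseConv2dNumpyBasic(x1, x2, strides=[1, 1, 1, 1])
print(out.shape)                     # (1, 4, 4, 6): last dim is channels * multiplier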
|
def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
# Note: `sensitivity` and `kepsilon` are not arguments; in the original TensorFlow
# metrics code this is a nested helper that closes over them.
sensitivities = math_ops.divide(tp, tp + fn + kepsilon)
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
return math_ops.divide(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name)
|
Computes the specificity at the given sensitivity.
Args:
tp: True positives.
tn: True negatives.
fp: False positives.
fn: False negatives.
name: The name of the operation.
Returns:
The specificity using the aggregated values.
|
github-repos
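For intuition, a plain-NumPy sketch of the same selection logic as compute_specificity_at_sensitivity above; the counts are made up, this is not the TensorFlow implementation, and it picks the first threshold closest to the target sensitivity rather than the last one the cumsum/argmax trick selects.

import numpy as np

kepsilon = 1e-7
sensitivity = 0.8                                 # target sensitivity
tp = np.array([9.0, 8.0, 6.0, 3.0])               # counts per candidate threshold (made up)
fn = np.array([1.0, 2.0, 4.0, 7.0])
tn = np.array([2.0, 5.0, 8.0, 9.0])
fp = np.array([8.0, 5.0, 2.0, 1.0])

sensitivities = tp / (tp + fn + kepsilon)
index = int(np.argmin(np.abs(sensitivities - sensitivity)))   # closest threshold to the target
print(tn[index] / (tn[index] + fp[index] + kepsilon))         # specificity at that threshold, ~0.5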
|
def create_resource_group(access_token, subscription_id, rgname, location):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
rg_body = {'location': location}
body = json.dumps(rg_body)
return do_put(endpoint, body, access_token)
|
Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
|
juraj-google-style
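A hedged usage sketch for create_resource_group above; the token and subscription id are placeholders, and the printed attributes assume do_put returns a requests-style response.

access_token = '<bearer-token>'                               # placeholder, obtain via the package's auth helpers
subscription_id = '00000000-0000-0000-0000-000000000000'      # placeholder subscription id
response = create_resource_group(access_token, subscription_id, 'my-resource-group', 'westus')
print(response.status_code, response.text)                    # assumes a requests-style response object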
|
def arccos(x):
if any_symbolic_tensors((x,)):
return Arccos().symbolic_call(x)
return backend.numpy.arccos(x)
|
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if `y = cos(x)`, then `x = arccos(y)`.
Args:
x: Input tensor.
Returns:
Tensor of the angle of the ray intersecting the unit circle at the given
x-coordinate in radians `[0, pi]`.
Example:
>>> x = keras.ops.convert_to_tensor([1, -1])
>>> keras.ops.arccos(x)
array([0.0, 3.1415927], dtype=float32)
|
github-repos
|
def _collapse_state(args: Dict[str, Any]):
index = args['index']
result = args['result']
prob_one = args['prob_one']
state = _state_shard(args)
normalization = np.sqrt(prob_one if result else 1 - prob_one)
state *= (_one_projector(args, index) * result +
(1 - _one_projector(args, index)) * (1 - result))
state /= normalization
|
Projects state shards onto the appropriate post measurement state.
This function makes no assumptions about the interpretation of quantum
theory.
Args:
args: The args from shard_num_args.
|
juraj-google-style
|
def Query(self, query, parameters=None):
if parameters:
self._cursor.execute(query, parameters)
else:
self._cursor.execute(query)
return self._cursor.fetchall()
|
Queries the database file.
Args:
query (str): SQL query.
parameters (Optional[dict|tuple]): query parameters.
Returns:
list[sqlite3.Row]: rows resulting from the query.
|
juraj-google-style
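Since the wrapping class and its _cursor are not shown, here is a standalone sqlite3 sketch of the same parameterized-query pattern Query implements.

import sqlite3

connection = sqlite3.connect(':memory:')
connection.row_factory = sqlite3.Row          # so fetched rows behave like the sqlite3.Row values above
cursor = connection.cursor()
cursor.execute('CREATE TABLE events (id INTEGER, message TEXT)')
cursor.execute('INSERT INTO events VALUES (?, ?)', (1, 'first event'))

cursor.execute('SELECT * FROM events WHERE id = ?', (1,))     # mirrors Query(query, parameters)
for row in cursor.fetchall():
    print(row['id'], row['message'])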
|
def remove_trunk_group(self, intf, value):
string = 'no switchport trunk group {}'.format(value)
return self.configure_interface(intf, string)
|
Removes a specified trunk group to the interface
Args:
intf (str): The interface name to remove the trunk group from
value (str): The trunk group value
Returns:
True if the operation as successfully applied otherwise false
|
codesearchnet
|
def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):
if (not users):
raise ValueError('No users supplied')
recipients = list()
for user in users:
recipients.append(user.data['email'])
if (configuration is None):
configuration = users[0].configuration
configuration.emailer().send(recipients, subject, text_body, html_body=html_body, sender=sender, **kwargs)
|
Email a list of users
Args:
users (List[User]): List of users
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None
|
codesearchnet
|