code | docstring | source
---|---|---|
def prepare_for_tokenization(self, text: str, is_split_into_words: bool=False, **kwargs) -> tuple[str, dict[str, Any]]:
return (text, kwargs) | Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (`str`):
The text to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs (`Dict[str, Any]`, *optional*):
Keyword arguments to use for the tokenization.
Returns:
`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs. | github-repos |
def get_user_roles(self, user):
self.project_service.set_auth(self._token_project)
return self.project_service.get_user_roles(user) | Get roles associated with the given user.
Args:
user (string): User name.
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure. | juraj-google-style |
def retrieve(self, block_height, headers=None):
path = self.path + block_height
return self.transport.forward_request(
method='GET', path=path, headers=headers) | Retrieves the block with the given ``block_height``.
Args:
block_height (str): height of the block to retrieve.
headers (dict): Optional headers to pass to the request.
Returns:
dict: The block with the given ``block_height``. | juraj-google-style |
def _is_node_return_ended(self, node):
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
if isinstance(node, astroid.While):
return True
if isinstance(node, astroid.Raise):
if (not node.exc):
return True
if (not utils.is_node_inside_try_except(node)):
return True
exc = utils.safe_infer(node.exc)
if ((exc is None) or (exc is astroid.Uninferable)):
return False
exc_name = exc.pytype().split('.')[(- 1)]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = (list(handlers) if (handlers is not None) else [])
if handlers:
return any((self._is_node_return_ended(_handler) for _handler in handlers))
return True
if isinstance(node, astroid.If):
is_orelse_returning = any((self._is_node_return_ended(_ore) for _ore in node.orelse if (not isinstance(_ore, astroid.FunctionDef))))
is_if_returning = any((self._is_node_return_ended(_ifn) for _ifn in node.body if (not isinstance(_ifn, astroid.FunctionDef))))
return (is_if_returning and is_orelse_returning)
return any((self._is_node_return_ended(_child) for _child in node.get_children() if (not isinstance(_child, astroid.ExceptHandler)))) | Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise. | codesearchnet |
def __init__(self, token_list):
self.__token_arr = np.array(list(set(token_list))) | Initialize.
Args:
token_list: The list of all tokens. | juraj-google-style |
def shared_symbol_table(name, version, symbols, imports=None):
return SymbolTable(table_type=SHARED_TABLE_TYPE, symbols=symbols, name=name, version=version, imports=imports) | Constructs a shared symbol table.
Args:
name (unicode): The name of the shared symbol table.
version (int): The version of the shared symbol table.
symbols (Iterable[unicode]): The symbols to associate with the table.
imports (Optional[Iterable[SymbolTable]]): The shared symbol tables to inject into this one.
Returns:
SymbolTable: The constructed table. | codesearchnet |
def GrabFileSystem(self, path_spec):
identifier = self._GetFileSystemCacheIdentifier(path_spec)
self._file_system_cache.GrabObject(identifier) | Grabs a cached file system object defined by path specification.
Args:
path_spec (PathSpec): path specification. | juraj-google-style |
def image_format(value):
if value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS:
raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT) | Confirms that the uploaded image is of supported format.
Args:
value (File): The file with an `image` property containing the image
Raises:
django.forms.ValidationError | juraj-google-style |
def add_variable(self, shape, initializer='zeros', dtype=None, aggregation='none', layout=None, name=None):
self._check_super_called()
initializer = initializers.get(initializer)
with backend.name_scope(self.name, caller=self):
variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=False, aggregation=aggregation, layout=layout, name=name)
self._track_variable(variable)
return variable | Add a variable to the optimizer.
Args:
shape: Shape tuple for the variable. Must be fully-defined
(no `None` entries).
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). Defaults to `"zeros"`.
dtype: Dtype of the variable to create, e.g. `"float32"`. If
unspecified, defaults to the `keras.backend.floatx()`.
aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
`"sum"` or `"only_first_replica"`. Annotates the variable with
the type of multi-replica aggregation to be used for this
variable when writing custom data parallel training loops.
Defaults to `"none"`.
layout: Optional tensor layout. Defaults to `None`.
name: String name of the variable. Useful for debugging purposes.
Returns:
An optimizer variable, in the format of `keras.Variable`. | github-repos |
def assert_true(expr, msg, extras=None):
if not expr:
fail(msg, extras) | Assert an expression evaluates to true, otherwise fail the test.
Args:
expr: The expression that is evaluated.
msg: A string explaining the details in case of failure.
extras: An optional field for extra information to be included in
test result. | github-repos |
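A minimal usage sketch (the test values are illustrative and `assert_true` is assumed importable from the module above):
results = [1, 2, 3]
assert_true(len(results) == 3, 'Expected exactly 3 results.', extras={'count': len(results)})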
def status(self, targets, jobs=None, remote=None, show_checksums=False):
cloud = self._get_cloud(remote, 'status')
return self.repo.cache.local.status(targets, jobs=jobs, remote=cloud, show_checksums=show_checksums) | Check status of data items in a cloud-agnostic way.
Args:
targets (list): list of targets to check status for.
jobs (int): number of jobs that can be running simultaneously.
remote (dvc.remote.base.RemoteBase): optional remote to compare
targets to. By default remote from core.remote config option
is used.
show_checksums (bool): show checksums instead of file names in
information messages. | codesearchnet |
def validate_sqs_policy(self, accounts):
sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)
sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)
sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))
session = get_aws_session(sqs_account)
sqs = session.client('sqs', region_name=sqs_queue_region)
sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number)
sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])
policy = json.loads(sqs_attribs['Attributes']['Policy'])
for account in accounts:
arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)
if arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']:
self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))
policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn)
sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)}) | Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue
Args:
accounts (`list` of :obj:`Account`): List of accounts
Returns:
`None` | juraj-google-style |
def get_orbital_derivative_between_states(self, band_i, band_j, kpoint, spin, cart_dir):
if band_i < 0 or band_i > self.nbands - 1 or band_j < 0 or band_j > self.nelect - 1:
raise ValueError("Band index out of bounds")
if kpoint > self.nkpoints:
raise ValueError("K-point index out of bounds")
if cart_dir > 2 or cart_dir < 0:
raise ValueError("cart_dir index out of bounds")
return self.cder_data[band_i, band_j, kpoint, spin, cart_dir] | Returns the orbital derivative between bands band_i and band_j
for the given k-point index, spin channel and Cartesian direction.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
kpoint (Integer): Index of k-point
spin (Integer): Index of spin-channel (0 or 1)
cart_dir (Integer): Index of cartesian direction (0,1,2)
Returns:
a float value | juraj-google-style |
def get_graph(self, run_key, device_name, debug=False):
return self.get_graphs(run_key, debug=debug).get(device_name, None) | Get the runtime GraphDef proto associated with a run key and a device.
Args:
run_key: A Session.run key.
device_name: Name of the device in question.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `GraphDef` proto. | codesearchnet |
def ws45(msg):
d = hex2bin(data(msg))
if d[3] == '0':
return None
ws = bin2int(d[4:6])
return ws | Wind shear.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: Wind shear level. 0=NIL, 1=Light, 2=Moderate, 3=Severe | juraj-google-style |
def request_file(link, outfile, force_rerun_flag=False):
if force_rerun(flag=force_rerun_flag, outfile=outfile):
req = requests.get(link)
if req.status_code == 200:
with open(outfile, 'w') as f:
f.write(req.text)
log.debug('Loaded and saved {} to {}'.format(link, outfile))
else:
log.error('{}: request error {}'.format(link, req.status_code))
return outfile | Download a file given a URL if the outfile does not exist already.
Args:
link (str): Link to download file.
outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does
exist, unless force_rerun_flag is True.
force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.
Returns:
str: Path to downloaded file. | juraj-google-style |
def bpe_decode(self, sequences):
return self.bpe_tokenizer.batch_decode(sequences) | Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.
Args:
sequences (`torch.Tensor`):
List of tokenized input ids.
Returns:
`List[str]`: The list of bpe decoded sentences. | github-repos |
def pnl(self, account='', modelCode='') -> List[PnL]:
return [v for v in self.wrapper.pnls.values() if
(not account or v.account == account) and
(not modelCode or v.modelCode == modelCode)] | List of subscribed :class:`.PnL` objects (profit and loss),
optionally filtered by account and/or modelCode.
The :class:`.PnL` objects are kept live updated.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account model. | juraj-google-style |
def inverse_event_shape(self, output_shape):
return self._inverse_event_shape(output_shape) | Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape_tensor`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape_tensor: `TensorShape` indicating event-portion shape
after applying `inverse`. Possibly unknown. | github-repos |
def matches_any(patterns: List[Pattern[str]], line: str) -> bool:
stripped_line = line.strip()
for pattern in patterns:
if pattern.match(stripped_line):
return True
return False | Checks if the line matches any of the given patterns.
Args:
patterns: A list of compiled regular expression patterns.
line: The line to check for matches.
Returns:
True if the line matches any of the patterns, False otherwise. | github-repos |
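A small usage sketch (the pattern list is illustrative; `matches_any` is assumed importable from the module above):
import re
skip_patterns = [re.compile(r'^#'), re.compile(r'^$')]
matches_any(skip_patterns, '   # a comment line')  # True: the stripped line starts with '#'
matches_any(skip_patterns, 'real content')         # False: no pattern matches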
def read_lines(self, max_lines=None):
if (max_lines is None):
return self.read_stream().split('\n')
max_to_read = self.metadata.size
bytes_to_read = min((100 * max_lines), self.metadata.size)
while True:
content = self.read_stream(byte_count=bytes_to_read)
lines = content.split('\n')
if ((len(lines) > max_lines) or (bytes_to_read >= max_to_read)):
break
bytes_to_read = min((bytes_to_read * 10), max_to_read)
del lines[(- 1)]
return lines[0:max_lines] | Reads the content of this object as text, and return a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content. | codesearchnet |
def _common_prefix(self, m):
if not m:
return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1 | Given a list of str, returns the longest common prefix.
Args:
m: (list of str) A list of strings.
Returns:
(str) The longest common prefix. | github-repos |
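A behavior sketch (written against a hypothetical enclosing instance named `completer`, since this is a method):
completer._common_prefix(['tensorflow', 'tensorboard', 'tensor'])  # -> 'tensor'
completer._common_prefix([])                                       # -> ''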
def files_upload(
self, *, file: Union[str, IOBase] = None, content: str = None, **kwargs
) -> SlackResponse:
if file is None and content is None:
raise e.SlackRequestError("The file or content argument must be specified.")
if file is not None and content is not None:
raise e.SlackRequestError(
"You cannot specify both the file and the content argument."
)
if file:
return self.api_call("files.upload", files={"file": file}, data=kwargs)
elif content:
data = kwargs.copy()
data.update({"content": content})
return self.api_call("files.upload", data=data) | Uploads or creates a file.
Args:
file (str): Supply a file path.
when you'd like to upload a specific file. e.g. 'dramacat.gif'
content (str): Supply content when you'd like to create an
editable text file containing the specified text. e.g. 'launch plan'
Raises:
SlackRequestError: If neither or both the `file` and `content` args are specified. | juraj-google-style |
def _cast_dict(self, data_dict):
for (key, value) in data_dict.iteritems():
data_dict[key] = self._cast_value(value)
if ('resp_body_data' in data_dict):
del data_dict['resp_body_data']
return data_dict | Internal method that makes sure any dictionary elements
are properly cast into the correct types, instead of
just treating everything like a string from the csv file.
Args:
data_dict: dictionary containing bro log data.
Returns:
Cleaned Data dict. | codesearchnet |
def register_date_conversion_handler(date_specifier_patterns):
def _decorator(func):
global DATE_SPECIFIERS_CONVERSION_HANDLERS
DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func
return func
return _decorator | Decorator for registering handlers that convert text dates to dates.
Args:
date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered | codesearchnet |
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
if default:
cmd = 'default switchport trunk group'
return self.configure_interface(intf, cmd)
if disable:
cmd = 'no switchport trunk group'
return self.configure_interface(intf, cmd)
current_value = self.get(intf)['trunk_groups']
failure = False
value = make_iterable(value)
for name in set(value).difference(current_value):
if (not self.add_trunk_group(intf, name)):
failure = True
for name in set(current_value).difference(value):
if (not self.remove_trunk_group(intf, name)):
failure = True
return (not failure) | Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False | codesearchnet |
def __init__(self, ethertype=None):
super().__init__(action_type=ActionType.OFPAT_POP_MPLS)
self.ethertype = ethertype | Create an ActionPopMPLS with the optional parameters below.
Args:
ethertype (int): indicates the Ethertype of the payload. | juraj-google-style |
def _expand_dims(x, input_shape, output_shape):
verify_no_new_dims([output_shape], input_shape)
if input_shape == output_shape or input_shape.ndims == 0:
return x
perm = [input_shape.dims.index(d) for d in output_shape.dims
if d in input_shape.dims]
x = tf.transpose(x, perm)
for i, d in enumerate(output_shape.dims):
if d not in input_shape.dims:
x = tf.expand_dims(x, i)
return x | Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor | juraj-google-style |
def format(self, exclude_class=False):
if exclude_class:
msg = self.msg
else:
msg = "%s: %s" % (self.__class__.__name__, self.msg)
if len(self.params) != 0:
paramstring = "\n".join([str(key) + ": " + str(val) for key, val in self.params.items()])
msg += "\nAdditional Information:\n" + paramstring
return msg | Format this exception as a string including class name.
Args:
exclude_class (bool): Whether to exclude the exception class
name when formatting this exception
Returns:
string: a multiline string with the message, class name and
key value parameters passed to create the exception. | juraj-google-style |
def websocket_url_for_server_url(url):
if url.startswith("http:"):
reprotocoled = "ws" + url[4:]
elif url.startswith("https:"):
reprotocoled = "wss" + url[5:]
else:
raise ValueError("URL has unknown protocol " + url)
if reprotocoled.endswith("/"):
return reprotocoled + "ws"
else:
return reprotocoled + "/ws" | Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
the appropriate ``ws(s)`` URL
Args:
url (str):
An ``http(s)`` URL
Returns:
str:
The corresponding ``ws(s)`` URL ending in ``/ws``
Raises:
ValueError:
If the input URL is not of the proper form. | juraj-google-style |
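Example conversions implied by the code above (the URLs are illustrative):
websocket_url_for_server_url('http://localhost:5006')        # -> 'ws://localhost:5006/ws'
websocket_url_for_server_url('https://demo.bokeh.org/app/')  # -> 'wss://demo.bokeh.org/app/ws'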
def get_program(self, program_resource_name: str) -> Dict:
return self.service.projects().programs().get(
name=program_resource_name).execute() | Returns the previously created quantum program.
Params:
program_resource_name: A string of the form
`projects/project_id/programs/program_id`.
Returns:
A dictionary containing the metadata and the program. | juraj-google-style |
def get(self, recipe_id):
self.logger.debug('Retrieving recipe by id: ' + recipe_id)
url = '%(base_url)s/recipe/%(recipe_id)s' % {
'base_url': self.base_url, 'recipe_id': recipe_id
}
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json() | Retrieves an AnswerFactory Recipe by id
Args:
recipe_id The id of the recipe
Returns:
A JSON representation of the recipe | juraj-google-style |
def load_json(filename, **kwargs):
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f, **kwargs) | Load a JSON object from the specified file.
Args:
filename: Path to the input JSON file.
**kwargs: Additional arguments to `json.load`.
Returns:
The object deserialized from JSON. | juraj-google-style |
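A minimal round-trip sketch (the temporary path is hypothetical; `load_json` is assumed importable from the module above):
import json
with open('/tmp/example.json', 'w', encoding='utf-8') as f:
    json.dump({'name': 'demo', 'size': 3}, f)
data = load_json('/tmp/example.json')
assert data == {'name': 'demo', 'size': 3}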
def checksum(self, path):
try:
return self._blobstorageIO().checksum(path)
except Exception as e:
raise BeamIOError('Checksum operation failed', {path: e}) | Fetch checksum metadata of a file on the
:class:`~apache_beam.io.filesystem.FileSystem`.
Args:
path: string path of a file.
Returns: string containing checksum
Raises:
``BeamIOError``: if path isn't a file or doesn't exist. | github-repos |
def with_params(self, **kwargs):
if _TEST_MODE:
logging.info(
'Setting runtime parameters for %s#%s to %r',
self, self.pipeline_id, kwargs)
return self
if self.pipeline_id is not None:
raise UnexpectedPipelineError(
'May only call with_params() on a Pipeline that has not yet '
'been scheduled for execution.')
ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
for name, value in kwargs.iteritems():
if name not in ALLOWED:
raise TypeError('Unexpected keyword: %s=%r' % (name, value))
setattr(self, name, value)
return self | Modify various execution parameters of a Pipeline before it runs.
This method has no effect in test mode.
Args:
kwargs: Attributes to modify on this Pipeline instance before it has
been executed.
Returns:
This Pipeline instance, for easy chaining. | juraj-google-style |
def orthonormalize_righthanded(basis):
v1, v2 = basis[:, 0], basis[:, 1]
e1 = normalize(v1)
e3 = normalize(np.cross(e1, v2))
e2 = normalize(np.cross(e3, e1))
return np.array([e1, e2, e3]).T | Orthonormalizes righthandedly a given 3D basis.
This function returns a right-handed orthonormalized basis.
Since only the first two vectors in the basis are used, it does not matter
if you give two or three vectors.
Right handed means, that:
.. math::
\\vec{e_1} \\times \\vec{e_2} &= \\vec{e_3} \\\\
\\vec{e_2} \\times \\vec{e_3} &= \\vec{e_1} \\\\
\\vec{e_3} \\times \\vec{e_1} &= \\vec{e_2} \\\\
Args:
basis (np.array): An array of shape = (3,2) or (3,3)
Returns:
new_basis (np.array): A right handed orthonormalized basis. | codesearchnet |
def _unflatten_dict(flat_dict, prefixes):
original_dict = {}
for (key, value) in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = (('__' + prefix) + '_')
if key.startswith(full_prefix):
if (prefix not in original_dict):
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if (not prefix_found):
original_dict[key] = value
return original_dict | Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure. | codesearchnet |
def from_string(cls, model_id, default_project=None):
(proj, dset, model) = _helpers._parse_3_part_id(model_id, default_project=default_project, property_name='model_id')
return cls.from_api_repr({'projectId': proj, 'datasetId': dset, 'modelId': model}) | Construct a model reference from model ID string.
Args:
model_id (str):
A model ID in standard SQL format. If ``default_project``
is not specified, this must include a project ID, dataset
ID, and model ID, each separated by ``.``.
default_project (str):
Optional. The project ID to use when ``model_id`` does not
include a project ID.
Returns:
google.cloud.bigquery.model.ModelReference:
Model reference parsed from ``model_id``.
Raises:
ValueError:
If ``model_id`` is not a fully-qualified model ID in
standard SQL format. | codesearchnet |
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pd._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b")
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt | Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram | juraj-google-style |
def texture_cube(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureCube':
res = TextureCube.__new__(TextureCube)
res.mglo, res._glo = self.mglo.texture_cube(size, components, data, alignment, dtype)
res._size = size
res._components = components
res._dtype = dtype
res.ctx = self
res.extra = None
return res | Create a :py:class:`TextureCube` object.
Args:
size (tuple): The width, height of the texture. Each side of the cube will have this size.
components (int): The number of components 1, 2, 3 or 4.
data (bytes): Content of the texture.
Keyword Args:
alignment (int): The byte alignment 1, 2, 4 or 8.
dtype (str): Data type.
Returns:
:py:class:`TextureCube` object | juraj-google-style |
def split_folder_and_path(filepath):
dirname = op.dirname(filepath)
filename = op.basename(filepath)
splitext = op.splitext(filename)
filename_without_extension = splitext[0]
extension = splitext[1]
return (dirname, filename_without_extension, extension) | Split a file path into its folder, filename, and extension
Args:
filepath (str): Path to a file
Returns:
tuple: of (folder, filename (without extension), extension) | codesearchnet |
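An illustrative call (the path is hypothetical):
split_folder_and_path('/data/models/structure.pdb')  # -> ('/data/models', 'structure', '.pdb')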
def include(filename, hosts=False, when=True):
if not pyinfra.is_cli:
raise PyinfraError('local.include is only available in CLI mode.')
if not when:
return
if hosts is not False:
hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)
if pseudo_host not in hosts:
return
if pseudo_state.deploy_dir:
filename = path.join(pseudo_state.deploy_dir, filename)
frameinfo = get_caller_frameinfo()
logger.debug('Including local file: {0}'.format(filename))
try:
from pyinfra_cli.config import extract_file_config
from pyinfra_cli.util import exec_file
config_data = extract_file_config(filename)
kwargs = {
key.lower(): value
for key, value in six.iteritems(config_data)
if key in [
'SUDO', 'SUDO_USER', 'SU_USER',
'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS',
]
}
with pseudo_state.deploy(
filename, kwargs, None, frameinfo.lineno,
in_deploy=False,
):
exec_file(filename)
except IOError as e:
raise PyinfraError(
'Could not include local file: {0}\n{1}'.format(filename, e),
) | Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``
directory.
Args:
hosts (string, list): group name or list of hosts to limit this include to
when (bool): indicate whether to trigger operations in this include | juraj-google-style |
def set_userdata(self, key: str, value: Any, cloneable: bool=False) -> 'DNA':
self._userdata[key] = value
if cloneable:
self._cloneable_userdata_keys.add(key)
return self | Sets user data associated with a key.
User data associated with the DNA will live only within current process,
and is not carried over during serialization/deserialization, which is
different from DNA metadata. (See `set_metadata` for more details.)
Args:
key: Key of the user data.
value: Value of the user data.
cloneable: If True, the key/value will be carried over to the cloned DNA.
Returns:
Self. | github-repos |
def CaptureVariableInternal(self, value, depth, limits, can_enqueue=True):
if depth == limits.max_depth:
return {'varTableIndex': 0}
if value is None:
self._total_size += 4
return {'value': 'None'}
if isinstance(value, _PRIMITIVE_TYPES):
r = _TrimString(repr(value),
min(limits.max_value_len,
self.max_size - self._total_size))
self._total_size += len(r)
return {'value': r, 'type': type(value).__name__}
if isinstance(value, _DATE_TYPES):
r = str(value)
self._total_size += len(r)
return {'value': r, 'type': 'datetime.'+ type(value).__name__}
if isinstance(value, dict):
items = [(repr(k), v) for (k, v) in value.items()]
return {'members':
self.CaptureVariablesList(items, depth + 1,
EMPTY_DICTIONARY, limits),
'type': 'dict'}
if isinstance(value, _VECTOR_TYPES):
fields = self.CaptureVariablesList(
(('[%d]' % i, x) for i, x in enumerate(value)),
depth + 1, EMPTY_COLLECTION, limits)
return {'members': fields, 'type': type(value).__name__}
if isinstance(value, types.FunctionType):
self._total_size += len(value.__name__)
return {'value': 'function ' + value.__name__}
if isinstance(value, Exception):
fields = self.CaptureVariablesList(
(('[%d]' % i, x) for i, x in enumerate(value.args)),
depth + 1, EMPTY_COLLECTION, limits)
return {'members': fields, 'type': type(value).__name__}
if can_enqueue:
index = self._var_table_index.get(id(value))
if index is None:
index = len(self._var_table)
self._var_table_index[id(value)] = index
self._var_table.append(value)
self._total_size += 4
return {'varTableIndex': index}
for pretty_printer in CaptureCollector.pretty_printers:
pretty_value = pretty_printer(value)
if not pretty_value:
continue
fields, object_type = pretty_value
return {'members':
self.CaptureVariablesList(fields, depth + 1, OBJECT_HAS_NO_FIELDS,
limits),
'type': object_type}
if not hasattr(value, '__dict__'):
r = str(type(value))
self._total_size += len(r)
return {'value': r}
items = value.__dict__.items()
if six.PY3:
items = list(itertools.islice(items, limits.max_list_items + 1))
members = self.CaptureVariablesList(items, depth + 2,
OBJECT_HAS_NO_FIELDS, limits)
v = {'members': members}
type_string = DetermineType(value)
if type_string:
v['type'] = type_string
return v | Captures a single nameless object into Variable message.
TODO(vlif): safely evaluate iterable types.
TODO(vlif): safely call str(value)
Args:
value: data to capture
depth: nested depth of dictionaries and vectors so far.
limits: Per-object limits for capturing variable data.
can_enqueue: allows referencing the object in variables table.
Returns:
Formatted captured data as per Variable proto. | juraj-google-style |
def update(self, puts, deletes):
with self._lmdb.begin(write=True, buffers=True) as txn:
cursor = txn.cursor(self._main_db)
for key in deletes:
if not cursor.set_key(key.encode()):
continue
value = self._deserializer(bytes(cursor.value()))
cursor.delete()
for (index_db, index_key_fn) in self._indexes.values():
index_keys = index_key_fn(value)
index_cursor = txn.cursor(index_db)
for idx_key in index_keys:
if index_cursor.set_key(idx_key):
index_cursor.delete()
for key, value in puts:
packed = self._serializer(value)
cursor.put(key.encode(), packed, overwrite=True)
for (index_db, index_key_fn) in self._indexes.values():
index_keys = index_key_fn(value)
index_cursor = txn.cursor(index_db)
for idx_key in index_keys:
index_cursor.put(idx_key, key.encode())
self.sync() | Applies the given puts and deletes atomically.
Args:
puts (:iterable:`tuple`): an iterable of key/value pairs to insert
deletes (:iterable:`str`): an iterable of keys to delete | juraj-google-style |
def _get_return_value(self, tensors, indices):
tensors = self._create_device_transfers(tensors)
for output, i in zip(tensors, indices):
output.set_shape(self._shapes[i])
if self._names:
return {self._names[i]: t for t, i in zip(tensors, indices)}
return tensors | Return the value to return from a get op.
If the staging area has names, return a dictionary with the
names as keys. Otherwise return either a single tensor
or a list of tensors depending on the length of `tensors`.
Args:
tensors: List of tensors from the get op.
indices: Indices of associated names and shapes
Returns:
A single tensor, a list of tensors, or a dictionary
of tensors. | github-repos |
def recode_dwgsim_reads(dwgsim_prefix, fastq_rnf_fo, fai_fo, genome_id, estimate_unknown_values, number_of_read_tuples=(10 ** 9)):
dwgsim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)')
fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
read_tuple_id = 0
last_read_tuple_name = None
old_fq = '{}.bfast.fastq'.format(dwgsim_prefix)
fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator='dwgsim')
i = 0
with open(old_fq, 'r+') as f1:
for line in f1:
if ((i % 4) == 0):
read_tuple_name = line[1:].strip()
if (read_tuple_name != last_read_tuple_name):
new_tuple = True
if (last_read_tuple_name is not None):
read_tuple_id += 1
else:
new_tuple = False
last_read_tuple_name = read_tuple_name
m = dwgsim_pattern.search(line)
if (m is None):
rnftools.utils.error("Read tuple '{}' was not created by DwgSim.".format(line[1:]), program='RNFtools', subprogram='MIShmash', exception=ValueError)
contig_name = m.group(1)
start_1 = int(m.group(2))
start_2 = int(m.group(3))
direction_1 = ('F' if (int(m.group(4)) == 0) else 'R')
direction_2 = ('F' if (int(m.group(5)) == 0) else 'R')
chr_id = (fai_index.dict_chr_ids[contig_name] if (fai_index.dict_chr_ids != {}) else '0')
elif ((i % 4) == 1):
bases = line.strip()
if new_tuple:
segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction_1, left=start_1, right=(((start_1 + len(bases)) - 1) if estimate_unknown_values else 0))
else:
segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction_2, left=start_2, right=(((start_2 + len(bases)) - 1) if estimate_unknown_values else 0))
elif ((i % 4) == 2):
pass
elif ((i % 4) == 3):
qualities = line.strip()
fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment])
i += 1
fq_creator.flush_read_tuple() | Convert DwgSim FASTQ file to RNF FASTQ file.
Args:
dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).
fastq_rnf_fo (file): File object of RNF FASTQ.
fai_fo (file): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
estimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).
number_of_read_tuples (int): Estimate of number of simulated read tuples (to set width). | codesearchnet |
def diffuse_horizontal_radiation(self, value=9999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `diffuse_horizontal_radiation`'.format(value))
if (value < 0.0):
raise ValueError('value need to be greater or equal 0.0 for field `diffuse_horizontal_radiation`')
self._diffuse_horizontal_radiation = value | Corresponds to IDD Field `diffuse_horizontal_radiation`
Args:
value (float): value for IDD Field `diffuse_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def _get_fields(mcs, bases, namespace):
fields = [
(name, namespace.pop(name))
for name, attribute
in list(namespace.items())
if isinstance(attribute, BaseField)
]
for base in reversed(bases):
if hasattr(base, mcs._fields_storage_key):
fields = list(
getattr(base, mcs._fields_storage_key).items()
) + fields
return OrderedDict(fields) | Create fields dictionary to be used in resource class namespace.
Pop all field objects from attributes dict (namespace) and store them
under the _fields_storage_key attribute. Also collect all fields from base
classes in an order that ensures fields can be overridden.
Args:
bases: all base classes of created serializer class
namespace (dict): namespace as dictionary of attributes | juraj-google-style |
def __set_type(self, obj, prop):
if TypeHandler.is_pure(obj, prop):
self.args_type = "PURE"
self.pure = SinonBase.Pure()
setattr(self.pure, "func", Wrapper.empty_function)
self.orig_func = None
elif TypeHandler.is_module_function(obj, prop):
self.args_type = "MODULE_FUNCTION"
self.orig_func = None
elif TypeHandler.is_function(obj):
self.args_type = "FUNCTION"
self.orig_func = None
elif TypeHandler.is_module(obj):
self.args_type = "MODULE"
elif TypeHandler.is_instance(obj):
obj = obj.__class__
self.args_type = "MODULE" | Triage type based on arguments
Here are four types of base: PURE, MODULE, MODULE_FUNCTION, FUNCTION
Args:
obj: None, FunctionType, ModuleType, Class, Instance
prop: None, string | juraj-google-style |
def _encode_required_field(self, name: str, containing_type_builder: expressions.Builder, builder: expressions.Builder, element_definition: message.Message) -> Optional[validation_pb2.SqlRequirement]:
element = cast(Any, element_definition)
if not _is_elem_supported(element):
return None
field_name = _last_path_token(builder)
min_size = element.min.value
max_size = element.max.value
element_count = builder.count()
query_list = []
if _fhir_path_data_types.is_collection(builder.return_type) and max_size.isdigit():
query_list.append(element_count <= int(max_size))
if min_size == 1:
query_list.append(builder.exists())
elif min_size > 0:
query_list.append(element_count >= min_size)
if not query_list:
return None
constraint_key = f'{name}-cardinality-is-valid'
description = f'The length of {name} must be maximum {max_size} and minimum {min_size}.'
fhir_path_builder = query_list[0]
for query in query_list[1:]:
fhir_path_builder = fhir_path_builder & query
if constraint_key in self._options.skip_keys:
return None
type_codes = _utils.element_type_codes(element)
if 'Reference' not in type_codes and (not _SKIP_TYPE_CODES.isdisjoint(type_codes)):
return None
result = self._encode_fhir_path_builder_constraint(fhir_path_builder, containing_type_builder)
if result is None:
return None
element_definition_path = self._abs_path_invocation(containing_type_builder)
constraint_key_column_name: str = _key_to_sql_column_name(_path_to_sql_column_name(constraint_key))
column_name_base: str = _path_to_sql_column_name(element_definition_path)
column_name = f'{column_name_base}_{constraint_key_column_name}'
requirement = validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CARDINALITY, element_path=element_definition_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[field_name])
return requirement | Returns `SqlRequirement` for the required field passed.
Args:
name: name of the constraint key.
containing_type_builder: The builder of the Structure definition for the
required field.
builder: The builder containing the element to encode required field for.
element_definition: Element definition of the builder.
Returns:
A `SqlRequirement` representing the requirement generated from
the element. | github-repos |
def infer_return_type(c, input_types, debug=False, depth=5):
try:
if hashable(c) and c in known_return_types:
return known_return_types[c]
elif isinstance(c, types.FunctionType):
return infer_return_type_func(c, input_types, debug, depth)
elif isinstance(c, types.MethodType):
if c.__self__ is not None:
input_types = [Const(c.__self__)] + input_types
return infer_return_type_func(c.__func__, input_types, debug, depth)
elif isinstance(c, BoundMethod):
input_types = [c.type] + input_types
return infer_return_type_func(c.func, input_types, debug, depth)
elif inspect.isclass(c):
if c in typehints.DISALLOWED_PRIMITIVE_TYPES:
return {list: typehints.List[Any], set: typehints.Set[Any], frozenset: typehints.FrozenSet[Any], tuple: typehints.Tuple[Any, ...], dict: typehints.Dict[Any, Any]}[c]
return c
elif c == getattr and len(input_types) == 2 and isinstance(input_types[1], Const):
from apache_beam.typehints import opcodes
return opcodes._getattr(input_types[0], input_types[1].value)
elif isinstance(c, python_callable.PythonCallableWithSource):
return infer_return_type(c._callable, input_types, debug, depth)
else:
return Any
except TypeInferenceError:
if debug:
traceback.print_exc()
return Any
except Exception:
if debug:
sys.stdout.flush()
raise
else:
return Any | Analyses a callable to deduce its return type.
Args:
c: A Python callable to infer the return type of.
input_types: A sequence of inputs corresponding to the input types.
debug: Whether to print verbose debugging information.
depth: Maximum inspection depth during type inference.
Returns:
A TypeConstraint that the return value of this function will (likely)
satisfy given the specified inputs. | github-repos |
def extended_capabilities(self):
buf = (ctypes.c_uint8 * 32)()
self._dll.JLINKARM_GetEmuCapsEx(buf, 32)
return list(buf) | Gets the capabilities of the connected emulator as a list.
Args:
self (JLink): the ``JLink`` instance
Returns:
List of 32 integers which define the extended capabilities based on
their value and index within the list. | juraj-google-style |
def wheel_delta(self):
delta = self._libinput.libinput_event_tablet_tool_get_wheel_delta(self._handle)
changed = self._libinput.libinput_event_tablet_tool_wheel_has_changed(self._handle)
return (delta, changed) | The delta for the wheel in degrees and whether it has changed in
this event.
Returns:
(float, bool): The delta of the wheel, in degrees, compared to
the last event and whether it has changed. | codesearchnet |
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_buffer = utils.BytearrayStream()
if self._located_items:
self._located_items.write(local_buffer, kmip_version=kmip_version)
if self._unique_identifiers:
for unique_identifier in self._unique_identifiers:
unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
self.length = local_buffer.length()
super(LocateResponsePayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer) | Write the data encoding the Locate response payload to a buffer.
Args:
output_buffer (stream): A data buffer in which to encode object
data, supporting a write method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | juraj-google-style |
def get_content(self, url):
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None | Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache | juraj-google-style |
def __random_density_bures(N, rank=None, seed=None):
P = (np.eye(N) + random_unitary(N).data)
G = P.dot(__ginibre_matrix(N, rank, seed))
G = G.dot(G.conj().T)
return (G / np.trace(G)) | Generate a random density matrix from the Bures metric.
Args:
N (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
seed (int): Optional. To set a random seed.
Returns:
ndarray: rho (N,N) a density matrix. | codesearchnet |
def _should_get_another_batch(self, content):
if (('max-keys' in self._options) and (self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT)):
return False
elements = self._find_elements(content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER]))
if (elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true'):
return False
next_marker = elements.get(common._T_NEXT_MARKER)
if (next_marker is None):
self._options.pop('marker', None)
return False
self._options['marker'] = next_marker
return True | Whether to issue another GET bucket call.
Args:
content: response XML.
Returns:
True if should, also update self._options for the next request.
False otherwise. | codesearchnet |
def empty(cls, base_uri=None, draft=AUTO):
return cls.from_object({}, base_uri=base_uri, draft=draft) | Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``. | juraj-google-style |
def text(self, x, y, text):
for (i, char) in enumerate(text):
self.point((x + i), y, char) | Print a text on ASCII canvas.
Args:
x (int): x coordinate where the text should start.
y (int): y coordinate where the text should start.
text (str): string that should be printed. | codesearchnet |
def mkdir(path):
try:
os.makedirs(path)
if not os.path.isdir(path):
raise IOError('path is not a directory')
except OSError as e:
if e.errno == 17 and os.path.isdir(path):
return
raise | Make a directory and its parents.
Args:
path (str): path to create
Returns:
None
Raises:
OSError if the directory cannot be created. | juraj-google-style |
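A usage sketch (the path is hypothetical; repeating the call is safe because errno 17 on an existing directory is swallowed):
mkdir('/tmp/project/output/logs')
mkdir('/tmp/project/output/logs')  # no-op: the directory already exists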
def position(string, index):
if not string:
return None
if index < 0 or index >= len(string):
raise InternalError("Out-of-range index passed to errors.position!")
lines = string.split("\n")
if len(lines) == 1:
return str(index)
before = n = 0
for n, line in enumerate(lines):
future = before + len(line) + 1
if future > index:
break
before = future
return "{0}:{1}".format(n, index - before) | Returns a helpful position description for an index in a
(multi-line) string using the format line:column.
Arguments:
string (str): The string to which the index refers.
index (int): The index of the character in question.
Returns:
A string with the format line:column where line refers to the
1-indexed row/line in which the character is found within the
string and column to the position of the character within
(relative to) that line. | juraj-google-style |
def serialize(self, items, default_flow_style=False):
yaml = self._load_yaml()
items = dict(items)
return yaml.dump(items, default_flow_style=default_flow_style) | Does the inverse of config parsing by taking parsed values and
converting them back to a string representing config file contents.
Args:
items: parsed configuration values (name/value pairs) to serialize.
default_flow_style: defines serialization format (see PyYAML docs) | juraj-google-style |
def _finalize_job(cls, mapreduce_spec, mapreduce_state):
config = util.create_datastore_write_config(mapreduce_spec)
queue_name = util.get_queue_name(mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
done_callback = mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
done_task = None
if done_callback:
done_task = taskqueue.Task(
url=done_callback,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id,
util.CALLBACK_MR_ID_TASK_HEADER),
method=mapreduce_spec.params.get("done_callback_method", "POST"))
@db.transactional(retries=5)
def _put_state():
fresh_state = model.MapreduceState.get_by_job_id(
mapreduce_spec.mapreduce_id)
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping task.", mapreduce_spec.mapreduce_id)
return
mapreduce_state.put(config=config)
if done_task and not _run_task_hook(
mapreduce_spec.get_hooks(),
"enqueue_done_task",
done_task,
queue_name):
done_task.add(queue_name, transactional=True)
_put_state()
logging.info("Final result for job '%s' is '%s'",
mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
cls._clean_up_mr(mapreduce_spec) | Finalize job execution.
Invokes done callback and save mapreduce state in a transaction,
and schedule necessary clean ups. This method is idempotent.
Args:
mapreduce_spec: an instance of MapreduceSpec
mapreduce_state: an instance of MapreduceState | juraj-google-style |
def piola_kirchoff_1(self, def_grad):
if not self.is_symmetric:
raise ValueError("The stress tensor is not symmetric, \
PK stress is based on a symmetric stress tensor.")
def_grad = SquareTensor(def_grad)
return def_grad.det*np.dot(self, def_grad.inv.trans) | calculates the first Piola-Kirchoff stress
Args:
def_grad (3x3 array-like): deformation gradient tensor | juraj-google-style |
def FindExtensionByName(self, full_name):
full_name = _NormalizeFullyQualifiedName(full_name)
(message_name, _, extension_name) = full_name.rpartition('.')
try:
scope = self.FindMessageTypeByName(message_name)
except KeyError:
scope = self.FindFileContainingSymbol(full_name)
return scope.extensions_by_name[extension_name] | Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension. | codesearchnet |
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):
if (book_id is None):
book_id = str(uuid.uuid4())
return os.path.join(prefix, book_id) | Return package path. Use uuid to generate package's directory name.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Returns:
str: Path to the root directory. | codesearchnet |
def reset_sequence(cls, value=None, force=False):
cls._meta.reset_sequence(value, force=force) | Reset the sequence counter.
Args:
value (int or None): the new 'next' sequence value; if None,
recompute the next value from _setup_next_sequence().
force (bool): whether to force-reset parent sequence counters
in a factory inheritance chain. | codesearchnet |
def find(lst, a, case_sensitive=True):
a = force_list(a)
if (not case_sensitive):
lst = [x.lower() for x in lst]
a = [y.lower() for y in a]
return [i for (i, x) in enumerate(lst) if (x in a)] | Return indices of a list which have elements that match an object or list of objects
Args:
lst: list of values
a: object(s) to check equality
case_sensitive: if the search should be case sensitive
Returns:
list: list of indices of lst which equal a | codesearchnet |
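Illustrative calls (results follow from the code above, assuming `force_list` wraps a scalar into a one-element list):
find(['Red', 'green', 'BLUE', 'red'], 'red')                              # -> [3]
find(['Red', 'green', 'BLUE', 'red'], 'red', case_sensitive=False)        # -> [0, 3]
find(['Red', 'green', 'BLUE'], ['green', 'blue'], case_sensitive=False)   # -> [1, 2]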
def json_to_numpy(string_like, dtype=None):
data = json.loads(string_like)
return np.array(data, dtype=dtype) | Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array | codesearchnet |
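A short usage sketch (assuming `json_to_numpy` is importable from the module above):
arr = json_to_numpy('[[1, 2], [3, 4]]', dtype='float32')
# arr.shape == (2, 2) and arr.dtype == np.float32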
def _redirect_with_params(url_name, *args, **kwargs):
url = urlresolvers.reverse(url_name, args=args)
params = parse.urlencode(kwargs, True)
return '{0}?{1}'.format(url, params) | Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string params and their values to build.
Returns:
A properly formatted redirect string. | codesearchnet |
def _export(self, path, variables_saver):
self._saved_model_handler.export(path, variables_saver=variables_saver)
module_def_proto = module_def_pb2.ModuleDef()
module_def_proto.format = module_def_pb2.ModuleDef.FORMAT_V3
module_def_filename = get_module_proto_path(path)
tf_utils.atomic_write_string_to_file(
module_def_filename,
module_def_proto.SerializeToString(),
overwrite=False)
logging.info("Exported TF-Hub module to: %s", path) | Internal.
Args:
path: string where to export the module to.
variables_saver: an unary-function that writes the module variables
checkpoint on the given path. | juraj-google-style |
def i2le_script(number):
if number == 0:
return '00'
for i in range(80):
try:
return number.to_bytes(
length=i,
byteorder='little',
signed=True).hex()
except Exception:
continue | Convert int to signed little endian (l.e.) hex for scripts
Args:
number (int): int value to convert to bytes in l.e. format
Returns:
(str): the hex-encoded signed LE number | juraj-google-style |
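Example values implied by the code above:
i2le_script(0)    # -> '00'
i2le_script(1)    # -> '01'
i2le_script(-1)   # -> 'ff'
i2le_script(500)  # -> 'f401' (0x01F4 encoded little-endian)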
def _extract_id_token(id_token):
if (type(id_token) == bytes):
segments = id_token.split(b'.')
else:
segments = id_token.split(u'.')
if (len(segments) != 3):
raise VerifyJwtTokenError('Wrong number of segments in token: {0}'.format(id_token))
return json.loads(_helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1]))) | Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload. | codesearchnet |
def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):
conversion = cell
if isinstance(cell, (int, float)):
pass
elif isinstance(cell, basestring):
if not cell:
conversion = None
else:
conversion = auto_convert_string_cell(flagable, cell, position, worksheet,
flags, units, parens_as_neg=parens_as_neg)
elif cell != None:
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['unknown-to-string'])
conversion = str(cell)
if not conversion:
conversion = None
else:
pass
return conversion | Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values | juraj-google-style |
def get_shell_code(self, shell=None, parent_environ=None, style=OutputStyle.file):
executor = self._create_executor(interpreter=create_shell(shell),
parent_environ=parent_environ)
if self.load_path and os.path.isfile(self.load_path):
executor.env.REZ_RXT_FILE = self.load_path
self._execute(executor)
return executor.get_output(style) | Get the shell code resulting from intepreting this context.
Args:
shell (str): Shell type, for eg 'bash'. If None, the current shell
type is used.
parent_environ (dict): Environment to interpret the context within,
defaults to os.environ if None.
style (OutputStyle): Style to format shell code in. | juraj-google-style |
def _set_values_internal(self, context, pipeline_key, root_pipeline_key, outputs, result_status):
self._context = context
self._pipeline_key = pipeline_key
self._root_pipeline_key = root_pipeline_key
self._result_status = result_status
self.outputs = outputs | Sets the user-visible values provided as an API by this class.
Args:
context: The _PipelineContext used for this Pipeline.
pipeline_key: The db.Key of this pipeline.
root_pipeline_key: The db.Key of the root pipeline.
outputs: The PipelineFuture for this pipeline.
result_status: The result status of this pipeline. | codesearchnet |
def random_bernoulli(shape, p=0.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10000000.0)
return array_ops.where_v2(random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p, array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype)) | Returns a tensor with random bernoulli distribution of values.
Args:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of bernoulli distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor. | github-repos |
def with_mfa(self, mfa_token):
if hasattr(mfa_token, '__call__'):
self.context.mfa_token = mfa_token.__call__()
else:
self.context.mfa_token = mfa_token
return self | Set the MFA token for the next request.
`mfa_token`s are only good for one request. Use this method to chain into
the protected action you want to perform.
Note: Only useful for Application authentication.
Usage:
account.with_mfa(application.totp.now()).pay(...)
Args:
mfa_token (str/function, optional): TOTP token for the Application
OR a callable/function which will generate such a token when called.
Returns:
self | codesearchnet |
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None,
timeout=None):
transport = DataFilesyncTransport(self.stream)
transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout)
try:
while True:
data = src_file.read(MAX_PUSH_DATA_BYTES)
if not data:
break
transport.write_data('DATA', data, timeout)
mtime = mtime or int(time.time())
transport.write_message(
FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout)
except usb_exceptions.AdbStreamClosedError:
self._check_for_fail_message(transport, sys.exc_info(), timeout)
data_msg = transport.read_message(timeout)
data_msg.assert_command_is('OKAY') | Push a file-like object to the device.
Args:
src_file: File-like object for reading from
filename: Filename to push to on the device
st_mode: stat mode for filename on the device
mtime: modification time to set for the file on the device
timeout: Timeout to use for the send operation.
Raises:
AdbProtocolError: If we get an unexpected response.
AdbRemoteError: If there's a remote error (but valid protocol). | juraj-google-style |
def filter(self, versions, key=(lambda x: x)):
return [x for x in versions if self.check(key(x))] | Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
Returns:
list: A list of the SemanticVersion objects that matched this range | codesearchnet |
def profile_update_args_v3(self, profile):
ij = self.load_install_json(profile.get('install_json', 'install.json'))
ijp = self.install_json_params(ij)
if ((profile.get('args', {}).get('app', {}).get('optional') is None) and (profile.get('args', {}).get('app', {}).get('required') is None)):
app_args = profile['args'].pop('app')
profile['args']['app'] = {}
profile['args']['app']['optional'] = {}
profile['args']['app']['required'] = {}
for arg in self.profile_settings_args_install_json(ij, None):
required = ijp.get(arg).get('required', False)
try:
if required:
profile['args']['app']['required'][arg] = app_args.pop(arg)
else:
profile['args']['app']['optional'][arg] = app_args.pop(arg)
except KeyError:
if self.args.verbose:
print('{}{}Input "{}" not found in profile "{}".'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg, profile.get('profile_name')))
print('{}{}Updating args section to v3 schema for profile {}.'.format(c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name'))) | Update v1 profile args to v3 schema for args.
.. code-block:: javascript
"args": {
"app": {
"required": {
"input_strings": "capitalize",
"tc_action": "Capitalize"
},
"optional": {
"fail_on_error": true
}
}
},
"default": {
"api_access_id": "$env.API_ACCESS_ID",
"api_default_org": "$env.API_DEFAULT_ORG",
},
Args:
profile (dict): The dictionary containing the profile settings. | codesearchnet
def write_filepath(filepath, strategy):
dirpath = os.path.dirname(filepath)
base = os.path.basename(filepath)
return os.path.join(write_dirpath(dirpath, strategy), base) | Returns the writing file path to be used to save file distributedly.
Directory to contain `filepath` would be created if it doesn't exist.
Args:
filepath: Original filepath that would be used without distribution.
strategy: The tf.distribute strategy object currently used.
Returns:
The writing filepath that should be used to save file with distribution. | github-repos |
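A usage sketch under a multi-worker strategy; `model` is assumed to exist in the surrounding training script, and only the chief typically keeps the final file while other workers write to a per-worker temporary directory:
>>> import tensorflow as tf
>>> strategy = tf.distribute.MultiWorkerMirroredStrategy()
>>> ckpt_path = write_filepath('/tmp/train/model.ckpt', strategy)
>>> model.save_weights(ckpt_path)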
def __init__(self, name, type_var, impl_type, type_checker):
assert isinstance(name, str), repr(name)
assert isinstance(impl_type, type), repr(impl_type)
assert not isinstance(impl_type, TypingMeta), repr(impl_type)
assert isinstance(type_var, (type, _TypingBase)), repr(type_var)
self.name = name
self.type_var = type_var
self.impl_type = impl_type
self.type_checker = type_checker | Initializer.
Args:
name: The name, e.g. 'Pattern'.
type_var: The type parameter, e.g. AnyStr, or the
specific type, e.g. str.
impl_type: The implementation type.
type_checker: Function that takes an impl_type instance
and returns a value that should be a type_var instance. | juraj-google-style
def pbc_diff(fcoords1, fcoords2):
fdist = np.subtract(fcoords1, fcoords2)
return (fdist - np.round(fdist)) | Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11] | codesearchnet |
def serialize_to_json(self, name, datas):
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content | Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content. | juraj-google-style |
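A sketch of the expected round trip, assuming `serializer` is an instance of the class this method belongs to:
>>> datas = {'object': '{"size": "small", "color": "#ffffff"}'}
>>> serializer.serialize_to_json('flag--json', datas)
OrderedDict([('size', 'small'), ('color', '#ffffff')])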
def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None, num_workers=8, tfms=(None, None), bs=64):
assert (not ((tfms[0] is None) or (tfms[1] is None))), 'please provide transformations for your train and validation sets'
assert (not os.path.isabs(folder)), 'folder needs to be a relative path'
fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}')))
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, tfms=tfms, bs=bs) | Read in images given a sub-folder and their labels given a numpy array
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
folder: a name of the folder in which training images are contained.
y: numpy array which contains target labels ordered by filenames.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.
If None, default arguments to get_cv_idxs are used.
test_name: a name of the folder which contains test images.
num_workers: number of workers
Returns:
ImageClassifierData | codesearchnet |
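A hedged fastai-0.7-style sketch; `tfms_from_model`, `resnet34`, `get_cv_idxs`, `PATH`, and `classes` are the usual companions of this API but are assumptions here:
>>> tfms = tfms_from_model(resnet34, sz=224)
>>> val_idxs = get_cv_idxs(len(y))
>>> data = ImageClassifierData.from_path_and_array(
...     PATH, 'train', y, classes=classes, val_idxs=val_idxs, tfms=tfms, bs=64)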
def _fits_surface(self, width, height):
assert ((width > 0) and (height > 0))
if (self.rot and ((width > self.width) or (height > self.height))):
(width, height) = (height, width)
if ((width > self.width) or (height > self.height)):
return False
else:
return True | Test whether the surface is big enough to place a rectangle
Arguments:
width (int, float): Rectangle width
height (int, float): Rectangle height
Returns:
boolean: True if it could be placed, False otherwise | codesearchnet |
def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
if (not _is_pil_image(img)):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
assert (isinstance(translate, (tuple, list)) and (len(translate) == 2)), 'Argument translate should be a list or tuple of length 2'
assert (scale > 0.0), 'Argument scale should be positive'
output_size = img.size
center = (((img.size[0] * 0.5) + 0.5), ((img.size[1] * 0.5) + 0.5))
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
kwargs = ({'fillcolor': fillcolor} if (PILLOW_VERSION[0] == '5') else {})
return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs) | Apply affine transformation on the image keeping image center invariant
Args:
img (PIL Image): PIL Image to be rotated.
angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter.
See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0) | codesearchnet |
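A short usage sketch through the public torchvision functional module (the import path is the conventional one; the exact module layout may differ by version):
>>> from PIL import Image
>>> import torchvision.transforms.functional as TF
>>> img = Image.open('photo.jpg')
>>> out = TF.affine(img, angle=15, translate=(10, 0), scale=1.2, shear=0)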
def serialize(self, user=None):
return {'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': (not hasattr(self, 'unsaved')),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key} | Serializes message for given user.
Note:
Should be called before the first save(). Otherwise "is_update" will get the wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object | codesearchnet |
def get_data_node(self, path: DataPath) -> Optional[DataNode]:
addr = self.schema_data.path2route(path)
node = self.schema
for p in addr:
node = node.get_data_child(*p)
if node is None:
return None
return node | Return the data node addressed by a data path.
Args:
path: Data path.
Returns:
Data node if found in the schema, or ``None``.
Raises:
InvalidSchemaPath: If the schema path is invalid. | juraj-google-style |
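A hedged sketch assuming a Yangson-style `DataModel` built from a YANG library file; the factory method, module, and node names are placeholders:
>>> from yangson import DataModel
>>> dm = DataModel.from_file('yang-library.json')          # assumed factory
>>> node = dm.get_data_node('/example-mod:interfaces/interface')
>>> node is not None    # True when the path exists in the schema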
def read_multiple_items(f, container_type, item_type, separator=' '):
return __read(f, (lambda line: container_type((item_type(item) for item in line.split(separator))))) | Extract an iterable from the current line of a file-like object.
Args:
f (file): the file-like object to read from
container_type (type): type of the iterable that will be returned
item_type (type): type of the values that will be elements of the returned iterable
separator (str): the separator between two consecutive items
Returns:
The extracted iterable
Example:
The file "a.input" contains three lines and three comma-separated digits on each::
>>> with open("a.input") as f:
... print(utools.files.read_multiple_items(f, list, int, separator=","))
... print(utools.files.read_multiple_items(f, set, str, separator=","))
... print(utools.files.read_multiple_items(f, tuple, float, separator=","))
...
[1, 2, 3]
{"4", "5", "6"}
(7.0, 8.0, 9.0) | codesearchnet |
def read(self, size=None):
if (size is not None):
read_size = min(size, self.__remaining_bytes)
else:
read_size = self.__remaining_bytes
data = self.__stream.read(read_size)
if ((read_size > 0) and (not data)):
raise exceptions.StreamExhausted(('Not enough bytes in stream; expected %d, exhausted after %d' % (self.__max_bytes, (self.__max_bytes - self.__remaining_bytes))))
self.__remaining_bytes -= len(data)
return data | Read at most size bytes from this slice.
Compared to other streams, there is one case where we may
unexpectedly raise an exception on read: if the underlying stream
is exhausted (i.e. returns no bytes on read), and the size of this
slice indicates we should still be able to read more bytes, we
raise exceptions.StreamExhausted.
Args:
size: If provided, read no more than size bytes from the stream.
Returns:
The bytes read from this slice.
Raises:
exceptions.StreamExhausted | codesearchnet |
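A sketch of the slicing behaviour, assuming the class wraps any file-like object and is constructed with the stream and a byte budget (the constructor shape and class name are assumptions):
>>> import io
>>> sl = _StreamSlice(io.BytesIO(b'abcdef'), 4)   # assumed constructor signature
>>> sl.read(2)
b'ab'
>>> sl.read()        # reads the remaining 2 bytes of the 4-byte slice
b'cd'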
def _get_source(link):
if link.startswith("http:
down = httpkie.Downloader()
return down.download(link)
if os.path.exists(link):
with open(link) as f:
return f.read()
raise UserWarning("html: '%s' is neither URL or data!" % link) | Return source of the `link` whether it is filename or url.
Args:
link (str): Filename or URL.
Returns:
str: Content.
Raises:
UserWarning: When the `link` couldn't be resolved. | juraj-google-style |
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep | Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A Electra sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. | github-repos |
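A usage sketch with the Hugging Face Electra tokenizer this method belongs to; the checkpoint name is illustrative:
>>> from transformers import ElectraTokenizer
>>> tok = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
>>> ids = tok.convert_tokens_to_ids(tok.tokenize('hello world'))
>>> tok.build_inputs_with_special_tokens(ids)   # [CLS] hello world [SEP] as ids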
def MethodCalled(self, mock_method):
for method in self._methods:
if method == mock_method:
self._methods.remove(mock_method)
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self) | Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group. | juraj-google-style |
def sg_parallel(func):
@wraps(func)
def wrapper(**kwargs):
opt = tf.sg_opt(kwargs)
res = []
for i in range(sg_gpus()):
with tf.device('/gpu:%d' % i):
with tf.name_scope('gpu_%d' % i):
with sg_context(reuse=(True if i > 0 else False)):
res.append(func(opt * tf.sg_opt(gpu_index=i)))
return res
return wrapper | Decorates function as multiple gpu support towers.
Args:
func: function to decorate | juraj-google-style |
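A hedged sketch of decorating a tower-building function with sugartensor; the `build_loss` helper is a placeholder, and the important point is that the decorated call returns one result per GPU:
>>> import sugartensor as tf          # sugartensor re-exports TensorFlow with sg_* helpers
>>> @tf.sg_parallel
... def tower(opt):
...     # opt.gpu_index identifies the tower; build and return the per-GPU loss here
...     return build_loss(opt)        # placeholder helper
>>> per_gpu_losses = tower()          # list of length sg_gpus()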
def parent_callback(self, parent_fu):
if (parent_fu.done() is True):
e = parent_fu._exception
if e:
super().set_exception(e)
else:
super().set_result(self.file_obj)
return | Callback from executor future to update the parent.
Args:
- parent_fu (Future): Future returned by the executor along with callback
Returns:
- None
Updates the super() with the result() or exception() | codesearchnet |
def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
self._CopyFromDateTimeValues(date_time_values) | Copies time elements from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC. | codesearchnet |
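A sketch using one of the dfdatetime classes that expose this method; `PosixTime` is a representative choice, not the only one:
>>> from dfdatetime import posix_time
>>> dt = posix_time.PosixTime()
>>> dt.CopyFromDateTimeString('2024-03-01 12:30:45.123456+00:00')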
def incoming(self, messages):
if self._observers:
campfire = self._room.get_campfire()
for message in messages:
for observer in self._observers:
observer(Message(campfire, message)) | Called when incoming messages arrive.
Args:
messages (tuple): Messages (each message is a dict) | codesearchnet |