code | docstring | source
---|---|---
def _pre_suf_fix_filter(t: List, prefix: str, suffix: str) -> bool:
if prefix:
for a_token in t:
if a_token._.n_prefix(len(prefix)) != prefix:
return False
if suffix:
for a_token in t:
if a_token._.n_suffix(len(suffix)) != suffix:
return False
return True | Prefix and Suffix filter
Args:
t: List, list of tokens
prefix: str
suffix: str
Returns: bool | juraj-google-style |
def get_grouped_indices(self, voigt=False, **kwargs):
if voigt:
array = self.voigt
else:
array = self
indices = list(itertools.product(*[range(n) for n in array.shape]))
remaining = indices.copy()
grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs))))]
remaining = [i for i in remaining if (i not in grouped[0])]
while remaining:
new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs))))
grouped.append(new)
remaining = [i for i in remaining if (i not in new)]
return [g for g in grouped if g] | Gets index sets for equivalent tensor values
Args:
voigt (bool): whether to get grouped indices
of voigt or full notation tensor, defaults
to false
**kwargs: keyword args for np.isclose. Can take atol
and rtol for absolute and relative tolerance, e.g.
>>> tensor.group_array_indices(atol=1e-8)
or
>>> tensor.group_array_indices(rtol=1e-5)
Returns:
list of index groups where tensor values are equivalent to
within tolerances | codesearchnet |
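A minimal standalone sketch of the grouping idea above, using plain numpy rather than the tensor class itself (array values are illustrative):
import numpy as np
arr = np.array([[0.0, 1.0], [1.0, 2.0]])
zero_group = list(zip(*np.where(np.isclose(arr, 0))))         # [(0, 0)] -- the first group built above
one_group = list(zip(*np.where(np.isclose(arr, arr[0, 1]))))  # [(0, 1), (1, 0)] -- indices sharing the value 1.0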
def integer_value_convert(dictin, dropfailedvalues=False):
return key_value_convert(dictin, valuefn=int, dropfailedvalues=dropfailedvalues) | Convert values of dictionary to integers
Args:
dictin (DictUpperBound): Input dictionary
dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.
Returns:
Dict: Dictionary with values converted to integers | juraj-google-style |
def hash_file(path, block_size=65536):
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest() | Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block | juraj-google-style |
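A usage sketch for hash_file; the file path below is hypothetical:
digest = hash_file('/tmp/example.bin')           # 64-character hex string (SHA-256)
assert digest == hash_file('/tmp/example.bin')   # identical content always yields the identical digest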
def from_intlist(int_list, *qregs):
if not all((isinstance(i, int) for i in int_list)):
raise LayoutError('Expected a list of ints')
if len(int_list) != len(set(int_list)):
raise LayoutError('Duplicate values not permitted; Layout is bijective.')
n_qubits = sum(reg.size for reg in qregs)
if len(int_list) < n_qubits:
err_msg = 'Integer list length must equal number of qubits in circuit.'
raise LayoutError(err_msg)
out = Layout()
main_idx = 0
for qreg in qregs:
for idx in range(qreg.size):
out[(qreg, idx)] = int_list[main_idx]
main_idx += 1
if main_idx != len(int_list):
for int_item in int_list[main_idx:]:
out[int_item] = None
return out | Converts a list of integers to a Layout
mapping virtual qubits (index of the list) to
physical qubits (the list values).
Args:
int_list (list): A list of integers.
*qregs (QuantumRegisters): The quantum registers to apply
the layout to.
Returns:
Layout: The corresponding Layout object.
Raises:
LayoutError: Invalid input layout. | juraj-google-style |
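A hedged usage sketch, assuming Qiskit's QuantumRegister and the Layout class this static method belongs to:
from qiskit import QuantumRegister
qr = QuantumRegister(3, 'q')
layout = Layout.from_intlist([2, 0, 1], qr)  # virtual (qr, 0) -> physical 2, (qr, 1) -> 0, (qr, 2) -> 1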
def create_impression_event(self, experiment, variation_id, user_id, attributes):
params = self._get_common_params(user_id, attributes)
impression_params = self._get_required_params_for_impression(experiment, variation_id)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS) | Create impression Event to be sent to the logging endpoint.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Event object encapsulating the impression event. | juraj-google-style |
def are_all_matches_terminal(self,
predicate: Callable[[ops.Operation], bool]):
return all(
self.next_moment_operating_on(op.qubits, i + 1) is None for
(i, op) in self.findall_operations(predicate)
) | Check whether all of the ops that satisfy a predicate are terminal.
Args:
predicate: A predicate on ops.Operations which is being checked.
Returns:
Whether or not all `Operation` s in a circuit that satisfy the
given predicate are terminal. | juraj-google-style |
def Run(script, container=None, exit_on_error=False, gas=Fixed8.Zero(), test_mode=True):
from neo.Core.Blockchain import Blockchain
from neo.SmartContract.StateMachine import StateMachine
from neo.EventHub import events
bc = Blockchain.Default()
accounts = DBCollection(bc._db, DBPrefix.ST_Account, AccountState)
assets = DBCollection(bc._db, DBPrefix.ST_Asset, AssetState)
validators = DBCollection(bc._db, DBPrefix.ST_Validator, ValidatorState)
contracts = DBCollection(bc._db, DBPrefix.ST_Contract, ContractState)
storages = DBCollection(bc._db, DBPrefix.ST_Storage, StorageItem)
script_table = CachedScriptTable(contracts)
service = StateMachine(accounts, validators, assets, contracts, storages, None)
engine = ApplicationEngine(trigger_type=TriggerType.Application, container=container, table=script_table, service=service, gas=gas, testMode=test_mode, exit_on_error=exit_on_error)
script = binascii.unhexlify(script)
engine.LoadScript(script)
try:
success = engine.Execute()
engine.testMode = True
service.ExecutionCompleted(engine, success)
except Exception as e:
engine.testMode = True
service.ExecutionCompleted(engine, False, e)
for event in service.events_to_dispatch:
events.emit(event.event_type, event)
return engine | Runs a script in a test invoke environment
Args:
script (bytes): The script to run
container (neo.Core.TX.Transaction): [optional] the transaction to use as the script container
Returns:
ApplicationEngine | codesearchnet |
def execute(self, data_dict, callback, group=None, trace=None):
group = (group or self.group)
context = _ScopedContext(data_dict, self.undefined_str, group=group)
_Execute(self._program.Statements(), context, callback, trace) | Low level method to expand the template piece by piece.
Args:
data_dict: The JSON data dictionary.
callback: A callback which should be called with each expanded token.
group: Dictionary of name -> Template instance (for styles)
Example: You can pass 'f.write' as the callback to write directly to a file
handle. | codesearchnet |
def handle(self, args, kwargs):
return self.NOT_SUPPORTED | Handle this dispatcher's operation with the specified arguments.
If this operation dispatcher can handle the given arguments, then
return an appropriate value (or raise an appropriate exception).
Args:
args: The arguments to the operation.
kwargs: The keyword arguments to the operation.
Returns:
The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this
dispatcher can not handle the given arguments. | github-repos |
def _follow_leafref(self, xpath: 'Expr', init: 'TerminalNode') -> Optional['DataNode']:
if isinstance(xpath, LocationPath):
lft = self._follow_leafref(xpath.left, init)
if (lft is None):
return None
return lft._follow_leafref(xpath.right, init)
elif isinstance(xpath, Step):
if (xpath.axis == Axis.parent):
return self.data_parent()
elif (xpath.axis == Axis.child):
if (isinstance(self, InternalNode) and xpath.qname):
qname = (xpath.qname if xpath.qname[1] else (xpath.qname[0], init.ns))
return self.get_data_child(*qname)
elif isinstance(xpath, Root):
return self.schema_root()
return None | Return the data node referred to by a leafref path.
Args:
xpath: XPath expression compiled from a leafref path.
init: initial context node | codesearchnet |
def _SerializeAttributeContainer(self, attribute_container):
if self._serializers_profiler:
self._serializers_profiler.StartTiming(attribute_container.CONTAINER_TYPE)
try:
attribute_container_data = self._serializer.WriteSerialized(attribute_container)
if (not attribute_container_data):
raise IOError('Unable to serialize attribute container: {0:s}.'.format(attribute_container.CONTAINER_TYPE))
attribute_container_data = attribute_container_data.encode('utf-8')
finally:
if self._serializers_profiler:
self._serializers_profiler.StopTiming(attribute_container.CONTAINER_TYPE)
return attribute_container_data | Serializes an attribute container.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
bytes: serialized attribute container.
Raises:
IOError: if the attribute container cannot be serialized.
OSError: if the attribute container cannot be serialized. | codesearchnet |
def xxd_output_to_object(input_cc_file):
model_bytes = xxd_output_to_bytes(input_cc_file)
return convert_bytearray_to_object(model_bytes) | Converts xxd output C++ source file to object.
Args:
input_cc_file: Full path name to the C++ source file dumped by xxd
Raises:
RuntimeError: If input_cc_file path is invalid.
IOError: If input_cc_file cannot be opened.
Returns:
A python object corresponding to the input tflite file. | github-repos |
def parse_object_like_triples(self):
self.rdf.triples = SimpleNamespace()
for s,p,o in self.rdf.graph:
ns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)
if not hasattr(self.rdf.triples, ns_prefix):
setattr(self.rdf.triples, ns_prefix, SimpleNamespace())
if not hasattr(getattr(self.rdf.triples, ns_prefix), predicate):
setattr(getattr(self.rdf.triples, ns_prefix), predicate, [])
getattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o) | method to parse triples from self.rdf.graph for object-like
access
Args:
None
Returns:
None: sets self.rdf.triples | juraj-google-style |
def get_steps_branch_len(self, length):
return log((length / self.length), min(self.branches[0][0])) | Get how many steps are needed for a given branch length.
Returns:
float: The age the tree must achieve to reach the given branch length. | codesearchnet |
def Remove(self, row):
if row == 0 or row > self.size:
raise TableError("Attempt to remove header row")
new_table = []
for t_row in self._table:
if t_row.row != row:
new_table.append(t_row)
if t_row.row > row:
t_row.row -= 1
self._table = new_table | Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row. | juraj-google-style |
def filter(self, *query_filter):
for query in query_filter:
self.query.append(query)
return self | Set the query filter to perform the query with
Args:
*query_filter: Simplified Query Language filter | juraj-google-style |
def upload(self, *args, **kwargs):
self.prepare()
metadata = self.create_metadata(*args, **kwargs)
package = self.build_napp_package(metadata.get('name'))
NAppsClient().upload_napp(metadata, package) | Create package and upload it to NApps Server.
Raises:
FileNotFoundError: If kytos.json is not found. | codesearchnet |
def replaceFA(self, faDataType: int, xml: str):
self.client.replaceFA(faDataType, xml) | Replaces Financial Advisor's settings.
Args:
faDataType: See :meth:`.requestFA`.
xml: The XML-formatted configuration string. | juraj-google-style |
def read(self, fileName, **kwargs):
if self._langext is not None:
with open(fileName, 'r') as fin:
newmodel = self._langext.translate(fin.read(), **kwargs)
with open(fileName+'.translated', 'w') as fout:
fout.write(newmodel)
fileName += '.translated'
lock_and_call(
lambda: self._impl.read(fileName),
self._lock
)
self._errorhandler_wrapper.check() | Interprets the specified file (script or model or mixed). As a side
effect, it invalidates all entities (as the passed file can contain any
arbitrary command); the lists of entities will be re-populated lazily
(at first access).
Args:
fileName: Full path to the file.
Raises:
RuntimeError: in case the file does not exist. | juraj-google-style |
def _set_least_batch_id(self, txn_signature):
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least | Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set. | juraj-google-style |
def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse:
kwargs.update({'channel': channel})
return self.api_call('mpim.close', json=kwargs) | Closes a multiparty direct message channel.
Args:
channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890' | codesearchnet |
def address(self, compressed=True, testnet=False):
version = '0x'
return version + binascii.hexlify(self.keccak[12:]).decode('ascii') | Address property that returns the Base58Check
encoded version of the HASH160.
Args:
compressed (bool): Whether or not the compressed key should
be used.
testnet (bool): Whether or not the key is intended for testnet
usage. False indicates mainnet usage.
Returns:
bytes: Base58Check encoded string | juraj-google-style |
def save_as(self, filename=None):
if (filename is None):
filename = self.filename
if (filename is None):
filename = self.default_filename
if (filename is None):
raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
self._do_save_as(filename)
self.filename = filename | Dumps object contents into file on disk.
Args:
filename (optional): defaults to self.filename. If passed, self.filename
will be updated to filename. | codesearchnet |
def days(start, end=None):
return iterate.between(start, datetime.timedelta(days=1), end) | Iterate over the days between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at; if not given, the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a day apart. | codesearchnet |
def unicode_convert(obj):
try:
if isinstance(obj, dict):
return {unicode_convert(key): unicode_convert(value) for (key, value) in obj.items()}
elif isinstance(obj, list):
return [unicode_convert(element) for element in obj]
elif isinstance(obj, str):
return obj
elif isinstance(obj, six.text_type):
return obj.encode('utf-8')
elif isinstance(obj, six.integer_types):
return obj
else:
return obj
except:
return obj | Converts unicode objects to ASCII.
Args:
obj (object): The object to convert.
Returns:
The object converted to ASCII, if possible. For ``dict`` and ``list``, the object type is maintained. | codesearchnet
def bind_extensions(app):
app.db = app.config['PUZZLE_BACKEND']
app.db.init_app(app)
bootstrap.init_app(app)
markdown(app)
@app.template_filter('islist')
def islist(object):
return isinstance(object, (tuple, list)) | Configure extensions.
Args:
app (Flask): initialized Flask app instance | juraj-google-style |
def WritePathStatHistory(self, client_path, stat_entries):
client_path_history = ClientPathHistory()
for (timestamp, stat_entry) in iteritems(stat_entries):
client_path_history.AddStatEntry(timestamp, stat_entry)
self.MultiWritePathHistory({client_path: client_path_history}) | Writes a collection of `StatEntry` observed for particular path.
Args:
client_path: A `ClientPath` instance.
stat_entries: A dictionary with timestamps as keys and `StatEntry`
instances as values. | codesearchnet |
def all_tokens(self, delimiter=' ', label_list_ids=None):
tokens = set()
for utterance in self.utterances.values():
tokens = tokens.union(utterance.all_tokens(delimiter=delimiter, label_list_ids=label_list_ids))
return tokens | Return a list of all tokens occurring in one of the labels in the corpus.
Args:
delimiter (str): The delimiter used to split labels into tokens
(see :meth:`audiomate.annotations.Label.tokenized`).
label_list_ids (list): If not None, only labels from label-lists with an idx contained in this list
are considered.
Returns:
:class:`set`: A set of distinct tokens. | juraj-google-style |
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
) | Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist. | juraj-google-style |
def _GetElementDataTypeDefinition(self, data_type_definition):
if (not data_type_definition):
raise errors.FormatError('Missing data type definition')
element_data_type_definition = getattr(data_type_definition, 'element_data_type_definition', None)
if (not element_data_type_definition):
raise errors.FormatError('Invalid data type definition missing element')
return element_data_type_definition | Retrieves the element data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
DataTypeDefinition: element data type definition.
Raises:
FormatError: if the element data type cannot be determined from the data
type definition. | codesearchnet |
def GetParserObjects(cls, parser_filter_expression=None):
includes, excludes = cls._GetParserFilters(parser_filter_expression)
parser_objects = {}
for parser_name, parser_class in iter(cls._parser_classes.items()):
if not includes and parser_name in excludes:
continue
if includes and parser_name not in includes:
continue
parser_object = parser_class()
if parser_class.SupportsPlugins():
plugin_includes = None
if parser_name in includes:
plugin_includes = includes[parser_name]
parser_object.EnablePlugins(plugin_includes)
parser_objects[parser_name] = parser_object
return parser_objects | Retrieves the parser objects.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
Returns:
dict[str, BaseParser]: parsers per name. | juraj-google-style |
def load_config(paths=DEFAULT_CONFIG_PATHS):
config = Config()
for path in paths:
if os.path.isfile(path):
config.load_pyfile(path)
return config | Attempt to load config from paths, in order.
Args:
paths (List[string]): list of paths to python files
Return:
Config: loaded config | juraj-google-style |
def read(self, n):
d = b''
while n:
try:
block = self._socket.recv(n)
except socket.error:
block = None
if not block:
raise EOFError('Socket closed')
d += block
n -= len(block)
return d | Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed. | juraj-google-style |
def malloc(self, key, shape, dtype):
if key not in self._memory or self._memory[key].shape != shape or self._memory[key].dtype != dtype:
self._memory[key] = Shmem(key, shape, dtype, self._uuid)
return self._memory[key].np_array | Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block.
Args:
key (str): The key to identify the block.
shape (list of int): The shape of the numpy array to allocate.
dtype (type): The numpy data type (e.g. np.float32).
Returns:
np.ndarray: The numpy array that is positioned on the shared memory. | juraj-google-style |
def _on_scan(self, info):
device_id = info['uuid']
expiration_time = info.get('validity_period', 60)
infocopy = deepcopy(info)
infocopy['expiration_time'] = monotonic() + expiration_time
with self._scan_lock:
self._scanned_devices[device_id] = infocopy | Callback called when a new device is discovered on this CMDStream
Args:
info (dict): Information about the scanned device | juraj-google-style |
def from_rfc3339(value):
return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc) | Convert a microsecond-precision timestamp to datetime.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC. | codesearchnet |
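A usage sketch, assuming _RFC3339_MICROS is the usual '%Y-%m-%dT%H:%M:%S.%fZ' format string:
dt = from_rfc3339('2024-01-02T03:04:05.000006Z')
# -> datetime.datetime(2024, 1, 2, 3, 4, 5, 6, tzinfo=<UTC>)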
def Read(self, file_object):
try:
self.root_key = biplist.readPlist(file_object)
except (
biplist.NotBinaryPlistException,
biplist.InvalidPlistException) as exception:
raise IOError(exception) | Reads a plist from a file-like object.
Args:
file_object (dfvfs.FileIO): a file-like object containing plist data.
Raises:
IOError: if the plist file-like object cannot be read.
OSError: if the plist file-like object cannot be read. | juraj-google-style |
def set_static_dns(iface, *addrs):
if ((addrs == ()) or (str(addrs[0]).lower() == 'none')):
return {'Interface': iface, 'DNS Server': 'No Changes'}
if (str(addrs[0]).lower() == '[]'):
log.debug('Clearing list of DNS servers')
cmd = ['netsh', 'interface', 'ip', 'set', 'dns', 'name={0}'.format(iface), 'source=static', 'address=none']
__salt__['cmd.run'](cmd, python_shell=False)
return {'Interface': iface, 'DNS Server': []}
addr_index = 1
for addr in addrs:
if (addr_index == 1):
cmd = ['netsh', 'interface', 'ip', 'set', 'dns', 'name={0}'.format(iface), 'source=static', 'address={0}'.format(addr), 'register=primary']
__salt__['cmd.run'](cmd, python_shell=False)
addr_index = (addr_index + 1)
else:
cmd = ['netsh', 'interface', 'ip', 'add', 'dns', 'name={0}'.format(iface), 'address={0}'.format(addr), 'index={0}'.format(addr_index)]
__salt__['cmd.run'](cmd, python_shell=False)
addr_index = (addr_index + 1)
return {'Interface': iface, 'DNS Server': addrs} | Set static DNS configuration on a Windows NIC
Args:
iface (str): The name of the interface to set
addrs (*):
One or more DNS servers to be added. To clear the list of DNS
servers pass an empty list (``[]``). If undefined or ``None`` no
changes will be made.
Returns:
dict: A dictionary containing the new DNS settings
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.1'
salt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.252' '192.168.1.253' | codesearchnet |
def inside_cell( self, r ):
centre = np.array( [ 0.5, 0.5, 0.5 ] )
new_r = self.nearest_image( centre, r )
return new_r | Given a fractional-coordinate, if this lies outside the cell return the equivalent point inside the cell.
Args:
r (np.array): Fractional coordinates of a point (this may be outside the cell boundaries).
Returns:
(np.array): Fractional coordinates of an equivalent point, inside the cell boundaries. | juraj-google-style |
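A worked example of the wrapping described above; the cell object name is hypothetical and the result assumes nearest_image performs standard periodic wrapping into [0, 1):
import numpy as np
r = np.array([1.2, -0.3, 0.5])
cell.inside_cell(r)  # approximately array([0.2, 0.7, 0.5])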
def launch_subshell(self, shell_cls, cmd, args, *, prompt=None, context={}):
readline.write_history_file(self.history_fname)
prompt = (prompt if prompt else shell_cls.__name__)
mode = _ShellBase._Mode(shell=self, cmd=cmd, args=args, prompt=prompt, context=context)
shell = shell_cls(batch_mode=self.batch_mode, debug=self.debug, mode_stack=(self._mode_stack + [mode]), pipe_end=self._pipe_end, root_prompt=self.root_prompt, stdout=self.stdout, stderr=self.stderr, temp_dir=self._temp_dir)
self.print_debug("Leave parent shell '{}'".format(self.prompt))
exit_directive = shell.cmdloop()
self.print_debug("Enter parent shell '{}': {}".format(self.prompt, exit_directive))
readline.clear_history()
if os.path.isfile(self.history_fname):
readline.read_history_file(self.history_fname)
if (not (exit_directive is True)):
return exit_directive | Launch a subshell.
The doc string of the cmdloop() method explains how shell histories and
history files are saved and restored.
The design of the _ShellBase class encourages launching of subshells through
the subshell() decorator function. Nonetheless, the user has the option
of directly launching subshells via this method.
Arguments:
shell_cls: The _ShellBase class object to instantiate and launch.
args: Arguments used to launch this subshell.
prompt: The name of the subshell. The default, None, means
to use the shell_cls.__name__.
context: A dictionary to pass to the subshell as its context.
Returns:
'root': Inform the parent shell to keep exiting until the root shell
is reached.
'all': Exit the command line.
False, None, or anything that are evaluated as False: Inform the
parent shell to stay in that parent shell.
An integer indicating the depth of shell to exit to. 0 = root shell. | codesearchnet |
def data_string_compare(db_data, user_data):
db_data = ''.join(db_data.split())
user_data = ''.join(user_data.split())
if operator.eq(db_data, user_data):
return True
return False | Validate string removing all white space before comparison.
Args:
db_data (str): The data store in Redis.
user_data (str): The user provided data.
Returns:
bool: True if the data passed validation. | juraj-google-style |
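A quick usage sketch: whitespace differences are ignored, other differences are not:
data_string_compare('a b  c', 'abc')  # True  -- whitespace is stripped before comparing
data_string_compare('abc', 'abd')     # False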
def _ReadEncodedData(self, read_size):
encoded_data = self._file_object.read(read_size)
read_count = len(encoded_data)
self._encoded_data = b''.join([self._encoded_data, encoded_data])
(self._decoded_data, self._encoded_data) = self._decoder.Decode(self._encoded_data)
self._decoded_data_size = len(self._decoded_data)
return read_count | Reads encoded data from the file-like object.
Args:
read_size (int): number of bytes of encoded data to read.
Returns:
int: number of bytes of encoded data read. | codesearchnet |
def from_string(cls, string, format_=None, fps=None, **kwargs):
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs) | Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text) | juraj-google-style |
def _get_env(key, default=None, coerce=(lambda x: x), required=False):
try:
value = os.environ[key]
except KeyError:
if (required is True):
raise RequiredSettingMissing(key)
else:
return default
try:
return coerce(value)
except Exception:
raise CoercianError(key, value, coerce) | Return env var coerced into a type other than string.
This function extends the standard os.getenv function to enable
the coercion of values into data types other than string (all env
vars are strings by default).
Args:
key: string, the name of the env var to look up
Kwargs:
default: the default value to return if the env var does not exist. NB the
default value is **not** coerced, and is assumed to be of the correct type.
coerce: a function that is used to coerce the value returned into
another type
required: bool, if True, then a RequiredSettingMissing error is raised
if the env var does not exist.
Returns the env var, passed through the coerce function | codesearchnet |
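A usage sketch showing the coerce and default behaviour (variable names are illustrative):
import os
os.environ['MAX_RETRIES'] = '5'
retries = _get_env('MAX_RETRIES', default=3, coerce=int)             # 5, coerced to int
debug = _get_env('DEBUG', default=False, coerce=lambda x: x == '1')  # False -- default returned uncoerced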
def run(self, qobj, backend_options=None):
self._set_options(qobj_config=qobj.config, backend_options=backend_options)
job_id = str(uuid.uuid4())
job = BasicAerJob(self, job_id, self._run_job, qobj)
job.submit()
return job | Run qobj asynchronously.
Args:
qobj (Qobj): payload of the experiment
backend_options (dict): backend options
Returns:
BasicAerJob: derived from BaseJob
Additional Information:
backend_options: Is a dict of options for the backend. It may contain
* "initial_statevector": vector_like
The "initial_statevector" option specifies a custom initial
initial statevector for the simulator to be used instead of the all
zero state. This size of this vector must be correct for the number
of qubits in all experiments in the qobj.
Example::
backend_options = {
"initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
} | codesearchnet |
def draw_point(self, x, y):
check_int_err(lib.SDL_RenderDrawPoint(self._ptr, x, y)) | Draw a point on the current rendering target.
Args:
x (int): The x coordinate of the point.
y (int): The y coordinate of the point.
Raises:
SDLError: If an error is encountered. | juraj-google-style |
def source_required(src_file):
if not src_file.exists():
return True
required = True
hash_file = src_file.with_suffix(".hash", depth=0)
LOG.debug("Hash file location: %s", hash_file)
if hash_file.exists():
new_hash = get_hash_of_dirs(src_file)
with open(hash_file, 'r') as h_file:
old_hash = h_file.readline()
required = not new_hash == old_hash
if required:
from benchbuild.utils.cmd import rm
rm("-r", src_file)
rm(hash_file)
if required:
LOG.info("Source required for: %s", src_file)
LOG.debug("Reason: src-exists: %s hash-exists: %s", src_file.exists(),
hash_file.exists())
return required | Check if a download is required.
Args:
src_file: The filename to check for.
src_root: The path we find the file in.
Returns:
True, if we need to download something, False otherwise. | juraj-google-style |
def update_thread(cls, session, conversation, thread):
data = thread.to_api()
data['reload'] = True
return cls(('/conversations/%s/threads/%d.json' % (conversation.id, thread.id)), data=data, request_type=RequestPaginator.PUT, singleton=True, session=session) | Update a thread.
Args:
session (requests.sessions.Session): Authenticated session.
conversation (helpscout.models.Conversation): The conversation
that the thread belongs to.
thread (helpscout.models.Thread): The thread to be updated.
Returns:
helpscout.models.Conversation: Conversation including freshly
updated thread. | codesearchnet |
def FromFile(cls, in_path):
with open(in_path, 'rb') as infile:
in_data = json.load(infile)
if ('trace' not in in_data or 'selectors' not in in_data):
raise ArgumentError('Invalid trace file format', keys=in_data.keys(), expected=('trace', 'selectors'))
selectors = [DataStreamSelector.FromString(x) for x in in_data['selectors']]
readings = [IOTileReading(x['time'], DataStream.FromString(x['stream']).encode(), x['value'], reading_id=x['reading_id']) for x in in_data['trace']]
return SimulationTrace(readings, selectors=selectors) | Load a previously saved ascii representation of this simulation trace.
Args:
in_path (str): The path of the input file that we should load.
Returns:
SimulationTrace: The loaded trace object. | codesearchnet |
def __init__(self, input_lists, skip_node_names=None, destination_node_name=None):
self._input_lists = input_lists
self._skip_node_names = skip_node_names
self._inputs = []
self._visited_nodes = []
self._depth_count = 0
self._depth_list = []
self._destination_node_name = destination_node_name | Constructor of _DFSGraphTracer.
Args:
input_lists: A list of dicts. Each dict is an adjacency (input) map from
the recipient node name as the key and the list of input node names
as the value.
skip_node_names: Optional: a list of node names to skip tracing.
destination_node_name: Optional: destination node name. If not `None`, it
should be the name of a destination not as a str and the graph tracing
will raise GraphTracingReachedDestination as soon as the node has been
reached.
Raises:
GraphTracingReachedDestination: if destination_node_name is not None and
the specified node is reached. | github-repos |
def traverse_inorder(self, leaves=True, internal=True):
for node in self.root.traverse_inorder(leaves=leaves, internal=internal):
yield node | Perform an inorder traversal of the ``Node`` objects in this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` | juraj-google-style |
def wb020(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wb020`'.format(value))
self._wb020 = value | Corresponds to IDD Field `wb020`
Wet-bulb temperature corresponding to 02.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `wb020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def __init__(self, message: str, p: processor.Processor):
self._message = message
self._p = p
self._start = None
self._ttft = None
self._model_call_event = asyncio.Event()
self._model_call_event.clear() | Wraps a processor to provide performance messaging.
Should only be used for processors that consume their entire input before
producing output (such as non-streaming or unidirectional/single streaming
model calls). The TTFT is estimated by first waiting until the input stream is
completely sent to the processor (`start` time is then set). When the
processor outputs its first token, the duration from `start` is then
reported.
In a bidirectional streaming setup, the TTFT will not be reported at all.
Args:
message: header of the status chunk that will be returned. It is used to
identify different calls to this function.
p: processor for which we need to compute ttft. | github-repos
def _ParseFSMState(self, template):
if (not template):
return
state_name = ''
for line in template:
self._line_num += 1
line = line.rstrip()
if (line and (not self.comment_regex.match(line))):
if ((not self.state_name_re.match(line)) or (len(line) > self.MAX_NAME_LEN) or (line in TextFSMRule.LINE_OP) or (line in TextFSMRule.RECORD_OP)):
raise TextFSMTemplateError(("Invalid state name: '%s'. Line: %s" % (line, self._line_num)))
state_name = line
if (state_name in self.states):
raise TextFSMTemplateError(("Duplicate state name: '%s'. Line: %s" % (line, self._line_num)))
self.states[state_name] = []
self.state_list.append(state_name)
break
for line in template:
self._line_num += 1
line = line.rstrip()
if (not line):
break
if self.comment_regex.match(line):
continue
if (not (line.startswith(' ^') or line.startswith('\t^'))):
raise TextFSMTemplateError(("Missing white space or carat ('^') before rule. Line: %s" % self._line_num))
self.states[state_name].append(TextFSMRule(line, self._line_num, self.value_map))
return state_name | Extracts State and associated Rules from body of template file.
After the Value definitions the remainder of the template is
state definitions. The routine is expected to be called iteratively
until no more states remain - indicated by returning None.
The routine checks that each state name is a well-formed string, does
not clash with reserved names, and is unique.
Args:
template: Valid template file after Value definitions
have already been read.
Returns:
Name of the state parsed from file. None otherwise.
Raises:
TextFSMTemplateError: If any state definitions are invalid. | codesearchnet |
def _embedPayload(slaveaddress, mode, functioncode, payloaddata):
_checkSlaveaddress(slaveaddress)
_checkMode(mode)
_checkFunctioncode(functioncode, None)
_checkString(payloaddata, description='payload')
firstPart = ((_numToOneByteString(slaveaddress) + _numToOneByteString(functioncode)) + payloaddata)
if (mode == MODE_ASCII):
request = (((_ASCII_HEADER + _hexencode(firstPart)) + _hexencode(_calculateLrcString(firstPart))) + _ASCII_FOOTER)
else:
request = (firstPart + _calculateCrcString(firstPart))
return request | Build a request from the slaveaddress, the function code and the payload data.
Args:
* slaveaddress (int): The address of the slave.
* mode (str): The modbus protocol mode (MODE_RTU or MODE_ASCII)
* functioncode (int): The function code for the command to be performed. Can for example be 16 (Write register).
* payloaddata (str): The byte string to be sent to the slave.
Returns:
The built (raw) request string for sending to the slave (including CRC etc).
Raises:
ValueError, TypeError.
The resulting request has the format:
* RTU Mode: slaveaddress byte + functioncode byte + payloaddata + CRC (which is two bytes).
* ASCII Mode: header (:) + slaveaddress (2 characters) + functioncode (2 characters) + payloaddata + LRC (which is two characters) + footer (CRLF)
The LRC or CRC is calculated from the byte string made up of slaveaddress + functioncode + payloaddata.
The header, LRC/CRC, and footer are excluded from the calculation. | codesearchnet |
def __new__(mcs, classname, bases, class_dict):
options = {}
required = set()
for name, option in class_dict.iteritems():
if isinstance(option, _Option):
options[name] = option
if option.required:
required.add(name)
for name in options:
class_dict.pop(name)
class_dict[mcs._OPTIONS] = options
class_dict[mcs._REQUIRED] = required
cls = type.__new__(mcs, classname, bases, class_dict)
if object not in bases:
parent_options = {}
for c in reversed(cls.__mro__):
if mcs._OPTIONS in c.__dict__:
parent_options.update(c.__dict__[mcs._OPTIONS])
if mcs._REQUIRED in c.__dict__:
required.update(c.__dict__[mcs._REQUIRED])
for k, v in parent_options.iteritems():
if k not in options:
options[k] = v
return cls | Creates a _Config class and modifies its class dict.
Args:
classname: name of the class.
bases: a list of base classes.
class_dict: original class dict.
Returns:
A new _Config class. The modified class will have two fields.
_options field is a dict from option name to _Option objects.
_required field is a set of required option names. | juraj-google-style |
def run_inference(self, batch: Sequence[str], model: _VLLMModelServer, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return asyncio.run(self._async_run_inference(batch, model, inference_args)) | Runs inferences on a batch of text strings.
Args:
batch: A sequence of examples as text strings.
model: A _VLLMModelServer containing info for connecting to the server.
inference_args: Any additional arguments for an inference.
Returns:
An Iterable of type PredictionResult. | github-repos |
def __init__(self, scope, parent):
CodeEntity.__init__(self, scope, parent)
self._si = -1 | Constructor for statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree. | juraj-google-style |
def showAddColumnDialog(self, triggered):
if triggered:
dialog = AddAttributesDialog(self)
dialog.accepted.connect(self.addColumn)
dialog.rejected.connect(self.uncheckButton)
dialog.show() | Display the dialog to add a column to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the dialog will be created and shown. | codesearchnet |
def overlapping(self, variant_obj):
category = 'snv' if variant_obj['category'] == 'sv' else 'sv'
query = {
'$and': [
{'case_id': variant_obj['case_id']},
{'category': category},
{'hgnc_ids' : { '$in' : variant_obj['hgnc_ids']}}
]
}
sort_key = [('rank_score', pymongo.DESCENDING)]
variants = self.variant_collection.find(query).sort(sort_key).limit(30)
return variants | Return overlapping variants.
Look at the genes that a variant overlaps to.
Then return all variants that overlap these genes.
If variant_obj is an SV it will return the overlapping SNVs, and vice versa.
There is a problem when SVs are huge since there are too many overlapping variants.
Args:
variant_obj(dict)
Returns:
variants(iterable(dict)) | juraj-google-style |
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):
if (source_rect == None):
source_rect_ptr = ffi.NULL
else:
source_rect_ptr = source_rect._ptr
if (dest_rect == None):
dest_rect_ptr = ffi.NULL
else:
dest_rect_ptr = dest_rect._ptr
if (center == None):
center_ptr = ffi.NULL
else:
center_ptr = center._ptr
check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip)) | Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered. | codesearchnet |
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
self._whoosh.add_field(fieldname, fieldspec)
return self._whoosh.schema | Add a field in the index of the model.
Args:
fieldname (Text): This parameter registers a new field in the specified model.
fieldspec (whoosh field type, optional): The Whoosh field type to use; defaults to whoosh.fields.TEXT.
Returns:
TYPE: The new schema after the field is added.
def memory_write64(self, addr, data, zone=None):
words = []
bitmask = 4294967295
for long_word in data:
words.append((long_word & bitmask))
words.append(((long_word >> 32) & bitmask))
return self.memory_write32(addr, words, zone=zone) | Writes long words to memory of a target system.
Note:
This is little-endian.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of long words to write
zone (str): optional memory zone to access
Returns:
Number of long words written to target.
Raises:
JLinkException: on memory access error. | codesearchnet |
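A standalone illustration of the little-endian word split performed above (no J-Link hardware needed):
long_word = 0x1122334455667788
low = long_word & 0xFFFFFFFF            # 0x55667788, written first
high = (long_word >> 32) & 0xFFFFFFFF   # 0x11223344, written second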
def parse_files(self):
log_re = self.log_format_regex
log_lines = []
for log_file in self.matching_files():
with open(log_file) as f:
matches = re.finditer(log_re, f.read())
for match in matches:
log_lines.append(match.groupdict())
return log_lines | Find the files and parse them.
Returns:
list: list of dictionaries (one for each parsed line). | codesearchnet |
def _validate_dump_with_graphs(self, device_name):
if not self._debug_graphs:
raise LookupError('No partition graphs loaded for device %s' % device_name)
debug_graph = self._debug_graphs[device_name]
for datum in self._dump_tensor_data[device_name]:
if datum.node_name not in debug_graph.node_inputs:
raise ValueError("Node name '%s' is not found in partition graphs of device %s." % (datum.node_name, device_name))
pending_inputs = {}
for node in debug_graph.node_inputs:
pending_inputs[node] = []
inputs = debug_graph.node_inputs[node]
for inp in inputs:
inp_node = debug_graphs.get_node_name(inp)
inp_output_slot = debug_graphs.get_output_slot(inp)
if inp_node in self._debug_watches[device_name] and inp_output_slot in self._debug_watches[device_name][inp_node] and (debug_graph.node_op_types.get(inp) not in ('Enter', 'NextIteration')) and ((inp_node, inp_output_slot) not in pending_inputs[node]):
pending_inputs[node].append((inp_node, inp_output_slot))
for i, datum in enumerate(self._dump_tensor_data[device_name]):
node = datum.node_name
slot = datum.output_slot
if not self._satisfied_at_timestamp(device_name, pending_inputs[node], datum.timestamp, start_i=i + 1):
raise ValueError('Causality violated in timing relations of debug dumps: %s (%d): these input(s) are not satisfied: %s' % (node, datum.timestamp, repr(pending_inputs[node])))
recipients = debug_graph.node_recipients[node]
for recipient in recipients:
recipient_pending_inputs = pending_inputs[recipient]
if (node, slot) in recipient_pending_inputs:
if self.node_op_type(recipient) == 'Merge':
del recipient_pending_inputs[:]
else:
del recipient_pending_inputs[recipient_pending_inputs.index((node, slot))] | Validate the dumped tensor data against the partition graphs.
Only the watched nodes are validated by this method, because tfdbg allows
clients to watch only a subset of the nodes.
Args:
device_name: (`str`) device name.
Raises:
LookupError: If the partition graphs have not been loaded yet.
ValueError: If dumps contain node names not found in partition graph.
Or if the temporal order of the dump's timestamps violate the
input relations on the partition graphs. | github-repos |
def load_folder_files(folder_path, recursive=True):
if isinstance(folder_path, (list, set)):
files = []
for path in set(folder_path):
files.extend(load_folder_files(path, recursive))
return files
if not os.path.exists(folder_path):
return []
file_list = []
for dirpath, dirnames, filenames in os.walk(folder_path):
filenames_list = []
for filename in filenames:
if not filename.endswith(('.yml', '.yaml', '.json')):
continue
filenames_list.append(filename)
for filename in filenames_list:
file_path = os.path.join(dirpath, filename)
file_list.append(file_path)
if not recursive:
break
return file_list | Load folder path and return all files ending with yml/yaml/json in a list.
Args:
folder_path (str): specified folder path to load
recursive (bool): load files recursively if True
Returns:
list: files ending with yml/yaml/json | juraj-google-style
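A usage sketch; the folder path and returned file names are hypothetical:
testcases = load_folder_files('/path/to/testcases', recursive=True)
# -> e.g. ['/path/to/testcases/login.yml', '/path/to/testcases/api/demo.json']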
def assert_equal_flattened(self, expected_results, actual_results):
self.assertEqual(len(expected_results), len(actual_results))
for i, expected_result in enumerate(expected_results):
final_result = []
actual_result = actual_results[i]
for val in actual_result:
final_result.extend(val.numpy())
self.assertAllEqual(expected_result, final_result) | Asserts that flattened results are equal.
Due to the number of replicas in the strategy, the output may have a
different structure and needs to be flattened for comparison.
Args:
expected_results: The results expected as a result of a computation.
actual_results: The actual results of a computation. | github-repos |
def _compute_nfps_uniform(cum_counts, sizes):
nfps = np.zeros((len(sizes), len(sizes)))
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[(l, u)] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps | Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1. | codesearchnet |
def AsyncPopenArgs(self):
args = {}
if self.operating_system == OperatingSystem.WINDOWS:
args['close_fds'] = True
detached_process = 8
create_new_process_group = 512
args['creationflags'] = detached_process | create_new_process_group
else:
args['preexec_fn'] = os.setsid
args['close_fds'] = True
args['stdin'] = subprocess.PIPE
args['stdout'] = subprocess.PIPE
args['stderr'] = subprocess.PIPE
return args | Returns the args for spawning an async process using Popen on this OS.
Make sure the main process does not wait for the new process. On windows
this means setting the 0x8 creation flag to detach the process.
Killing a group leader kills the whole group. Setting creation flag 0x200 on
Windows or running setsid on *nix makes sure the new process is in a new
session with the new process the group leader. This means it can't be killed
if the parent is killed.
Finally, all file descriptors (FD) need to be closed so that waiting for the
output of the main process does not inadvertently wait for the output of the
new process, which means waiting for the termination of the new process.
If the new process wants to write to a file, it can open new FDs.
Returns:
{str:}, The args for spawning an async process using Popen on this OS. | github-repos |
def get_metadata(feature_name, etextno):
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values) | Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name. | codesearchnet |
def __init__(self, data, entities=None, categories=None):
self.data = data
if entities is None:
entities = self.default_entities()
self.entities = entities
if categories is None:
categories = []
self.categories = categories
self.validate() | Initialization method.
Args:
data (list of list of int/float): 2-dim array.
entities (list): list of entities.
categories (list): list of the categories (one per entity). | juraj-google-style |
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--preferred_year', '--preferred-year', dest='preferred_year',
type=int, action='store', default=None, metavar='YEAR', help=(
'When a format\'s timestamp does not include a year, e.g. '
'syslog, use this as the initial year instead of attempting '
'auto-detection.'))
argument_group.add_argument(
'--process_archives', '--process-archives', dest='process_archives',
action='store_true', default=False, help=(
'Process file entries embedded within archive files, such as '
'archive.tar and archive.zip. This can make processing '
'significantly slower.'))
argument_group.add_argument(
'--skip_compressed_streams', '--skip-compressed-streams',
dest='process_compressed_streams', action='store_false', default=True,
help=(
'Skip processing file content within compressed streams, such as '
'syslog.gz and syslog.bz2.')) | Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group. | juraj-google-style |
def add_to_collection(name, value) -> None:
get_default_graph().add_to_collection(name, value) | Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility | github-repos |
def get_resize_output_image_size(input_image: ImageInput, size: Union[int, Tuple[int, int], List[int], Tuple[int]], patch_size: Union[int, Tuple[int, int], List[int], Tuple[int]], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> tuple:
max_height, max_width = size if isinstance(size, (tuple, list)) else (size, size)
patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)
height, width = get_image_size(input_image, input_data_format)
ratio = max(height / max_height, width / max_width)
if ratio > 1:
height = int(math.floor(height / ratio))
width = int(math.floor(width / ratio))
num_height_tokens, num_width_tokens = _num_image_tokens((height, width), (patch_height, patch_width))
return (num_height_tokens * patch_height, num_width_tokens * patch_width) | Find the target (height, width) dimension of the output image after resizing given the input image and the desired
size.
Args:
input_image (`ImageInput`):
The image to resize.
size (`int` or `Tuple[int, int]`):
Max image size an input image can be. Must be a dictionary with the key "longest_edge".
patch_size (`int` or `Tuple[int, int]`):
The patch_size as `(height, width)` to use for resizing the image. If patch_size is an integer, `(patch_size, patch_size)`
will be used
input_data_format (`ChannelDimension`, *optional*):
The channel dimension format of the input image. If unset, will use the inferred format from the input.
Returns:
`tuple`: The target (height, width) dimension of the output image after resizing. | github-repos |
def remove_user(self, group, username):
try:
self.lookup_id(group)
except ldap_tools.exceptions.InvalidResult as err:
raise err from None
operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
self.client.modify(self.__distinguished_name(group), operation) | Remove a user from the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to remove
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info. | codesearchnet |
def get_legacy_output_shapes(dataset_or_iterator):
return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), get_structure(dataset_or_iterator)) | Returns the output shapes for elements of the input dataset / iterator.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A (nested) structure of `tf.TensorShape` objects matching the structure of
the dataset / iterator elements and specifying the shape of the individual
components.
@compatibility(TF2)
This is a legacy API for inspecting the type signature of dataset elements. In
TF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.
@end_compatibility | github-repos |
def get_config(self, obj):
try:
shared_object_config = self._shared_objects_config[obj]
except (TypeError, KeyError):
return None
shared_object_config.increment_ref_count()
return shared_object_config | Gets a `SharedObjectConfig` if one has already been seen for `obj`.
Args:
obj: The object for which to retrieve the `SharedObjectConfig`.
Returns:
The SharedObjectConfig for a given object, if already seen. Else,
`None`. | github-repos |
def update_power_state(self, id_or_uri, power_state):
uri = self._client.build_uri(id_or_uri) + "/powerState"
return self._client.update(power_state, uri) | Sets the power state of the specified power delivery device. The device must be an HP Intelligent Outlet.
Args:
id_or_uri:
Can be either the power device id or the uri
power_state:
{"powerState":"On|Off"}
Returns:
str: The power state | juraj-google-style |
def copy_entities(self, from_namespace, from_workspace, etype, enames):
r = fapi.copy_entities(from_namespace, from_workspace, self.namespace, self.name, etype, enames, self.api_url)
fapi._check_response_code(r, 201) | Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy | codesearchnet |
def sum_rightmost_ndims_preserving_shape(x, ndims):
x = tf.convert_to_tensor(value=x)
if (x.shape.ndims is not None):
axes = tf.range((x.shape.ndims - ndims), x.shape.ndims)
else:
axes = tf.range((tf.rank(x) - ndims), tf.rank(x))
return tf.reduce_sum(input_tensor=x, axis=axes) | Return `Tensor` with right-most ndims summed.
Args:
x: the `Tensor` whose right-most `ndims` dimensions to sum
ndims: number of right-most dimensions to sum.
Returns:
A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most
dimensions. If the shape of `x` is statically known, the result will also
have statically known shape. Otherwise, the resulting shape will only be
known at runtime. | codesearchnet |
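A small worked example, assuming TensorFlow is available and the function above is in scope:
import tensorflow as tf
x = tf.ones([2, 3, 4])
y = sum_rightmost_ndims_preserving_shape(x, ndims=2)  # shape [2], each entry 3 * 4 = 12.0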
def bloch_vector_of(self, qubit: ops.Qid) -> np.ndarray:
return bloch_vector_from_state_vector(self.state_vector(), self.qubit_map[qubit]) | Returns the bloch vector of a qubit in the state.
Calculates the bloch vector of the given qubit
in the state given by self.state_vector(), given that
self.state_vector() follows the standard Kronecker convention of
numpy.kron.
Args:
qubit: qubit whose bloch vector we want to find.
Returns:
A length 3 numpy array representing the qubit's bloch vector.
Raises:
ValueError: if the size of the state represents more than 25 qubits.
IndexError: if index is out of range for the number of qubits
corresponding to the state. | codesearchnet |
class ViltProcessor(ProcessorMixin):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'ViltImageProcessor'
tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
encoding.update(encoding_image_processor)
return encoding
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor | Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.
[`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the
docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.
Args:
image_processor (`ViltImageProcessor`, *optional*):
An instance of [`ViltImageProcessor`]. The image processor is a required input.
tokenizer (`BertTokenizerFast`, *optional*):
        An instance of [`BertTokenizerFast`]. The tokenizer is a required input. | github-repos
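A hedged usage sketch mirroring the usual transformers documentation example; the checkpoint name and image URL are the standard public ones, and network access is assumed for downloading both.
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained('dandelin/vilt-b32-finetuned-vqa')
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
# Tokenizes the question and preprocesses the image in one call.
inputs = processor(image, 'How many cats are there?', return_tensors='pt')
print(sorted(inputs.keys()))  # typically attention_mask, input_ids, pixel_mask, pixel_values, token_type_ids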
async def get_user_groups(request):
acl_callback = request.get(GROUPS_KEY)
if (acl_callback is None):
raise RuntimeError('acl_middleware not installed')
user_id = (await get_auth(request))
groups = (await acl_callback(user_id))
if (groups is None):
return None
user_groups = ((Group.AuthenticatedUser, user_id) if (user_id is not None) else ())
return set(itertools.chain(groups, (Group.Everyone,), user_groups)) | Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
    None, the AuthenticatedUser group and the user_id are added to the
    groups returned by the function.
Raises:
RuntimeError: If the ACL middleware is not installed | codesearchnet |
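A hypothetical handler sketch using the function above; 'admin' is a placeholder group name the installed ACL callback is assumed to return, and the auth/ACL middleware setup is assumed to happen elsewhere in the application.
from aiohttp import web

async def admin_only(request):
    groups = await get_user_groups(request)
    if groups is None or 'admin' not in groups:  # 'admin' is a hypothetical group name
        raise web.HTTPForbidden()
    return web.Response(text='welcome, admin')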
def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):
encrypted_root_plist = key_chain.GetCredential(path_spec, 'encrypted_root_plist')
if encrypted_root_plist:
fvde_volume.read_encrypted_root_plist(encrypted_root_plist)
password = key_chain.GetCredential(path_spec, 'password')
if password:
fvde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
fvde_volume.set_recovery_password(recovery_password)
fvde_volume.open_file_object(file_object) | Opens the FVDE volume using the path specification.
Args:
fvde_volume (pyfvde.volume): FVDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain. | codesearchnet |
def delete_entity(self, etype, entity_id):
r = fapi.delete_entity(self.namespace, self.name, etype, entity_id, self.api_url)
fapi._check_response_code(r, 202) | Delete an entity in this workspace.
Args:
etype (str): Entity type
entity_id (str): Entity name/unique id | codesearchnet |
def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):
(struct1, struct2) = self._process_species([struct1, struct2])
(struct1, struct2, fu, s1_supercell) = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, use_rms=True, break_on_match=True)
if matches:
min_X_diff = np.inf
for m in matches:
X_diff = 0
for (k, v) in m[0].items():
X_diff += (struct1.composition[k] * ((k.X - v.X) ** 2))
if (X_diff < min_X_diff):
min_X_diff = X_diff
best = m[0]
return best | Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
electronegativity between the matched species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species | codesearchnet |
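A sketch of the Li2O vs Na2O comparison mentioned in the docstring, assuming a recent pymatgen; the cubic lattice parameters are only approximate and serve to give sensible geometries.
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.structure_matcher import StructureMatcher

# Antifluorite structures built from the Fm-3m spacegroup.
li2o = Structure.from_spacegroup('Fm-3m', Lattice.cubic(4.61),
                                 ['Li', 'O'], [[0.25, 0.25, 0.25], [0, 0, 0]])
na2o = Structure.from_spacegroup('Fm-3m', Lattice.cubic(5.55),
                                 ['Na', 'O'], [[0.25, 0.25, 0.25], [0, 0, 0]])
matcher = StructureMatcher()
print(matcher.get_best_electronegativity_anonymous_mapping(li2o, na2o))
# Expected to map Li -> Na and O -> O (None if no match is found).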
def get_file_list(wildcard):
files = glob.glob(os.path.expanduser(wildcard))
    return files | Search for files to be concatenated. Currently very basic, but could be
expanded to be more sophisticated.
Args:
    wildcard (glob pattern string)
Returns:
files (list of full file paths) | juraj-google-style |
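A quick usage sketch; the pattern is a hypothetical placeholder, and an empty list comes back when nothing matches.
csv_files = get_file_list('~/data/*.csv')  # placeholder pattern
print(len(csv_files), 'files matched')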
def constant(value, delay=None):
    async def coro():
        if delay:
            await asyncio.sleep(delay)
return value
    return coro | Returns a coroutine function that, when called, always returns
the provided value.
This function has an alias: `paco.identity`.
Arguments:
value (mixed): value to constantly return when coroutine is called.
delay (int/float): optional return value delay in seconds.
Returns:
coroutinefunction
Usage::
coro = paco.constant('foo')
await coro()
# => 'foo'
await coro()
# => 'foo' | codesearchnet |
def hflip(img):
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT) | Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
    PIL Image: Horizontally flipped image. | juraj-google-style
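A tiny self-contained check, assuming the function above is torchvision's hflip (it lives in torchvision.transforms.functional); the image is built in memory so no files are needed.
from PIL import Image
from torchvision.transforms.functional import hflip

img = Image.new('RGB', (2, 1))
img.putpixel((0, 0), (255, 0, 0))  # left pixel red
img.putpixel((1, 0), (0, 0, 255))  # right pixel blue
flipped = hflip(img)
print(flipped.getpixel((0, 0)))    # (0, 0, 255) -- blue ends up on the left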
def _CreateIndexIfNotExists(self, index_name, mappings):
try:
if (not self._client.indices.exists(index_name)):
self._client.indices.create(body={'mappings': mappings}, index=index_name)
except elasticsearch.exceptions.ConnectionError as exception:
raise RuntimeError('Unable to create Elasticsearch index with error: {0!s}'.format(exception)) | Creates an Elasticsearch index if it does not exist.
Args:
    index_name (str): name of the index.
mappings (dict[str, object]): mappings of the index.
Raises:
RuntimeError: if the Elasticsearch index cannot be created. | codesearchnet |
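A hedged sketch of the same exists/create pattern called directly with an elasticsearch-py 7.x client; it assumes a server reachable on localhost:9200, and the index name and mappings are hypothetical.
from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])
mappings = {'properties': {'message': {'type': 'text'},
                           'timestamp': {'type': 'date'}}}
if not es.indices.exists(index='events'):
    es.indices.create(index='events', body={'mappings': mappings})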
def format_comment(comment_data):
format_pieces = []
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data) | Formats the data returned by the linters.
Given a dictionary with the fields line, column, severity, message_id, and
message, this will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may be absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message. | juraj-google-style |
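Pure-Python usage of the formatter above; every field is optional.
comment = {'line': 10, 'column': 4, 'severity': 'warning',
           'message_id': 'W0611', 'message': 'unused import "os"'}
print(format_comment(comment))
# line 10, col 4: warning: [W0611]: unused import "os"
print(format_comment({'message': 'file ignored'}))
# file ignored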
def tags_all(self):
if 'tags' not in self.database.collection_names():
        print('Warning: Searching on non-existent tags collection')
return None
cursor = self.database['tags'].find({}, {'_id':0, 'md5':1, 'tags':1})
return [item for item in cursor] | List of the tags and md5s for all samples
Args:
None
Returns:
List of the tags and md5s for all samples | juraj-google-style |
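A standalone sketch of the same projection query using pymongo directly; it assumes a MongoDB instance on localhost and a hypothetical database name ('workbench').
from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017/')['workbench']  # database name is a placeholder
if 'tags' in db.list_collection_names():
    tags = list(db['tags'].find({}, {'_id': 0, 'md5': 1, 'tags': 1}))
    print(tags[:5])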
def AddTableColumns(self, table, columns):
table_columns = self._table_columns.setdefault(table, [])
for attr in columns:
if (attr not in table_columns):
table_columns.append(attr) | Add columns to table if they are not already there.
Args:
table: table name as a string
columns: an iterable of column names | codesearchnet |
def collect_previous_mask(input_tensors):
def _collect_previous_mask(x):
return getattr(x, '_keras_mask', None)
return nest.map_structure(_collect_previous_mask, input_tensors) | Retrieves the output mask(s) of the previous node.
Args:
input_tensors: An arbitrary structure of Tensors.
Returns:
A mask tensor or list of mask tensors. | github-repos |
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth):
depth = random.randrange(min_depth, (max_depth + 1))
expr = random_expr(depth, vlist, ops)
sample = str(expr)
target = format_sympy_expr(sympy.simplify(sample))
return (sample, target) | Randomly generate an algebra simplify dataset sample.
Given an input expression, produce the simplified expression.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input.
target: String representation of the solution. | codesearchnet |
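The target side of a sample is essentially sympy.simplify applied to the expression string; a minimal sketch of that step (the vlist/ops handling above is specific to the dataset generator).
import sympy

sample = '(x*x + 2*x*x) / x'
print(sympy.simplify(sample))  # 3*x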
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for i, s in enumerate(zip(p[:-1], p[1:])):
if segment_length(s) <= threshold:
bad_ids.append((sec.id, i))
return CheckResult(len(bad_ids) == 0, bad_ids) | Check presence of neuron segments with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments | juraj-google-style |
def scale(self):
if (self.type not in {EventType.GESTURE_PINCH_BEGIN, EventType.GESTURE_PINCH_UPDATE, EventType.GESTURE_PINCH_END}):
raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_gesture_get_scale(self._handle) | The absolute scale of a pinch gesture: the ratio of the current
distance between the fingers to the distance at the start of the gesture.
The scale begins at 1.0, and if e.g. the fingers moved together by
50% then the scale will become 0.5, if they move twice as far apart
as initially the scale becomes 2.0, etc.
For gesture events that are of type
:attr:`~libinput.constant.EventType.GESTURE_PINCH_BEGIN`, this property
returns 1.0.
For gesture events that are of type
:attr:`~libinput.constant.EventType.GESTURE_PINCH_END`, this property
returns the scale value of the most recent
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE` event
(if any) or 1.0 otherwise.
For all other events this property raises :exc:`AttributeError`.
Returns:
float: The absolute scale of a pinch gesture.
Raises:
AttributeError | codesearchnet |
def CheckCondition(condition, check_object):
try:
of = objectfilter.Parser(condition).Parse()
compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
return compiled_filter.Matches(check_object)
except objectfilter.Error as e:
raise ConditionError(e) | Check if a condition matches an object.
Args:
condition: A string condition e.g. "os == 'Windows'"
check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()
Returns:
True or False depending on whether the condition matches.
Raises:
ConditionError: If condition is bad. | codesearchnet |
def scale_streaming_endpoint(access_token, streaming_endpoint_id, scale_units):
path = '/StreamingEndpoints'
full_path = ''.join([path, "('", streaming_endpoint_id, "')", '/Scale'])
full_path_encoded = urllib.parse.quote(full_path, safe='')
endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
body = (('{"scaleUnits": "' + str(scale_units)) + '"}')
return do_ams_post(endpoint, full_path_encoded, body, access_token) | Scale Media Service Streaming Endpoint.
Args:
access_token (str): A valid Azure authentication token.
streaming_endpoint_id (str): A Media Service Streaming Endpoint ID.
scale_units (str): A Media Service Scale Units Number.
Returns:
HTTP response. JSON body. | codesearchnet |
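A hedged call sketch; the bearer token and endpoint id are hypothetical placeholders, and obtaining a real token from Azure AD is assumed to happen elsewhere.
access_token = '<azure-ad-bearer-token>'  # placeholder
endpoint_id = 'nb:oid:UUID:00000000-0000-0000-0000-000000000000'  # placeholder
response = scale_streaming_endpoint(access_token, endpoint_id, '2')
print(response.status_code)  # assuming the helper returns a requests-style response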