code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---|
def _ParseFileEntryWithParser(self, parser_mediator, parser, file_entry, file_object=None):
if (not isinstance(parser, (parsers_interface.FileEntryParser, parsers_interface.FileObjectParser))):
raise TypeError('Unsupported parser object type.')
parser_mediator.ClearParserChain()
reference_count = parser_mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)
parser_mediator.SampleStartTiming(parser.NAME)
try:
if isinstance(parser, parsers_interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, parsers_interface.FileObjectParser):
parser.Parse(parser_mediator, file_object)
result = self._PARSE_RESULT_SUCCESS
except (IOError, dfvfs_errors.BackEndError) as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning('{0:s} unable to parse file: {1:s} with error: {2!s}'.format(parser.NAME, display_name, exception))
result = self._PARSE_RESULT_FAILURE
except errors.UnableToParseFile as exception:
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug('{0:s} unable to parse file: {1:s} with error: {2!s}'.format(parser.NAME, display_name, exception))
result = self._PARSE_RESULT_UNSUPPORTED
finally:
parser_mediator.SampleStopTiming(parser.NAME)
parser_mediator.SampleMemoryUsage(parser.NAME)
new_reference_count = parser_mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)
if (reference_count != new_reference_count):
display_name = parser_mediator.GetDisplayName(file_entry)
logger.warning('[{0:s}] did not explicitly close file-object for file: {1:s}.'.format(parser.NAME, display_name))
return result
|
Parses a file entry with a specific parser.
Args:
parser_mediator (ParserMediator): parser mediator.
parser (BaseParser): parser.
file_entry (dfvfs.FileEntry): file entry.
file_object (Optional[file]): file-like object to parse.
If not set the parser will use the parser mediator to open
the file entry's default data stream as a file-like object.
Returns:
int: parse result which is _PARSE_RESULT_FAILURE if the file entry
could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
was successfully parsed or _PARSE_RESULT_UNSUPPORTED when
UnableToParseFile was raised.
Raises:
TypeError: if parser object is not a supported parser type.
|
codesearchnet
|
def auth_proxy(self, method):
def _proxy(*args, **kwargs):
"""The actual proxy, which instantiates and authenticates the API.

        Args:
            *args (mixed): Args to send to class instantiation.
            **kwargs (mixed): Kwargs to send to class instantiation.

        Returns:
            mixed: The result of the authenticated callable.
        """
return method(self.session, *args, **kwargs)
return _proxy
|
Authentication proxy for API requests.
This is required because the API objects are naive of ``HelpScout``,
so they would otherwise be unauthenticated.
Args:
method (callable): A method call that should be authenticated. It
should accept a ``requests.Session`` as its first parameter,
which should be used for the actual API call.
Returns:
mixed: The results of the authenticated callable.
|
codesearchnet
|
def mds(means, weights, d):
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights)
|
Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
|
juraj-google-style
|
def release(self, subnets):
if isinstance(subnets, str) or isinstance(subnets, IPNetwork):
subnets = [subnets]
subnets_iter = (
str(subnet) if isinstance(subnet, IPNetwork) else subnet
for subnet in subnets
)
try:
with self._create_lock():
for subnet in subnets_iter:
self._release(self.create_lease_object_from_subnet(subnet))
except (utils.TimerException, IOError):
raise LagoSubnetLeaseLockException(self.path)
|
Free the lease of the given subnets
Args:
subnets (list of str or netaddr.IPNetwork): dotted ipv4 subnet in
CIDR notation (for example ```192.168.200.0/24```) or IPNetwork
object.
Raises:
LagoSubnetLeaseException: If subnet is a str and can't be parsed
LagoSubnetLeaseLockException:
If the lock to self.path can't be acquired.
|
juraj-google-style
|
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
if f'config.{attribute}' in modeling_source or f'getattr(config, "{attribute}"' in modeling_source or f'getattr(self.config, "{attribute}"' in modeling_source or ('TextConfig' in config_class.__name__ and f'config.get_text_config().{attribute}' in modeling_source):
attribute_used = True
elif re.search(f'getattr[ \\t\\v\\n\\r\\f]*\\([ \\t\\v\\n\\r\\f]*(self\\.)?config,[ \\t\\v\\n\\r\\f]*"{attribute}"', modeling_source) is not None:
attribute_used = True
if attribute_used:
break
if attribute_used:
break
attributes_to_allow = ['initializer_range', 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_token_id', 'video_token_id', 'image_seq_length', 'video_seq_length', 'image_size', 'text_config', 'use_cache', 'out_features', 'out_indices', 'sampling_rate', 'use_pretrained_backbone', 'backbone', 'backbone_config', 'use_timm_backbone', 'backbone_kwargs', 'rope_theta', 'partial_rotary_factor', 'pretraining_tp', 'boi_token_id', 'eoi_token_id']
attributes_used_in_generation = ['encoder_no_repeat_ngram_size']
case_allowed = True
if not attribute_used:
case_allowed = False
for attribute in attributes:
if attribute in ['is_encoder_decoder'] and default_value is True:
case_allowed = True
elif attribute in ['tie_word_embeddings'] and default_value is False:
case_allowed = True
elif attribute in attributes_to_allow + attributes_used_in_generation:
case_allowed = True
elif attribute.endswith('_token_id'):
case_allowed = True
if not case_allowed:
allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
|
Check if any name in `attributes` is used in one of the strings in `source_strings`
Args:
config_class (`type`):
The configuration class for which the arguments in its `__init__` will be checked.
attributes (`List[str]`):
The name of an argument (or attribute) and its variant names if any.
default_value (`Any`):
A default value for the attribute in `attributes` assigned in the `__init__` of `config_class`.
source_strings (`List[str]`):
The python source code strings in the same modeling directory where `config_class` is defined. The file
containing the definition of `config_class` should be excluded.
|
github-repos
|
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
|
Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
|
juraj-google-style
|
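A minimal usage sketch for the ISO3-to-ISO2 lookup above, assuming it is exposed as a classmethod of `Country` in the `hdx.location.country` module (the import path is an assumption, not stated in this excerpt):

```python
# Hedged sketch: assumes the method lives on hdx.location.country.Country
from hdx.location.country import Country

print(Country.get_iso2_from_iso3("AFG"))   # expected: "AF"
print(Country.get_iso2_from_iso3("XYZ"))   # None when the code is unknown and no exception is given
```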
def preprocess(train_data_path: str, feature_thres: int, val_data_path: typing.Optional[str]=None) -> typing.Tuple[Dataset, typing.List[str], typing.Optional[Dataset]]:
features = extract_features(train_data_path, feature_thres)
feature_index = dict(((feature, i) for i, feature in enumerate(features)))
train_dataset = load_dataset(train_data_path, feature_index)
val_dataset = load_dataset(val_data_path, feature_index) if val_data_path else None
return (train_dataset, features, val_dataset)
|
Loads entries and translates them into JAX arrays. The boolean matrix of
the input data is represented by row indices and column indices of True values
instead of the matrix itself for memory efficiency, assuming the matrix is
highly sparse. Row and column indices are not guaranteed to be sorted.
Args:
train_data_path (str): A file path to the training data file.
feature_thres (int): A threshold to filter out features whose number of
occurrences does not exceed the value.
val_data_path (str, optional): A file path to the validation data file.
Returns:
A tuple of following items:
- train_dataset (Dataset): The training dataset.
- features (List[str]): The list of features.
- val_dataset (Optional[Dataset]): The validation dataset.
This becomes None if val_data_path is None.
|
github-repos
|
def CEscape(text, as_utf8):
Ord = (ord if isinstance(text, six.string_types) else (lambda x: x))
if as_utf8:
return ''.join((_cescape_utf8_to_str[Ord(c)] for c in text))
return ''.join((_cescape_byte_to_str[Ord(c)] for c in text))
|
Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
|
codesearchnet
|
def linear_add(self, other, scale_factor=1.0):
if self.structure != other.structure:
raise ValueError("Adding or subtraction operations can only be "
"performed for volumetric data with the exact "
"same structure.")
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
|
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
|
juraj-google-style
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
document_type = event_values.get('document_type', None)
if document_type:
event_values['document_type'] = self._DOC_TYPES.get(
document_type, 'UNKNOWN')
shared = event_values.get('shared', False)
if shared:
event_values['shared'] = 'Shared'
else:
event_values['shared'] = 'Private'
return self._ConditionalFormatMessages(event_values)
|
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def job(self):
if (self._submitter and hasattr(self._submitter, '_job_access')):
return self._submitter._job_access()
return None
|
REST binding for the job associated with the submitted build.
Returns:
Job: REST binding for running job or ``None`` if connection information was not available or no job was submitted.
|
codesearchnet
|
def _ParseAccountsData(self, account_data):
if not account_data:
return {}
lines = [line for line in account_data.splitlines() if line]
user_map = {}
for line in lines:
if not all(ord(c) < 128 for c in line):
self.logger.info('SSH key contains non-ascii character: %s.', line)
continue
split_line = line.split(':', 1)
if len(split_line) != 2:
self.logger.info('SSH key is not a complete entry: %s.', split_line)
continue
user, key = split_line
if self._HasExpired(key):
self.logger.debug('Expired SSH key for user %s: %s.', user, key)
continue
if user not in user_map:
user_map[user] = []
user_map[user].append(key)
logging.debug('User accounts: %s.', user_map)
return user_map
|
Parse the SSH key data into a user map.
Args:
account_data: string, the metadata server SSH key attributes data.
Returns:
dict, a mapping of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
|
juraj-google-style
|
def Filter(self, function=None):
flat = (lambda x: (x if isinstance(x, str) else ''.join([flat(y) for y in x])))
if (function is None):
function = (lambda row: bool(flat(row.values)))
new_table = self.__class__()
new_table._table = [self.header]
for row in self:
if (function(row) is True):
new_table.Append(row)
return new_table
|
Construct a TextTable from the rows for which the function returns true.
Args:
function: A function applied to each row which returns a bool. If
function is None, all rows with empty column values are
removed.
Returns:
A new TextTable()
Raises:
TableError: When an invalid row entry is Append()'d
|
codesearchnet
|
def paginate(db_query, items_per_page, offset=0, start_page=1):
return Paginator(db_query, items_per_page, offset=offset, start_page=start_page)
|
Instantiates a Paginator instance for database queries.
Args:
db_query: The SQLAlchemy database query to paginate.
items_per_page: The desired number of items per page.
offset: The number of items to skip when paginating.
start_page: The number of the first page when reporting on page numbers.
|
codesearchnet
|
def RawBytesToScriptHash(raw):
rawh = binascii.unhexlify(raw)
rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf-8'))
return UInt160(data=rawhashstr)
|
Get a hash of the provided raw bytes using the ripemd160 algorithm.
Args:
raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC'
Returns:
UInt160:
|
codesearchnet
|
def add_note(path, filename='note.txt'):
path = os.path.expanduser(path)
assert os.path.isdir(path), '{} is not a valid directory.'.format(path)
filepath = os.path.join(path, filename)
exists = os.path.isfile(filepath)
try:
subprocess.call([EDITOR, filepath])
except Exception as exc:
logger.error('Editing note failed!')
raise exc
if exists:
print('Note updated at:', filepath)
else:
print('Note created at:', filepath)
|
Opens a txt file at the given path where user can add and save notes.
Args:
path (str): Directory where note will be saved.
filename (str): Name of note. Defaults to "note.txt"
|
codesearchnet
|
def create(self, document_data):
batch = self._client.batch()
batch.create(self, document_data)
write_results = batch.commit()
return _first_write_result(write_results)
|
Create the current document in the Firestore database.
Args:
document_data (dict): Property names and values to use for
creating a document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
Raises:
~google.cloud.exceptions.Conflict: If the document already exists.
|
juraj-google-style
|
def keep_alive(self):
txn_response = self.api.http_request('POST', ('%sfcr:tx' % self.root), data=None, headers=None)
if (txn_response.status_code == 204):
logger.debug(('continuing transaction: %s' % self.root))
self.active = True
self.expires = txn_response.headers['Expires']
return True
elif (txn_response.status_code == 410):
logger.debug(('transaction does not exist: %s' % self.root))
self.active = False
return False
else:
raise Exception(('HTTP %s, could not continue transaction' % txn_response.status_code))
|
Keep the current transaction alive and update self.expires.
Args:
    None
Returns:
    bool: True if the transaction was continued (self.expires is updated),
    False if the transaction no longer exists.
|
codesearchnet
|
def coarse_graining(network, state, internal_indices):
max_phi = float('-inf')
max_coarse_grain = CoarseGrain((), ())
for coarse_grain in all_coarse_grains(internal_indices):
try:
subsystem = MacroSubsystem(network, state, internal_indices,
coarse_grain=coarse_grain)
except ConditionallyDependentError:
continue
phi = compute.phi(subsystem)
if (phi - max_phi) > constants.EPSILON:
max_phi = phi
max_coarse_grain = coarse_grain
return (max_phi, max_coarse_grain)
|
Find the maximal coarse-graining of a micro-system.
Args:
network (Network): The network in question.
state (tuple[int]): The state of the network.
internal_indices (tuple[int]): Nodes in the micro-system.
Returns:
tuple[int, CoarseGrain]: The phi-value of the maximal |CoarseGrain|.
|
juraj-google-style
|
def _as_node_def_input(self):
assert self._op.name
if self.value_index == 0:
return self._op.name
else:
return '%s:%d' % (self._op.name, self.value_index)
|
Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
|
github-repos
|
def serve(args):
port = args.serve_port or PORT
host = "0.0.0.0"
dir_path = Path().absolute()
web_dir = dir_path / "site"
utils.set_routes()
if args.offline:
os.environ["MKINX_OFFLINE"] = "true"
_ = subprocess.check_output("mkdocs build > /dev/null", shell=True)
utils.make_offline()
class MkinxHTTPHandler(SimpleHTTPRequestHandler):
def translate_path(self, path):
location = str(web_dir)
route = location
if len(path) != 0 and path != "/":
for key, loc in utils.get_routes():
if path.startswith(key):
location = loc
path = path[len(key) :]
break
if location[-1] == "/" or not path or path[0] == "/":
route = location + path
else:
route = location + "/" + path
return route.split("?")[0]
success = False
count = 0
print("Waiting for server port...")
try:
while not success:
try:
httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)
success = True
except OSError:
count += 1
finally:
if not success and count > 20:
s = "port {} seems occupied. Try with {} ? (y/n)"
if "y" in input(s.format(port, port + 1)):
port += 1
count = 0
else:
print("You can specify a custom port with mkinx serve -s")
return
time.sleep(0.5)
except KeyboardInterrupt:
print("Aborting.")
return
httpd.allow_reuse_address = True
print("\nServing at http:
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
event_handler = utils.MkinxFileHandler(
patterns=["*.rst", "*.md", "*.yml", "*.yaml"]
)
observer = Observer()
observer.schedule(event_handler, path=str(dir_path), recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
httpd.server_close()
observer.join()
|
Start a server which will watch .md and .rst files for changes.
If a md file changes, the Home Documentation is rebuilt. If a .rst
file changes, the updated sphinx project is rebuilt
Args:
args (ArgumentParser): flags from the CLI
|
juraj-google-style
|
def _BuildParser():
parser = argparse.ArgumentParser(prog='yapf', description='Formatter for Python code.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__))
diff_inplace_quiet_group = parser.add_mutually_exclusive_group()
diff_inplace_quiet_group.add_argument('-d', '--diff', action='store_true', help='print the diff for the fixed source')
diff_inplace_quiet_group.add_argument('-i', '--in-place', action='store_true', help='make changes to files in place')
diff_inplace_quiet_group.add_argument('-q', '--quiet', action='store_true', help='output nothing and set return value')
lines_recursive_group = parser.add_mutually_exclusive_group()
lines_recursive_group.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories')
lines_recursive_group.add_argument('-l', '--lines', metavar='START-END', action='append', default=None, help='range of lines to reformat, one-based')
parser.add_argument('-e', '--exclude', metavar='PATTERN', action='append', default=None, help='patterns for files to exclude from formatting')
parser.add_argument('--style', action='store', help='specify formatting style: either a style name (for example "pep8" or "google"), or the name of a file with style settings. The default is pep8 unless a %s or %s or %s file located in the same directory as the source or one of its parent directories (for stdin, the current directory is used).' % (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))
parser.add_argument('--style-help', action='store_true', help='show style settings and exit; this output can be saved to .style.yapf to make your settings permanent')
parser.add_argument('--no-local-style', action='store_true', help="don't search for local style definition")
parser.add_argument('-p', '--parallel', action='store_true', help='run YAPF in parallel when formatting multiple files.')
parser.add_argument('-m', '--print-modified', action='store_true', help='print out file names of modified files')
parser.add_argument('-vv', '--verbose', action='store_true', help='print out file names while processing')
parser.add_argument('files', nargs='*', help='reads from stdin when no files are specified.')
return parser
|
Constructs the parser for the command line arguments.
Returns:
An ArgumentParser instance for the CLI.
|
github-repos
|
def group_pairs(pair_list):
groupid_to_items = defaultdict(list)
for (item, groupid) in pair_list:
groupid_to_items[groupid].append(item)
return groupid_to_items
|
Groups a list of items using the first element in each pair as the item and
the second element as the groupid.
Args:
pair_list (list): list of 2-tuples (item, groupid)
Returns:
dict: groupid_to_items: maps a groupid to a list of items
SeeAlso:
group_items
|
codesearchnet
|
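Since `group_pairs` only depends on `defaultdict`, a self-contained sketch of its behaviour is straightforward (the function is restated here so the snippet runs on its own):

```python
from collections import defaultdict

def group_pairs(pair_list):
    # same logic as the function above
    groupid_to_items = defaultdict(list)
    for item, groupid in pair_list:
        groupid_to_items[groupid].append(item)
    return groupid_to_items

pairs = [("a", 1), ("b", 2), ("c", 1)]
print(dict(group_pairs(pairs)))  # {1: ['a', 'c'], 2: ['b']}
```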
def store_work_results(self, results, collection, md5):
results['md5'] = md5
results['__time_stamp'] = datetime.datetime.utcnow()
if 'mod_time' not in results:
results['mod_time'] = results['__time_stamp']
try:
self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True)
except pymongo.errors.OperationFailure:
print 'Could not update existing object in capped collection, punting...'
print 'collection: %s md5:%s' % (collection, md5)
|
Store the output results of the worker.
Args:
results: a dictionary.
collection: the database collection to store the results in.
md5: the md5 of sample data to be updated.
|
juraj-google-style
|
def run_pip_command(command_args, pip_version=None, python_version=None):
pip_exe, context = find_pip(pip_version, python_version)
command = [pip_exe] + list(command_args)
if context is None:
return popen(command)
else:
return context.execute_shell(command=command, block=False)
|
Run a pip command.
Args:
command_args (list of str): Args to pip.
pip_version (str or None): Version of the pip package to use, passed to `find_pip`.
python_version (str or None): Python version to run pip with, passed to `find_pip`.
Returns:
`subprocess.Popen`: Pip process.
|
juraj-google-style
|
def get_op_or_tensor_by_name(name):
G = tfv1.get_default_graph()
def f(n):
if len(n) >= 3 and n[-2] == ':':
return G.get_tensor_by_name(n)
else:
return G.get_operation_by_name(n)
if not isinstance(name, list):
return f(name)
else:
return list(map(f, name))
|
Get either tf.Operation of tf.Tensor from names.
Args:
name (list[str] or str): names of operations or tensors.
Raises:
KeyError, if the name doesn't exist
|
juraj-google-style
|
def process(self, metrics, config):
LOG.debug('Process called')
for metric in metrics:
metric.tags['instance-id'] = config['instance-id']
return metrics
|
Processes metrics.
This method is called by the Snap daemon during the process phase
of the execution of a Snap workflow. Examples of processing metrics
include applying filtering, max, min, average functions as well as
adding additional context to the metrics to name just a few.
In this example we are adding a tag called 'instance-id' to every metric.
Args:
metrics (obj:`list` of `snap_plugin.v1.Metric`):
List of metrics to be processed.
Returns:
:obj:`list` of `snap_plugin.v1.Metric`:
List of processed metrics.
|
codesearchnet
|
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor:
max_rel_dist = int(2 * max(q_size, k_size) - 1)
if rel_pos.shape[0] != max_rel_dist:
rel_pos_resized = tf.image.resize(tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), size=(max_rel_dist, rel_pos.shape[1]), method='bilinear')
rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist))
else:
rel_pos_resized = rel_pos
q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0)
k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0)
relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32))
|
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`tf.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
|
github-repos
|
def find_by_or(cls, payload):
if (not isinstance(payload, dict)):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, 'find_by_or')
payload = {'find_by_or': payload}
cls.debug_logger.debug('Searching Pulsar {} for {}'.format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
cls.write_response_html_to_file(res, 'bob.html')
if res:
try:
res = res[cls.MODEL_NAME]
except KeyError:
pass
return res
|
Searches the model in question by OR joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a string (for the OR operator joining to be specified).
Only the first hit is returned, and there is no particular ordering specified in the server-side
API method.
Args:
payload: `dict`. The attributes of a record to search for by using OR operator joining
for each query parameter.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
|
codesearchnet
|
def _is_working_path(dom, path, element):
def i_or_none(el, i):
"""
        Return ``el[i]`` if the list is not blank, or None otherwise.

        Args:
            el (list, tuple): Any indexable object.
            i (int): Index.

        Returns:
            obj: Element at index `i` if `el` is not blank, or ``None``.
        """
if (not el):
return None
return el[i]
path_functions = {'find': (lambda el, index, params: i_or_none(el.find(*params), index)), 'wfind': (lambda el, index, params: i_or_none(el.wfind(*params).childs, index)), 'match': (lambda el, index, params: i_or_none(el.match(*params), index)), 'left_neighbour_tag': (lambda el, index, neigh_data: i_or_none(el.find(neigh_data.tag_name, neigh_data.params, fn=utils.has_neigh(*neigh_data.fn_params, left=True)), index)), 'right_neighbour_tag': (lambda el, index, neigh_data: i_or_none(el.find(neigh_data.tag_name, neigh_data.params, fn=utils.has_neigh(*neigh_data.fn_params, left=False)), index))}
el = None
if isinstance(path, PathCall):
el = path_functions[path.call_type](dom, path.index, path.params)
elif isinstance(path, Chained):
for path in path.chain:
dom = path_functions[path.call_type](dom, path.index, path.params)
if (not dom):
return False
el = dom
else:
raise UserWarning(('Unknown type of path parameters! (%s)' % str(path)))
if (not el):
return False
return (el.getContent().strip() == element.getContent().strip())
|
Check whether the path is working or not.
Apply the proper search function interpreting `path` to `dom` and check
whether the returned object is `element`. If so, return ``True``, otherwise ``False``.
Args:
    dom (obj): HTMLElement DOM.
    path (obj): :class:`.PathCall` instance containing information about
        the path and which function it requires to obtain the element the
        path is pointing to.
    element (obj): HTMLElement instance used to decide whether `path`
        points to the correct `element` or not.
Returns:
bool: True if `path` correctly points to proper `element`.
|
codesearchnet
|
def delete(self, teamId):
check_type(teamId, basestring, may_be_none=False)
self._session.delete(API_ENDPOINT + '/' + teamId)
|
Delete a team.
Args:
teamId(basestring): The ID of the team to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
|
juraj-google-style
|
def get_repeated_from_extensions(extension_list: List[message.Message], repeated_cls: Type[_T]) -> List[_T]:
result = []
if not extension_list:
return result
url = annotation_utils.get_structure_definition_url(repeated_cls.DESCRIPTOR)
for extension in extension_list:
if cast(Any, extension).url.value == url:
msg = extension_to_message(extension, repeated_cls)
result.append(msg)
return result
|
Extracts matching extensions from extension_list and serializes to protos.
Args:
extension_list: The list of FHIR extensions to examine.
repeated_cls: The type of message to serialize to.
Returns:
A list of protos of type repeated_cls representing the extensions within
extension_list.
|
github-repos
|
def is_symbolic_tensor(tensor) -> bool:
return isinstance(tensor, SymbolicTensor)
|
Test if `tensor` is a symbolic Tensor.
Args:
tensor: a tensor-like object
Returns:
True if `tensor` is a symbolic tensor (not an eager tensor).
|
github-repos
|
def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):
total_pages = int(math.ceil((record_count / limit)))
next_cond = ((limit + offset) <= record_count)
prev_cond = (offset >= limit)
next_page = ((base_uri + page_nav_tpl.format(limit, (offset + limit))) if next_cond else None)
prev_page = ((base_uri + page_nav_tpl.format(limit, (offset - limit))) if prev_cond else None)
return OrderedDict([('total_count', record_count), ('total_pages', total_pages), ('next_page', next_page), ('prev_page', prev_page)])
|
Compute pagination info for collection filtering.
Args:
limit (int): Collection filter limit.
offset (int): Collection filter offset.
record_count (int): Collection filter total record count.
base_uri (str): Collection filter base uri (without limit, offset)
page_nav_tpl (str): Pagination template.
Returns:
A mapping of pagination info.
|
codesearchnet
|
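A worked example of the pagination helper above; the function is restated so the numbers can be checked directly (Python 3 division assumed):

```python
import math
from collections import OrderedDict

def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):
    # same logic as the function above
    total_pages = int(math.ceil(record_count / limit))
    next_cond = (limit + offset) <= record_count
    prev_cond = offset >= limit
    next_page = base_uri + page_nav_tpl.format(limit, offset + limit) if next_cond else None
    prev_page = base_uri + page_nav_tpl.format(limit, offset - limit) if prev_cond else None
    return OrderedDict([('total_count', record_count), ('total_pages', total_pages),
                        ('next_page', next_page), ('prev_page', prev_page)])

info = paginator(10, 20, 95, '/items?')
# total_count=95, total_pages=10,
# next_page='/items?&limit=10&offset=30', prev_page='/items?&limit=10&offset=10'
print(info)
```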
def find_subclasses_in_module(base_classes, module):
subclasses = []
for _, module_member in module.__dict__.items():
if inspect.isclass(module_member):
for base_class in base_classes:
if issubclass(module_member, base_class):
subclasses.append(module_member)
return subclasses
|
Finds the subclasses of the given classes in the given module.
Args:
base_classes: list of classes, the base classes to look for the
subclasses of in the module.
module: module, the module to look for the subclasses in.
Returns:
A list of all of the subclasses found in the module.
|
github-repos
|
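A small sketch of the subclass finder above against a throwaway module built with `types.ModuleType` (the module content is purely illustrative):

```python
import inspect
import types

def find_subclasses_in_module(base_classes, module):
    # same traversal as the function above
    subclasses = []
    for _, member in module.__dict__.items():
        if inspect.isclass(member):
            for base_class in base_classes:
                if issubclass(member, base_class):
                    subclasses.append(member)
    return subclasses

class Base: pass
class Child(Base): pass

fake_module = types.ModuleType("fake_module")
fake_module.Base = Base
fake_module.Child = Child
fake_module.unrelated = 42

# [Base, Child] -- note Base itself qualifies, since issubclass(Base, Base) is True
print(find_subclasses_in_module([Base], fake_module))
```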
def _get_value_type(cls, value):
type_ = cls.typeDict.get(type(value))
if type_ is None:
type_ = 'CLASS' if inspect.isclass(value) else None
if type_ is None and value is None:
type_ = 'STRING'
return type_
|
Infers the type of a given value.
Args:
value: The value whose type needs to be inferred. For 'DURATION' and
'TIMESTAMP', the corresponding Python type is datetime.timedelta and
datetime.datetime respectively. For Python classes, the API type is
just 'STRING' at the moment.
Returns:
One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or
'TIMESTAMP', depending on the type of the value.
|
github-repos
|
def __init__(self, server_port, dump_dir, toggle_watch_on_core_metadata=None):
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
self._initialize_toggle_watch_state(toggle_watch_on_core_metadata)
grpc_debug_server.EventListenerBaseServicer.__init__(self, server_port, functools.partial(EventListenerTestStreamHandler, dump_dir, self))
self._call_types = []
self._call_keys = []
self._origin_stacks = []
self._origin_id_to_strings = []
self._graph_tracebacks = []
self._graph_versions = []
self._source_files = []
|
Constructor of EventListenerTestServicer.
Args:
server_port: (int) The server port number.
dump_dir: (str) The root directory to which the data files will be
dumped. If empty or None, the received debug data will not be dumped
to the file system: they will be stored in memory instead.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
|
github-repos
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An ALBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
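A numeric sketch of the special-token layout described above, with placeholder IDs standing in for the tokenizer attributes (the real ALBERT `cls_token_id`/`sep_token_id` values are not assumed here):

```python
from typing import List, Optional

CLS_ID, SEP_ID = 2, 3  # placeholder IDs, not the actual vocabulary values

def build_inputs_with_special_tokens(token_ids_0: List[int],
                                     token_ids_1: Optional[List[int]] = None) -> List[int]:
    # mirrors the method above, with module-level constants instead of tokenizer attributes
    cls, sep = [CLS_ID], [SEP_ID]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep

print(build_inputs_with_special_tokens([10, 11]))            # [2, 10, 11, 3]
print(build_inputs_with_special_tokens([10, 11], [20, 21]))  # [2, 10, 11, 3, 20, 21, 3]
```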
def set_servo_angle(self, goalangle, goaltime, led):
if (self.servomodel==0x06) or (self.servomodel == 0x04):
goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
else:
goalposition = scale(goalangle, -150, 150, 21, 1002)
self.set_servo_position(goalposition, goaltime, led)
|
Sets the servo angle (in degrees)
Enable torque using torque_on function before calling this
Args:
goalangle (int): The desired angle in degrees, range -150 to 150
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
|
juraj-google-style
|
def get_request_message(cls, remote_info):
if remote_info in cls.__remote_info_cache:
return cls.__remote_info_cache[remote_info]
else:
return remote_info.request_type()
|
Gets request message or container from remote info.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
Returns:
Either an instance of the request type from the remote or the
ResourceContainer that was cached with the remote method.
|
juraj-google-style
|
def _initial_population_gsa(population_size, solution_size, lower_bounds, upper_bounds):
if ((len(lower_bounds) != solution_size) or (len(upper_bounds) != solution_size)):
raise ValueError('Lower and upper bounds must have a length equal to the solution size.')
return common.make_population(population_size, common.random_real_solution, solution_size, lower_bounds, upper_bounds)
|
Create a random initial population of floating point values.
Args:
population_size: an integer representing the number of solutions in the population.
solution_size: the number of values in each solution.
lower_bounds: a list, each value is a lower bound for the corresponding
part of the solution.
upper_bounds: a list, each value is a upper bound for the corresponding
part of the solution.
Returns:
list; A list of random solutions.
|
codesearchnet
|
def get_node_details(self, node_id: list) -> dict:
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs
|
Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (list): List of node ID
Returns:
dict, details of the node
|
juraj-google-style
|
def from_celery(cls, broker_dict):
return BrokerStats(hostname=broker_dict['hostname'], port=broker_dict['port'], transport=broker_dict['transport'], virtual_host=broker_dict['virtual_host'])
|
Create a BrokerStats object from the dictionary returned by celery.
Args:
broker_dict (dict): The dictionary as returned by celery.
Returns:
BrokerStats: A fully initialized BrokerStats object.
|
codesearchnet
|
def simple_repr(obj: Any, attrnames: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
elements = ["{}={}".format(name, repr(getattr(obj, name)))
for name in attrnames]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
|
Convenience function for :func:`__repr__`.
Works its way through a list of attribute names, and creates a ``repr()``
representation assuming that parameters to the constructor have the same
names.
Args:
obj: object to display
attrnames: names of attributes to include
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
juraj-google-style
|
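A sketch of `simple_repr` in use. `repr_result` is not included in this excerpt, so the example substitutes a minimal stand-in with the same joining behaviour (its exact interface is an assumption):

```python
from typing import Any, List

COMMA_SPACE = ", "

def repr_result(obj: Any, elements: List[str], with_addr: bool = False,
                joiner: str = COMMA_SPACE) -> str:
    # stand-in for the real helper, which is not shown in this excerpt
    body = joiner.join(elements)
    if with_addr:
        return "<{} object at {:#x}: {}>".format(type(obj).__name__, id(obj), body)
    return "{}({})".format(type(obj).__name__, body)

def simple_repr(obj: Any, attrnames: List[str], with_addr: bool = False,
                joiner: str = COMMA_SPACE) -> str:
    # same logic as the function above
    elements = ["{}={}".format(name, repr(getattr(obj, name))) for name in attrnames]
    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __repr__(self):
        return simple_repr(self, ["x", "y"])

print(Point(1, 2))  # Point(x=1, y=2)
```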
def remove_hallucinated_references(self, text: str) -> str:
lines = text.split('\n')
if len(lines) == 0:
return ''
clean_lines = remove_numbers(lines)
slices = get_slices(lines, clean_lines)
to_delete = []
for slice in slices:
to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))
for to_delete in reversed(to_delete):
text = text.replace(to_delete, '\n\n[MISSING_PAGE_POST]\n\n')
text = re.sub(r'## References\n+\[MISSING_PAGE_POST(:\d+)?\]', '\n\n[MISSING_PAGE_POST\\1]', text)
return text
|
Remove hallucinated or missing references from the text.
This function identifies and removes references that are marked as missing or hallucinated from the input text.
Args:
text (`str`):
The input text containing references.
Returns:
`str`: The text with hallucinated references removed.
|
github-repos
|
def create(configs):
if not configs:
raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)
elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:
ads = get_all_instances()
elif not isinstance(configs, list):
raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)
elif isinstance(configs[0], dict):
ads = get_instances_with_configs(configs)
elif isinstance(configs[0], str):
ads = get_instances(configs)
else:
raise Error('No valid config found in: %s' % configs)
_start_services_on_ads(ads)
return ads
|
Creates AndroidDevice controller objects.
Args:
configs: Represents configurations for Android devices, this can take one of
the following forms:
* str, only asterisk symbol is accepted, indicating that all connected
Android devices will be used
* A list of dict, each representing a configuration for an Android device.
* A list of str, each representing the serial number of Android device.
Returns:
A list of AndroidDevice objects.
|
github-repos
|
def fastcc(model, epsilon, solver):
reaction_set = set(model.reactions)
subset = set((reaction_id for reaction_id in reaction_set if (model.limits[reaction_id].lower >= 0)))
logger.info('Checking {} irreversible reactions...'.format(len(subset)))
logger.debug('|J| = {}, J = {}'.format(len(subset), subset))
p = FastcoreProblem(model, solver, epsilon=epsilon)
p.lp7(subset)
consistent_subset = set((reaction_id for reaction_id in model.reactions if (abs(p.get_flux(reaction_id)) >= (0.999 * epsilon))))
logger.debug('|A| = {}, A = {}'.format(len(consistent_subset), consistent_subset))
for reaction in (subset - consistent_subset):
(yield reaction)
subset = ((reaction_set - subset) - consistent_subset)
logger.info('Checking reversible reactions...')
logger.debug('|J| = {}, J = {}'.format(len(subset), subset))
flipped = False
singleton = False
while (len(subset) > 0):
logger.info('{} reversible reactions left to check...'.format(len(subset)))
if singleton:
reaction = next(iter(subset))
subset_i = {reaction}
logger.debug('LP3 on {}'.format(subset_i))
p.maximize({reaction: ((- 1) if p.is_flipped(reaction) else 1)})
else:
subset_i = subset
logger.debug('LP7 on {}'.format(subset_i))
p.lp7(subset_i)
consistent_subset.update((reaction_id for reaction_id in subset if abs(p.get_flux(reaction_id)) >= (0.999 * epsilon)))
logger.debug('|A| = {}, A = {}'.format(len(consistent_subset), consistent_subset))
if (not subset.isdisjoint(consistent_subset)):
subset -= consistent_subset
logger.debug('|J| = {}, J = {}'.format(len(subset), subset))
flipped = False
else:
subset_rev_i = (subset_i & model.reversible)
if (flipped or (len(subset_rev_i) == 0)):
flipped = False
if singleton:
subset -= subset_rev_i
for reaction in subset_rev_i:
logger.info('Inconsistent: {}'.format(reaction))
(yield reaction)
else:
singleton = True
else:
p.flip(subset_rev_i)
flipped = True
logger.info('Flipped {} reactions'.format(len(subset_rev_i)))
|
Check consistency of model reactions.
Yield all reactions in the model that are not part of the consistent
subset.
Args:
model: :class:`MetabolicModel` to solve.
epsilon: Flux threshold value.
solver: LP solver instance to use.
|
codesearchnet
|
def take_reference_screenshot(webdriver, file_name):
folder_location = os.path.join(ProjectUtils.get_project_root(),
WebScreenShotUtil.REFERENCE_SCREEN_SHOT_LOCATION)
WebScreenShotUtil.__capture_screenshot(
webdriver, folder_location, file_name + ".png")
|
Captures a screenshot as a reference screenshot.
Args:
webdriver (WebDriver) - Selenium webdriver.
file_name (str) - File name to save screenshot as.
|
juraj-google-style
|
def get_course_current_grades(self, course_id):
resp = self.requester.get(urljoin(self.base_url, '/api/grades/v1/courses/{course_key}/'.format(course_key=course_id)))
resp.raise_for_status()
resp_json = resp.json()
if ('results' in resp_json):
grade_entries = [CurrentGrade(entry) for entry in resp_json['results']]
while (resp_json['next'] is not None):
resp = self.requester.get(resp_json['next'])
resp.raise_for_status()
resp_json = resp.json()
grade_entries.extend((CurrentGrade(entry) for entry in resp_json['results']))
else:
grade_entries = [CurrentGrade(entry) for entry in resp_json]
return CurrentGradesByCourse(grade_entries)
|
Returns a CurrentGradesByCourse object for all users in the specified course.
Args:
course_id (str): an edX course id.
Returns:
CurrentGradesByCourse: object representing the student current grades
Authorization:
The authenticated user must have staff permissions to see grades for all users
in a course.
|
codesearchnet
|
def extract_q_df(self, state_key, action_key):
q = 0.0
if (self.q_df is None):
self.save_q_df(state_key, action_key, q)
return q
q_df = self.q_df[(self.q_df.state_key == state_key)]
q_df = q_df[(q_df.action_key == action_key)]
if q_df.shape[0]:
q = float(q_df['q_value'])
else:
self.save_q_df(state_key, action_key, q)
return q
|
Extract Q-Value from `self.q_df`.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Q-Value.
|
codesearchnet
|
def cache_value(self, api_name, key, value):
self._cache.setdefault(api_name, {})
self._cache[api_name][key] = value
|
Add the value of an API call to the cache.
Args:
api_name: a string name of the API. Keys and values are segmented by api_name.
key: a string key for the specific call.
value: the value of the call using the specific key
|
juraj-google-style
|
def completer(*commands):
def decorated_func(f):
f.__complete_targets__ = list(commands)
return f
return decorated_func
|
Decorate a function to be the completer function of commands.
Arguments:
commands: Names of command that should trigger this function object.
------------------------------
Interface of completer methods:
@completer('some-other_command')
def complete_foo(self, args, text):
'''
Arguments:
args: A list of arguments. The first token, i.e, the command
itself, is not included.
text: The scope of text being replaced.
A few examples, with '$' representing the shell prompt and
'|' represents the cursor position:
$ |
$ history|
handled by the __driver_completer() method
$ history |
args = []
text = ''
$ history cle|
args = []
text = 'cle'
$ history clear |
args = ['clear']
text = ''
Returns:
A list of candidates. If no candidate was found, return
either [] or None.
'''
pass
|
codesearchnet
|
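A minimal sketch of the `completer` decorator attached to a class method, mirroring the interface described in the docstring; the shell class is hypothetical:

```python
def completer(*commands):
    # same decorator as above
    def decorated_func(f):
        f.__complete_targets__ = list(commands)
        return f
    return decorated_func

class MyShell:  # hypothetical shell that consumes __complete_targets__
    subcommands = ["clear", "show", "save"]

    @completer("history")
    def complete_history(self, args, text):
        # return sub-commands of `history` that start with the text being completed
        return [c for c in self.subcommands if c.startswith(text)]

shell = MyShell()
print(shell.complete_history([], "cle"))               # ['clear']
print(MyShell.complete_history.__complete_targets__)   # ['history']
```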
def tf_action_exploration(self, action, exploration, action_spec):
action_shape = tf.shape(input=action)
exploration_value = exploration.tf_explore(
episode=self.global_episode,
timestep=self.global_timestep,
shape=action_spec['shape']
)
exploration_value = tf.expand_dims(input=exploration_value, axis=0)
if action_spec['type'] == 'bool':
action = tf.where(
condition=(tf.random_uniform(shape=action_shape) < exploration_value),
x=(tf.random_uniform(shape=action_shape) < 0.5),
y=action
)
elif action_spec['type'] == 'int':
action = tf.where(
condition=(tf.random_uniform(shape=action_shape) < exploration_value),
x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')),
y=action
)
elif action_spec['type'] == 'float':
noise = tf.random_normal(shape=action_shape, dtype=util.tf_dtype('float'))
action += noise * exploration_value
if 'min_value' in action_spec:
action = tf.clip_by_value(
t=action,
clip_value_min=action_spec['min_value'],
clip_value_max=action_spec['max_value']
)
return action
|
Applies optional exploration to the action (post-processor for action outputs).
Args:
action (tf.Tensor): The original output action tensor (to be post-processed).
exploration (Exploration): The Exploration object to use.
action_spec (dict): Dict specifying the action space.
Returns:
The post-processed action output tensor.
|
juraj-google-style
|
def refund(request, invoice_id):
current_invoice = InvoiceController.for_id_or_404(invoice_id)
try:
current_invoice.refund()
messages.success(request, 'This invoice has been refunded.')
except ValidationError as ve:
messages.error(request, ve)
return redirect('invoice', invoice_id)
|
Marks an invoice as refunded and requests a credit note for the
full amount paid against the invoice.
This view requires a login, and the logged in user must be staff.
Arguments:
invoice_id (castable to int): The ID of the invoice to refund.
Returns:
redirect:
Redirects to ``invoice``.
|
codesearchnet
|
def _OpenFileObject(self, path_spec):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
fvde_volume = pyfvde.volume()
fvde.FVDEVolumeOpen(
fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)
return fvde_volume
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileIO: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
juraj-google-style
|
class MeanAbsoluteError(MeanMetricWrapper):
def __init__(self, name='mean_absolute_error', dtype=None):
super(MeanAbsoluteError, self).__init__(mean_absolute_error, name, dtype=dtype)
|
Computes the mean absolute error between the labels and predictions.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsoluteError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
|
github-repos
|
def normalize_build_spec(self, build_spec):
for cmd in build_spec:
if not cmd:
continue
cmd_name = cmd.keys()[0]
cmd_options = cmd.values()[0]
cmd_handler = self.get_cmd_handler(cmd_name)
self.build_cmds.append(cmd_handler(cmd_options))
|
Convert a build spec into a list of Command tuples.
After running this command, self.build_cmds should hold all
the commands that should be run on the disk in self.disk_path.
Args:
build_spec (dict): The buildspec part from the init file
|
juraj-google-style
|
def begin_episode(self, agent_indices):
with tf.name_scope('begin_episode/'):
if (self._last_state is None):
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
|
Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
|
codesearchnet
|
def mul(left, right):
from .mv_mul import MvMul
length = max(left, right)
if length == 1:
return Mul(left, right)
return MvMul(left, right)
|
Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side.
|
juraj-google-style
|
def _wrap_response(self, status=None, **kwargs):
kwargs['status'] = status if status is not None else self._status.OK
return kwargs
|
Convenience method to wrap a status with any key word args.
Args:
status (enum): enum response status, defaults to OK
Returns:
dict: includes a 'status' attribute and any key word arguments
|
juraj-google-style
|
def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3):
class _elementInfo(object):
def __init__(self, el, pos, depth=0, max_list=3):
self.shape = ""
self.type = type(el).__name__
self.dtype = ""
self.range = ""
self.sub_elements = []
self.ident = " " * (depth * 2)
self.pos = pos
numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))
if isinstance(el, (int, float, bool)):
self.range = " with value {}".format(el)
elif type(el) is np.ndarray:
self.shape = " of shape {}".format(el.shape)
self.dtype = ":{}".format(str(el.dtype))
self.range = " in range [{}, {}]".format(el.min(), el.max())
elif type(el) in numpy_scalar_types:
self.range = " with value {}".format(el)
elif isinstance(el, (list)):
self.shape = " of len {}".format(len(el))
if depth < max_depth:
for k, subel in enumerate(el):
if k < max_list:
self.sub_elements.append(_elementInfo(subel, k, depth + 1, max_list))
else:
self.sub_elements.append(" " * ((depth + 1) * 2) + '...')
break
else:
if len(el) > 0:
self.sub_elements.append(" " * ((depth + 1) * 2) + ' ...')
def __str__(self):
strings = []
vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)
strings.append("{}{}: {}{}{}{}".format(*vals))
for k, el in enumerate(self.sub_elements):
strings.append(str(el))
return "\n".join(strings)
return str(_elementInfo(entry, k, depth, max_list))
|
Gather useful debug information from a datapoint.
Args:
entry: the datapoint component
k (int): index of this component in current datapoint
depth (int, optional): recursion depth
max_depth, max_list: same as in :meth:`__init__`.
Returns:
string: debug message
|
juraj-google-style
|
def write_log(self, message):
if self._is_write_log and self.log_file and not self.log_file.closed:
self.log_file.write(message + '\n')
|
Write a line to the VM instruction log file.
Args:
message (str): string message to write to file.
|
juraj-google-style
|
def to_valid_density_matrix(density_matrix_rep: Union[(int, np.ndarray)], num_qubits: int, dtype: Type[np.number]=np.complex64) -> np.ndarray:
if (isinstance(density_matrix_rep, np.ndarray) and (density_matrix_rep.ndim == 2)):
if (density_matrix_rep.shape != ((2 ** num_qubits), (2 ** num_qubits))):
raise ValueError('Density matrix was not square and of size 2 ** num_qubit, instead was {}'.format(density_matrix_rep.shape))
if (not np.allclose(density_matrix_rep, np.transpose(np.conj(density_matrix_rep)))):
raise ValueError('The density matrix is not hermitian.')
if (not np.isclose(np.trace(density_matrix_rep), 1.0)):
raise ValueError('Density matrix did not have trace 1 but instead {}'.format(np.trace(density_matrix_rep)))
if (density_matrix_rep.dtype != dtype):
raise ValueError('Density matrix had dtype {} but expected {}'.format(density_matrix_rep.dtype, dtype))
if (not np.all((np.linalg.eigvalsh(density_matrix_rep) > (- 1e-08)))):
raise ValueError('The density matrix is not positive semidefinite.')
return density_matrix_rep
state_vector = wave_function.to_valid_state_vector(density_matrix_rep, num_qubits, dtype)
return np.outer(state_vector, np.conj(state_vector))
|
Verifies the density_matrix_rep is valid and converts it to ndarray form.
This method is used to support passing a matrix, a vector (wave function),
or a computational basis state as a representation of a state.
Args:
density_matrix_rep: If an numpy array, if it is of rank 2 (a matrix),
then this is the density matrix. If it is a numpy array of rank 1
(a vector) then this is a wave function. If this is an int,
then this is the computation basis state.
num_qubits: The number of qubits for the density matrix. The
density_matrix_rep must be valid for this number of qubits.
dtype: The numpy dtype of the density matrix, will be used when creating
the state for a computational basis state (int), or validated
against if density_matrix_rep is a numpy array.
Returns:
A numpy matrix corresponding to the density matrix on the given number
of qubits.
Raises:
ValueError if the density_matrix_rep is not valid.
|
codesearchnet
|
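A hedged usage sketch for the density-matrix validator above, assuming it is exposed as `cirq.to_valid_density_matrix` (the public name is an assumption for this excerpt):

```python
import numpy as np
import cirq  # assumed to expose to_valid_density_matrix

# An int is treated as a computational basis state: 0 -> |0><0| for one qubit.
rho = cirq.to_valid_density_matrix(0, num_qubits=1)
print(rho)  # [[1.+0.j 0.+0.j] [0.+0.j 0.+0.j]]

# A rank-1 array is treated as a wave function and turned into |psi><psi|.
psi = (np.array([1.0, 1.0]) / np.sqrt(2)).astype(np.complex64)
print(cirq.to_valid_density_matrix(psi, num_qubits=1))  # all four entries ~0.5
```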
def _ip_int_from_string(cls, ip_str):
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
|
Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
|
juraj-google-style
|
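The method above is the internal helper of the `ipaddress` backport; the standard-library `ipaddress` module exposes the same conversion publicly, so an equivalent-behaviour sketch:

```python
import ipaddress

# int(IPv4Address) performs the same string-to-integer conversion the
# internal helper above implements: "192.168.0.1" -> 0xC0A80001
addr = ipaddress.IPv4Address("192.168.0.1")
print(int(addr))       # 3232235521
print(hex(int(addr)))  # 0xc0a80001

# Invalid input raises AddressValueError (a ValueError subclass), as above.
try:
    ipaddress.IPv4Address("192.168.0")
except ValueError as exc:
    print("rejected:", exc)
```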
def copy_from_dict(self, attributes):
for attribute_name, attribute_value in attributes.items():
if attribute_name[0] == '_':
continue
setattr(self, attribute_name, attribute_value)
|
Copies the attribute container from a dictionary.
Args:
attributes (dict[str, object]): attribute values per name.
|
juraj-google-style
|
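A small sketch of the attribute-copy helper above on a plain container class (the class itself is illustrative only):

```python
class AttributeContainer:  # illustrative stand-in for the real container class
    def copy_from_dict(self, attributes):
        # same logic as above: keys starting with an underscore are skipped
        for attribute_name, attribute_value in attributes.items():
            if attribute_name[0] == '_':
                continue
            setattr(self, attribute_name, attribute_value)

container = AttributeContainer()
container.copy_from_dict({'name': 'event', '_internal': 'skipped', 'size': 42})
print(container.name, container.size)   # event 42
print(hasattr(container, '_internal'))  # False
```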
def filter_by_col(self, column_names):
if not isinstance(column_names, (list, tuple)):
column_names = [column_names, ]
sheet = self.table
identity = self.db_sheet_cols.id
exists = self.db_sheet_cols.exists
criterion = True
for column_name in column_names:
_criterion = sheet.loc[:, column_name] > 0
_exists = sheet.loc[:, exists] > 0
criterion = criterion & _criterion & _exists
return sheet.loc[criterion, identity].values.astype(int)
|
filters sheet/table by columns (input is column header)
The routine returns the serial numbers with values > 0 in the selected
columns.
Args:
    column_names (list): the column headers.
Returns:
    numpy.ndarray: the serial numbers (as ints).
|
juraj-google-style
|
def write(self, inputdata):
if VERBOSE:
_print_out('\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\n')
if sys.version_info[0] > 2:
if not type(inputdata) == bytes:
raise TypeError('The input must be type bytes. Given:' + repr(inputdata))
inputstring = str(inputdata, encoding='latin1')
else:
inputstring = inputdata
if not self._isOpen:
raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata))
try:
response = RESPONSES[inputstring]
except:
response = DEFAULT_RESPONSE
self._waiting_data = response
|
Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
|
juraj-google-style
|
def _find_mapreduce_yaml(start, checked):
dir = start
while (dir not in checked):
checked.add(dir)
for mr_yaml_name in MR_YAML_NAMES:
yaml_path = os.path.join(dir, mr_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
dir = os.path.dirname(dir)
return None
|
Traverse the directory tree identified by start until a directory already
in checked is encountered or the path of mapreduce.yaml is found.
Checked is present both to make loop termination easy to reason about and so
that the same directories do not get rechecked.
Args:
start: the path to start in and work upward from
checked: the set of already examined directories
Returns:
the path of mapreduce.yaml file or None if not found.
|
codesearchnet
|
def receiveds_not_parsed(receiveds):
log.debug('Receiveds for this email are not parsed')
output = []
counter = Counter()
for i in receiveds[::(- 1)]:
j = {'raw': i.strip()}
j['hop'] = (counter['hop'] + 1)
counter['hop'] += 1
output.append(j)
else:
return output
|
If receiveds are not parsed, makes a new structure with a raw
field. It's useful to have the same structure as parsed receiveds.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of not parsed receiveds headers with first hop in first position
|
codesearchnet
|
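A short self-contained sketch of the fallback structure produced above, showing that the last raw `Received` header becomes hop 1 (the function is restated with the `Counter` import it relies on):

```python
from collections import Counter

def receiveds_not_parsed(receiveds):
    # same structure as above: raw header plus a 1-based hop counter
    output = []
    counter = Counter()
    for raw in receiveds[::-1]:
        counter['hop'] += 1
        output.append({'raw': raw.strip(), 'hop': counter['hop']})
    return output

headers = ["from mx2 by mail.example.com", "from client by mx2"]
print(receiveds_not_parsed(headers))
# [{'raw': 'from client by mx2', 'hop': 1},
#  {'raw': 'from mx2 by mail.example.com', 'hop': 2}]
```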
def Decode(self, encoded_data):
try:
decoded_data = base64.b64decode(encoded_data)
except (TypeError, binascii.Error) as exception:
raise errors.BackEndError(
'Unable to decode base64 stream with error: {0!s}.'.format(
exception))
return decoded_data, b''
|
Decode the encoded data.
Args:
encoded_data (byte): encoded data.
Returns:
tuple(bytes, bytes): decoded data and remaining encoded data.
Raises:
BackEndError: if the base64 stream cannot be decoded.
|
juraj-google-style
|
def __init__(self, path):
super(SQLiteStorageFileReader, self).__init__(path)
self._storage_file = sqlite_file.SQLiteStorageFile()
self._storage_file.Open(path=path)
|
Initializes a storage reader.
Args:
path (str): path to the input file.
|
juraj-google-style
|
def func(self, volume):
return self._func(np.array(volume), self.eos_params)
|
The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
|
juraj-google-style
|
def get_browser(browser_name, capabilities=None, **options):
if (browser_name == 'chrome'):
return webdriver.Chrome(desired_capabilities=capabilities, **options)
if (browser_name == 'edge'):
return webdriver.Edge(capabilities=capabilities, **options)
if (browser_name in ['ff', 'firefox']):
return webdriver.Firefox(capabilities=capabilities, **options)
if (browser_name in ['ie', 'internet_explorer']):
return webdriver.Ie(capabilities=capabilities, **options)
if (browser_name == 'phantomjs'):
return webdriver.PhantomJS(desired_capabilities=capabilities, **options)
if (browser_name == 'remote'):
return webdriver.Remote(desired_capabilities=capabilities, **options)
if (browser_name == 'safari'):
return webdriver.Safari(desired_capabilities=capabilities, **options)
raise ValueError('unsupported browser: {}'.format(repr(browser_name)))
|
Returns an instance of the given browser with the given capabilities.
Args:
browser_name (str): The name of the desired browser.
capabilities (Dict[str, str | bool], optional): The desired capabilities of the browser.
Defaults to None.
options: Arbitrary keyword arguments for the browser-specific subclass of
:class:`webdriver.Remote`.
Returns:
WebDriver: An instance of the desired browser.
|
codesearchnet
|
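A hedged usage sketch for get_browser, assuming the function above is in scope and the matching driver binaries are installed; the Selenium Grid address below is an assumption for the example, not taken from the source:

driver = get_browser('chrome')
remote = get_browser('remote',
                     capabilities={'browserName': 'firefox'},
                     command_executor='http://localhost:4444/wd/hub')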
def stop(self, wait=True):
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
|
Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None
|
juraj-google-style
|
def _kl_beta_beta(d1, d2, name=None):
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
with tf.name_scope(name or "kl_beta_beta"):
return (delta("_log_normalization", is_property=False) -
tf.math.digamma(d1.concentration1) * delta("concentration1") -
tf.math.digamma(d1.concentration0) * delta("concentration0") +
(tf.math.digamma(d1.total_concentration) *
delta("total_concentration")))
|
Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
|
juraj-google-style
|
def __live_receivers(signal):
with __lock:
__purge()
receivers = [funcref() for funcref in __receivers[signal]]
return receivers
|
Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal.
|
juraj-google-style
|
def _might_have_parameter(fn_or_cls, arg_name):
if inspect.isclass(fn_or_cls):
fn = _find_class_construction_fn(fn_or_cls)
else:
fn = fn_or_cls
while hasattr(fn, '__wrapped__'):
fn = fn.__wrapped__
arg_spec = _get_cached_arg_spec(fn)
if six.PY3:
if arg_spec.varkw:
return True
return ((arg_name in arg_spec.args) or (arg_name in arg_spec.kwonlyargs))
else:
if arg_spec.keywords:
return True
return (arg_name in arg_spec.args)
|
Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.
Specifically, this means that `fn_or_cls` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn_or_cls: The function or class to check.
arg_name: The name of the parameter.
Returns:
Whether `arg_name` might be a valid argument of `fn`.
|
codesearchnet
|
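A small illustration of what _might_have_parameter reports, assuming the module containing the helper above is in scope; the two functions below are defined only for this example:

def explicit(a, b=1): ...
def flexible(**kwargs): ...

_might_have_parameter(explicit, 'b')           # True: a named parameter exists
_might_have_parameter(explicit, 'missing')     # False: no such parameter and no **kwargs
_might_have_parameter(flexible, 'anything')    # True: **kwargs could absorb it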
def _get_napp_key(self, key, user=None, napp=None):
if (user is None):
user = self.user
if (napp is None):
napp = self.napp
kytos_json = (((self._installed / user) / napp) / 'kytos.json')
try:
with kytos_json.open() as file_descriptor:
meta = json.load(file_descriptor)
return meta[key]
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return ''
|
Return a value from kytos.json.
Args:
key (string): Key used to get the value within kytos.json.
user (string): A username.
napp (string): A NApp name.
Returns:
meta (object): Value stored in kytos.json.
|
codesearchnet
|
def get_etree_root(doc, encoding=None):
tree = get_etree(doc, encoding)
root = tree.getroot()
return root
|
Returns an instance of lxml.etree._Element for the given `doc` input.
Args:
doc: The input XML document. Can be an instance of
``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like
object, or a string filename.
encoding: The character encoding of `doc`. If ``None``, an attempt
will be made to determine the character encoding by the XML
parser.
Returns:
An ``lxml.etree._Element`` instance for `doc`.
Raises:
IOError: If `doc` cannot be found.
lxml.ParseError: If `doc` is a malformed XML document.
|
codesearchnet
|
def _ParseFileData(self, knowledge_base, file_object):
text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')
if not knowledge_base.GetHostname():
hostname = text_file_object.readline()
hostname = hostname.strip()
if hostname:
hostname_artifact = artifacts.HostnameArtifact(name=hostname)
knowledge_base.SetHostname(hostname_artifact)
|
Parses file content (data) for a hostname preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def parse_line(line):
columns = line.split()
token = columns.pop(0)
values = [float(column) for column in columns]
return (token, values)
|
Parses a line of a text embedding file.
Args:
line: (str) One line of the text embedding file.
Returns:
A token string and its embedding vector in floats.
|
codesearchnet
|
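An illustrative call to parse_line (assumed to be in scope); the embedding line is a made-up GloVe-style row:

token, vector = parse_line('the 0.418 0.24968 -0.41242')
# token == 'the', vector == [0.418, 0.24968, -0.41242]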
def _compose_custom_getters(getter_a, getter_b):
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
|
Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
|
juraj-google-style
|
def get_global_vars(func):
closure = getclosurevars(func)
if closure['nonlocal']:
raise TypeError(("Can't launch a job with closure variables: %s" % closure['nonlocals'].keys()))
globalvars = dict(modules={}, functions={}, vars={})
for (name, value) in closure['global'].items():
if inspect.ismodule(value):
globalvars['modules'][name] = value.__name__
elif (inspect.isfunction(value) or inspect.ismethod(value)):
globalvars['functions'][name] = value
else:
globalvars['vars'][name] = value
return globalvars
|
Store any methods or variables bound from the function's closure
Args:
func (function): function to inspect
Returns:
dict: mapping of variable names to globally bound variables
|
codesearchnet
|
def register_loss_scale_wrapper(optimizer_cls, wrapper_fn, wrapper_cls=None):
_REGISTERED_WRAPPER_OPTIMIZER_CLS[optimizer_cls] = (wrapper_fn, wrapper_cls or wrapper_fn)
|
Registers a loss scale optimizer wrapper.
`tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite`
automatically wraps an optimizer with an optimizer wrapper that performs loss
scaling. This function registers a
`(base_cls, wrapper_fn, wrapper_cls)` triple
that is used by `enable_mixed_precision_graph_rewrite`, where
`wrapper_fn` is called to create a `wrapper_cls` instance that wraps an
`optimizer_cls` instance.
Args:
optimizer_cls: A base optimizer class, e.g. `tf.keras.optimizers.Optimizer`.
wrapper_fn: A function that takes in arguments "optimizer" and
"loss_scale", and returns a loss scale optimizer of type "wrapper_cls"
that wraps "optimizer".
wrapper_cls: A loss scale optimizer class. Defaults to `wrapper_fn`, in
which case `wrapper_fn` should be a loss scale optimizer class whose
constructor takes in arguments "optimizer" and "loss_scale".
|
github-repos
|
def _get_syslog_format(event_type):
syslog_format_template = get_template('syslog_format.json')
fmt = syslog_format_template.render(
event_type=event_type,
host=dbconfig.get('instance_name', default='local')
)
return json.dumps(json.loads(fmt))
|
Take an event type argument and return a Python logging format string.
To format syslog messages to the current standard, the template is loaded,
the necessary replacements are made, and the resulting string is returned.
Args:
event_type (str): Event type name
Returns:
`str`
|
juraj-google-style
|
def _get_measure_outcome(self, qubit):
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
return '1', probabilities[1]
|
Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
|
juraj-google-style
|
def _nutation(date, eop_correction=True, terms=106):
ttt = date.change_scale('TT').julian_century
r = 360.0
epsilon_bar = (((84381.448 - (46.815 * ttt)) - (0.00059 * (ttt ** 2))) + (0.001813 * (ttt ** 3)))
epsilon_bar /= 3600.0
m_m = (((134.96298139 + (((1325 * r) + 198.8673981) * ttt)) + (0.0086972 * (ttt ** 2))) + (1.78e-05 * (ttt ** 3)))
m_s = (((357.52772333 + (((99 * r) + 359.05034) * ttt)) - (0.0001603 * (ttt ** 2))) - (3.3e-06 * (ttt ** 3)))
u_m_m = (((93.27191028 + (((1342 * r) + 82.0175381) * ttt)) - (0.0036825 * (ttt ** 2))) + (3.1e-06 * (ttt ** 3)))
d_s = (((297.85036306 + (((1236 * r) + 307.11148) * ttt)) - (0.0019142 * (ttt ** 2))) + (5.3e-06 * (ttt ** 3)))
om_m = (((125.04452222 - (((5 * r) + 134.1362608) * ttt)) + (0.0020708 * (ttt ** 2))) + (2.2e-06 * (ttt ** 3)))
delta_psi = 0.0
delta_eps = 0.0
for (integers, reals) in _tab(terms):
(a1, a2, a3, a4, a5) = integers
(A, B, C, D) = (np.array(list(reals)) / 36000000.0)
a_p = (((((a1 * m_m) + (a2 * m_s)) + (a3 * u_m_m)) + (a4 * d_s)) + (a5 * om_m))
delta_psi += ((A + (B * ttt)) * np.sin(np.deg2rad(a_p)))
delta_eps += ((C + (D * ttt)) * np.cos(np.deg2rad(a_p)))
if eop_correction:
delta_eps += (date.eop.deps / 3600000.0)
delta_psi += (date.eop.dpsi / 3600000.0)
return (epsilon_bar, delta_psi, delta_eps)
|
Model 1980 of nutation as described in Vallado p. 224
Args:
date (beyond.utils.date.Date)
eop_correction (bool): set to ``True`` to include model correction
from 'finals' files.
terms (int)
Return:
tuple : 3-elements, all floats in degrees
1. ε̄
2. Δψ
3. Δε
Warning:
The good version of the nutation model can be found in the **errata**
of the 4th edition of *Fundamentals of Astrodynamics and Applications*
by Vallado.
|
codesearchnet
|
def regex_find(orig_screen_output, regex, font_attr):
new_screen_output = RichTextLines(orig_screen_output.lines, font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs), annotations=orig_screen_output.annotations)
try:
re_prog = re.compile(regex)
except re.error:
raise ValueError('Invalid regular expression: "%s"' % regex)
regex_match_lines = []
for i, line in enumerate(new_screen_output.lines):
find_it = re_prog.finditer(line)
match_segs = []
for match in find_it:
match_segs.append((match.start(), match.end(), font_attr))
if match_segs:
if i not in new_screen_output.font_attr_segs:
new_screen_output.font_attr_segs[i] = match_segs
else:
new_screen_output.font_attr_segs[i].extend(match_segs)
new_screen_output.font_attr_segs[i] = sorted(new_screen_output.font_attr_segs[i], key=lambda x: x[0])
regex_match_lines.append(i)
new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
return new_screen_output
|
Perform regex match in rich text lines.
Produces a new RichTextLines object with font_attr_segs containing highlighted
regex matches.
Example use cases include:
1) search for specific items in a large list of items, and
2) search for specific numerical values in a large tensor.
Args:
orig_screen_output: The original RichTextLines, in which the regex find
is to be performed.
regex: The regex used for matching.
font_attr: Font attribute used for highlighting the found result.
Returns:
A modified copy of orig_screen_output.
Raises:
ValueError: If input str regex is not a valid regular expression.
|
github-repos
|
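A hedged sketch of regex_find, assuming RichTextLines from the same debugger CLI module is in scope alongside the function above; the buffer contents and font attribute are invented:

out = RichTextLines(['loss = 0.2534', 'global step: 100'])
highlighted = regex_find(out, r'[0-9.]+', 'bold')
# highlighted.font_attr_segs now marks the numeric spans with the 'bold' attribute,
# and highlighted.annotations records which line indices matched.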
def tersoff_potential(self, structure):
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
el_val_dict = dict(zip(el, valences))
gin = "species \n"
qerfstring = "qerfc\n"
for key in el_val_dict.keys():
if key != "O" and el_val_dict[key] % 1 != 0:
raise SystemError("Oxide has mixed valence on metal")
specie_string = key + " core " + str(el_val_dict[key]) + "\n"
gin += specie_string
qerfstring += key + " " + key + " 0.6000 10.0000 \n"
gin += "
met_oxi_ters = TersoffPotential().data
for key in el_val_dict.keys():
if key != "O":
metal = key + "(" + str(int(el_val_dict[key])) + ")"
ters_pot_str = met_oxi_ters[metal]
gin += ters_pot_str
gin += qerfstring
return gin
|
Generate the species and Tersoff potential lines for an oxide structure.
Args:
structure: pymatgen.core.structure.Structure
|
juraj-google-style
|
def block_depth(self):
return self._block_depth
|
Depth of recursively defined circulant blocks defining this `Operator`.
With `A` the dense representation of this `Operator`,
`block_depth = 1` means `A` is symmetric circulant. For example,
```
A = |w z y x|
|x w z y|
|y x w z|
|z y x w|
```
`block_depth = 2` means `A` is block symmetric circulant with symmetric
circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant,
```
A = |W Z Y X|
|X W Z Y|
|Y X W Z|
|Z Y X W|
```
`block_depth = 3` means `A` is block symmetric circulant with block
symmetric circulant blocks.
Returns:
Python `integer`.
|
github-repos
|
def needle_statistics_alignio(infile):
alignments = list(AlignIO.parse(infile, 'emboss'))
if (len(alignments) > 1):
raise ValueError('Alignment file contains more than one pairwise alignment')
alignment = alignments[0]
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
            while (line.rstrip() != '#======================================='):
line = f.readline()
if (not line):
raise StopIteration
            while (line[0] == '#'):
parts = line[1:].split(':', 1)
key = parts[0].lower().strip()
if (key == 'identity'):
ident_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment.annotations['identity'] = ident_num
alignment.annotations['percent_identity'] = ident_percent
if (key == 'similarity'):
sim_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment.annotations['similarity'] = sim_num
alignment.annotations['percent_similarity'] = sim_percent
if (key == 'gaps'):
gap_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment.annotations['gaps'] = gap_num
alignment.annotations['percent_gaps'] = gap_percent
if (key == 'score'):
score = float(parts[1].strip())
alignment.annotations['score'] = score
line = f.readline()
return alignment
|
Reads in a needle alignment file and returns an AlignIO object with annotations
Args:
infile (str): Alignment file name
Returns:
AlignIO: annotated AlignIO object
|
codesearchnet
|
def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
spectrum = _to_complex(spectrum)
spectrum_shape = self._shape_to_spectrum_shape(shape)
domain_dimension = spectrum_shape[-1]
if not domain_dimension:
return array_ops.zeros(shape, dtype)
matrix_rows = []
for m in range(domain_dimension):
x = np.zeros([domain_dimension])
x[m] = 1.0
fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
h_convolve_x = fft_ops.ifft(spectrum * fft_x)
matrix_rows.append(h_convolve_x)
matrix = array_ops_stack.stack(matrix_rows, axis=-1)
return math_ops.cast(matrix, dtype)
|
Creates a circulant matrix from a spectrum.
Intentionally done in an explicit yet inefficient way. This provides a
cross check to the main code that uses fancy reshapes.
Args:
spectrum: Float or complex `Tensor`.
shape: Python list. Desired shape of returned matrix.
dtype: Type to cast the returned matrix to.
Returns:
Circulant (batch) matrix of desired `dtype`.
|
github-repos
|
def serialize_to_json(self, name, datas):
data_object = datas.get('object', None)
if (data_object is None):
msg = "JSON reference '{}' lacks of required 'object' variable"
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
|
Serialize the given data into a Python object from the assumed JSON string.
Arguments:
name (string): Name used only in possible exception messages.
datas (dict): Data to serialize; must contain an 'object' key holding a JSON string.
Returns:
object: Object depending from JSON content.
|
codesearchnet
|
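An illustrative call to serialize_to_json, assuming `serializer` is an instance of the class the method above belongs to:

content = serializer.serialize_to_json('palette', {'object': '{"primary": "#0040ff"}'})
# content == OrderedDict([('primary', '#0040ff')])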
def _set_root(self, request):
if request.state_root:
root = request.state_root
else:
head = self._get_chain_head()
root = head.state_root_hash
try:
self._tree.set_merkle_root(root)
except KeyError as e:
LOGGER.debug('Unable to find root "%s" in database', e)
raise _ResponseFailed(self._status.NO_ROOT)
return root
|
Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
ResponseFailed: if the requested root cannot be found in the merkle tree.
|
juraj-google-style
|
def set_consistent(self, consistent_config):
self.topology._add_job_control_plane()
self.oport.operator.consistent(consistent_config)
return self._make_placeable()
|
Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
|
codesearchnet
|
def get_filelikeobject(filename: str = None,
blob: bytes = None) -> BinaryIO:
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if filename:
return open(filename, 'rb')
else:
return io.BytesIO(blob)
|
Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object
|
juraj-google-style
|
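A minimal usage sketch for get_filelikeobject (assumed to be in scope), guarded with `with` as the docstring advises:

with get_filelikeobject(blob=b'hello') as f:
    data = f.read()   # b'hello'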
def __init__(self, min_value, max_value, scaling_type='Auto'):
self.min_value = min_value
self.max_value = max_value
self.scaling_type = scaling_type
|
Initialize a parameter range.
Args:
min_value (float or int): The minimum value for the range.
max_value (float or int): The maximum value for the range.
scaling_type (str): The scale used for searching the range during tuning (default: 'Auto').
Valid values: 'Auto', 'Linear', 'Logarithmic' and 'ReverseLogarithmic'.
|
juraj-google-style
|
def stop_replace(self, accountID, orderID, **kwargs):
return self.replace(accountID, orderID, order=StopOrderRequest(**kwargs))
|
Shortcut to replace a pending Stop Order in an Account
Args:
accountID : The ID of the Account
orderID : The ID of the Stop Order to replace
kwargs : The arguments to create a StopOrderRequest
Returns:
v20.response.Response containing the results from submitting
the request
|
codesearchnet
|
def get_rmsd(self, mol1, mol2):
(label1, label2) = self._mapper.uniform_labels(mol1, mol2)
if ((label1 is None) or (label2 is None)):
return float('Inf')
return self._calc_rms(mol1, mol2, label1, label2)
|
Get RMSD between two molecules with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different
|
codesearchnet
|
def build_user(user_info):
try:
email = user_info['email']
except KeyError as err:
raise KeyError("A user has to have a email")
try:
name = user_info['name']
except KeyError as err:
raise KeyError("A user has to have a name")
user_obj = User(email=email, name=name)
if 'roles' in user_info:
user_obj['roles'] = user_info['roles']
if 'location' in user_info:
user_obj['location'] = user_info['location']
if 'institutes' in user_info:
user_obj['institutes'] = user_info['institutes']
return user_obj
|
Build a user object
Args:
user_info(dict): A dictionary with user information
Returns:
user_obj(scout.models.User)
|
juraj-google-style
|
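An illustrative call to build_user, assuming the scout User model backing the function above is importable; the account details are invented for the example:

user = build_user({'email': 'jane.doe@example.com',
                   'name': 'Jane Doe',
                   'roles': ['admin'],
                   'institutes': ['cust000']})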