code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
return [n for n in neuron.neurites if is_flat(n, tol, method)]
|
Check if a neuron has neurites that are flat within a tolerance
Args:
neuron(Neuron): neuron to operate on
tol(float): the tolerance or the ratio
method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`
Returns:
List of the neuron's neurites that are flat with respect to the
given criteria
|
juraj-google-style
|
def __init__(self, object_id: str, allowed_states: List[str],
allowed_transitions: dict, allowed_target_states: dict):
self._id = object_id
self._type = STATES_KEY
self._key = '{}:{}'.format(STATES_KEY, self._id)
self._allowed_states = [state.lower() for state in allowed_states]
self._allowed_transitions = self._dict_lower(allowed_transitions)
self._allowed_target_states = self._dict_lower(allowed_target_states)
if not DB.key_exists(self._key):
DB.save_dict(self._key, self._initialise())
|
Initialise a state object.
Args:
object_id (str): ID of the object to which the states belong.
allowed_states (List[str]): List of allowed states.
allowed_transitions (dict): Dict of allowed state transitions.
allowed_target_states (dict): Dict of allowed target states.
|
juraj-google-style
|
def GetFileObjectReferenceCount(self, path_spec):
cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)
if not cache_value:
return None
return cache_value.reference_count
|
Retrieves the reference count of a cached file-like object.
Args:
path_spec (PathSpec): path specification.
Returns:
int: reference count, or None if there is no cached file-like object
for the corresponding path specification.
|
juraj-google-style
|
def compute_predecessors(nodes: Iterable[_PredecessorNode]) -> dict[_PredecessorNode, set[_PredecessorNode]]:
predecessors = {n: {n} for n in nodes}
discovered = set()
for start in nodes:
if start in discovered:
continue
unprocessed = [(start, n) for n in start.outgoing]
while unprocessed:
from_node, node = unprocessed.pop(0)
node_predecessors = predecessors[node]
length_before = len(node_predecessors)
node_predecessors |= predecessors[from_node]
if length_before != len(node_predecessors):
unprocessed.extend(((node, n) for n in node.outgoing))
discovered.add(node)
return predecessors
|
Build a transitive closure.
For a list of nodes, compute all the predecessors of each node.
Args:
nodes: A list of nodes or blocks.
Returns:
A dictionary that maps each node to a set of all the nodes that can reach
that node.
|
github-repos
|
def run_benchmark(self, dataset, num_elements, iters=1, warmup=True, apply_default_optimizations=False, session_config=None):
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = apply_default_optimizations
dataset = dataset.with_options(options)
dataset = dataset.skip(num_elements - 1)
if context.executing_eagerly():
median_duration = self._run_eager_benchmark(iterable=dataset, iters=iters, warmup=warmup)
return median_duration / float(num_elements)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
op = nest.flatten(next_element)[0].op
median_duration = self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config, initializer=iterator.initializer)
return median_duration / float(num_elements)
|
Benchmarks the dataset.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
apply_default_optimizations: Determines whether default optimizations
should be applied.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
|
github-repos
|
def __init__(self, error_name, error_id, error_msg, stack_patterns):
self.error_name = error_name
self.error_id = error_id
self.error_msg = error_msg
self._stack_patterns = stack_patterns
|
Create a ParserError that matches against any of the |stack_patterns|.
Args:
error_name: A short, human readable name for the error,
using lowercase-with-dashes-format.
error_id: An integer to identify a specific error:
100s: Lexer errors.
200s: Low level parsing errors.
300s: High level parsing errors.
error_msg: A message to display with this error that describes
clearly what caused the error.
stack_patterns: A list of "stack patterns", where each stack pattern
is a list of strings corresponding to symbols on the parser's symbol
stack at the time it errored out. The string values for the symbols
can match essentially any terminal or non-terminal symbol used in the
grammar from parser.py.
Examples: ['TRANSITION', 'NAME', 'params', '=']
(or None to match against any symbol stack).
Returns:
ParserError that matches against |stack_patterns|.
|
github-repos
|
def detail_parking(self, **kwargs):
date = util.datetime_string(kwargs.get('day', 1), kwargs.get('month', 1), kwargs.get('year', 1970), kwargs.get('hour', 0), kwargs.get('minute', 0))
params = {'language': util.language_code(kwargs.get('lang')), 'publicData': True, 'date': date, 'id': kwargs.get('parking'), 'family': kwargs.get('family')}
result = self.make_request('detail_parking', {}, **params)
if (not util.check_result(result)):
return (False, result.get('message', 'UNKNOWN ERROR'))
values = util.response_list(result, 'Data')
return (True, [emtype.ParkingDetails(**a) for a in values])
|
Obtain detailed info of a given parking.
Args:
lang (str): Language code (*es* or *en*).
day (int): Day of the month in format DD.
The number is automatically padded if it only has one digit.
month (int): Month number in format MM.
The number is automatically padded if it only has one digit.
year (int): Year number in format YYYY.
hour (int): Hour of the day in format hh.
The number is automatically padded if it only has one digit.
minute (int): Minute of the hour in format mm.
The number is automatically padded if it only has one digit.
parking (int): ID of the parking to query.
family (str): Family code of the parking (3 chars).
Returns:
Status boolean and parsed response (list[ParkingDetails]), or message
string in case of error.
|
codesearchnet
|
def send_offer_assignment_email(self, user_email, offer_assignment_id, subject, email_body, site_code=None):
config = get_sailthru_configuration(site_code)
response = _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)
if response and response.is_ok():
send_id = response.get_body().get('send_id')
if _update_assignment_email_status(offer_assignment_id, send_id, 'success'):
logger.info('[Offer Assignment] Offer assignment notification sent with message --- {message}'.format(
message=email_body))
else:
logger.exception(
'[Offer Assignment] An error occurred while updating email status data for '
'offer {token_offer} and email {token_email} via the ecommerce API.'.format(
token_offer=offer_assignment_id,
token_email=user_email,
)
)
|
Sends the offer assignment email.
Args:
self: Ignore.
user_email (str): Recipient's email address.
offer_assignment_id (str): Key of the entry in the offer_assignment model.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
|
juraj-google-style
|
def start(host, port, profiler_stats, dont_start_browser, debug_mode):
stats_handler = functools.partial(StatsHandler, profiler_stats)
if not debug_mode:
sys.stderr = open(os.devnull, 'w')
print('Starting HTTP server...')
if not dont_start_browser:
webbrowser.open('http://{}:{}/'.format(host, port))
try:
StatsServer((host, port), stats_handler).serve_forever()
except KeyboardInterrupt:
print('Stopping...')
sys.exit(0)
|
Starts HTTP server with specified parameters.
Args:
host: Server host name.
port: Server port.
profiler_stats: A dict with collected program stats.
dont_start_browser: If True, do not open the browser after starting the server.
debug_mode: If False, stderr is redirected to /dev/null.
|
juraj-google-style
|
def get_cmd_out(command):
if isinstance(command, list):
result = sp.check_output(command)
else:
result = sp.check_output(command, shell=True)
return result.decode('utf-8').rstrip()
|
Get the output of a command.
Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command.
Args:
command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`).
Note:
If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash).
Returns:
str: The ``stdout`` of the command.
|
juraj-google-style
|
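A minimal usage sketch for `get_cmd_out`, with the helper inlined so it runs standalone; the commands are illustrative and assume a POSIX shell.

```python
import subprocess as sp

def get_cmd_out(command):
    # List form runs the arguments directly; string form goes through the shell.
    if isinstance(command, list):
        result = sp.check_output(command)
    else:
        result = sp.check_output(command, shell=True)
    return result.decode('utf-8').rstrip()

print(get_cmd_out(['echo', 'hello']))           # -> hello
print(get_cmd_out('echo hello | tr a-z A-Z'))   # shell features work -> HELLO
```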
def _kl_normal_normal(n_a, n_b, name=None):
with ops.name_scope(name, 'kl_normal_normal', [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared) + half * (ratio - one - math_ops.log(ratio))
|
Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
|
github-repos
|
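For intuition, a plain-NumPy restatement of the same closed form, checked against a Monte Carlo estimate; the parameter values are made up for illustration.

```python
import numpy as np

def kl_normal_normal(loc_a, scale_a, loc_b, scale_b):
    # KL(N(loc_a, scale_a) || N(loc_b, scale_b)) in closed form.
    ratio = (scale_a / scale_b) ** 2
    return (loc_a - loc_b) ** 2 / (2.0 * scale_b ** 2) + 0.5 * (ratio - 1.0 - np.log(ratio))

loc_a, scale_a, loc_b, scale_b = 0.3, 1.2, -0.1, 0.8
rng = np.random.default_rng(0)
x = rng.normal(loc_a, scale_a, size=1_000_000)
# Monte Carlo estimate of E_a[log p_a(x) - log p_b(x)]; the 2*pi terms cancel.
log_ratio = (-0.5 * ((x - loc_a) / scale_a) ** 2 - np.log(scale_a)
             + 0.5 * ((x - loc_b) / scale_b) ** 2 + np.log(scale_b))
print(kl_normal_normal(loc_a, scale_a, loc_b, scale_b))  # analytic, ~0.344
print(log_ratio.mean())                                   # should agree closely
```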
def add_frequency(self, name, value):
logger.debug('Adding frequency {0} with value {1} to variant {2}'.format(name, value, self['variant_id']))
self['frequencies'].append({'label': name, 'value': value})
|
Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field
value: The value of the frequency
|
codesearchnet
|
def read_from_hdx(identifier, configuration=None):
dataset = Dataset(configuration=configuration)
result = dataset._dataset_load_from_hdx(identifier)
if result:
return dataset
return None
|
Reads the dataset given by identifier from HDX and returns Dataset object
Args:
identifier (str): Identifier of dataset
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Dataset]: Dataset object if successful read, None if not
|
juraj-google-style
|
def script(experiment, projects):
benchbuild_c = local[local.path(sys.argv[0])]
slurm_script = (((local.cwd / experiment.name) + '-') + str(CFG['slurm']['script']))
srun = local['srun']
srun_args = []
if (not CFG['slurm']['multithread']):
srun_args.append('--hint=nomultithread')
if (not CFG['slurm']['turbo']):
srun_args.append('--pstate-turbo=off')
srun = srun[srun_args]
srun = srun[benchbuild_c['run']]
return __save__(slurm_script, srun, experiment, projects)
|
Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for.
|
codesearchnet
|
def setall(self, key, values):
self.delall(key)
for tag in values:
self[tag.HashKey] = tag
|
Delete frames of the given type and add frames in 'values'.
Args:
key (text): key for frames to delete
values (list[Frame]): frames to add
|
juraj-google-style
|
def select_sites(self, site_labels):
if (type(site_labels) in (list, set)):
selected_sites = [s for s in self.sites if (s.label in site_labels)]
elif (type(site_labels) is str):
selected_sites = [s for s in self.sites if (s.label == site_labels)]
else:
raise ValueError(str(site_labels))
return selected_sites
|
Selects sites in the lattice with specified labels.
Args:
site_labels (List(Str)|Set(Str)|Str): Labels of sites to select.
This can be a List ['A', 'B'], a Set {'A', 'B'}, or a String 'A'.
Returns:
(List(Site)): List of sites with labels given by `site_labels`.
|
codesearchnet
|
def __init__(self, experimental_debug_info_func):
super(TFLiteConverterBaseV1, self).__init__()
self.inference_type = _dtypes.float32
self.inference_input_type = None
self.inference_output_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self.conversion_summary_dir = None
self._debug_info_func = experimental_debug_info_func
self._metadata.environment.apiVersion = 1
|
Constructor for TFLiteConverter.
Args:
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
|
github-repos
|
def _resolve_prefix(self, token):
if token in self._handlers:
return token
elif token in self._alias_to_prefix:
return self._alias_to_prefix[token]
else:
return None
|
Resolve command prefix from the prefix itself or its alias.
Args:
token: a str to be resolved.
Returns:
If resolvable, the resolved command prefix.
If not resolvable, None.
|
github-repos
|
def _parse_state_value(state, user):
uri, token = state.rsplit(':', 1)
if xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
return uri
else:
return None
|
Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Returns:
The redirect URI, or None if XSRF token is not valid.
|
juraj-google-style
|
def merge_with(x, other):
return type(x)(tf.TensorShape(x).merge_with(other))
|
Returns a shape combining the information in `x` and `other`.
The dimensions in `x` and `other` are merged elementwise, according to the
rules defined for `tf.Dimension.merge_with()`.
For more details, see `help(tf.TensorShape.merge_with)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
other: object representing a shape; convertible to `tf.TensorShape`.
Returns:
merged_shape: shape having `type(x)` containing the combined information of
`x` and `other`.
Raises:
ValueError: If `x` and `other` are not compatible.
|
codesearchnet
|
def clear(self):
self._push_all_models_freeze()
try:
while (len(self._roots) > 0):
r = next(iter(self._roots))
self.remove_root(r)
finally:
self._pop_all_models_freeze()
|
Remove all content from the document but do not reset title.
Returns:
None
|
codesearchnet
|
def layout(self, dimensions=None, **kwargs):
return self.groupby(dimensions, container_type=NdLayout, **kwargs)
|
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a NdLayout.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
layout: NdLayout
NdLayout with supplied dimensions
|
juraj-google-style
|
def _parse_url_and_validate(cls, url):
parsed_url = urlparse(url)
if parsed_url.scheme and parsed_url.netloc:
final_url = parsed_url.geturl()
else:
raise BadURLException
return final_url
|
Receives a URL string and validates it using urlparse.
Args:
url: A URL string
Returns:
parsed_url: A validated URL
Raises:
BadURLException
|
juraj-google-style
|
def set_default_language(self, language: str):
if language not in self.config.languages:
raise ValueError(f'{self} does not have an adapter for {language}. Supported languages: {list(self.config.languages)}')
self.config.default_language = language
|
Set the default language code for the model. This is used when the language is not specified in the input.
Args:
language (`str`): The language code, such as `"en_XX"` or `"de_DE"`.
|
github-repos
|
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Lists all routines in the specified dataset. Requires the READER dataset role.
Args:
request: (BigqueryRoutinesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListRoutinesResponse) The response message.
|
github-repos
|
def wait(self, timeout_s: float = None) -> int:
if not self.running:
return 0
retcode = self.process.wait(timeout=timeout_s)
if retcode is None:
self.error("Subprocess finished, but return code was None")
retcode = 1
elif retcode == 0:
self.info("Subprocess finished cleanly (return code 0).")
else:
self.error(
"Subprocess finished, but FAILED (return code {}). "
"Logs were: {} (stdout), {} (stderr)".format(
retcode,
self.details.logfile_out,
self.details.logfile_err))
self.running = False
return retcode
|
Wait for up to ``timeout_s`` for the child process to finish.
Args:
timeout_s: maximum time to wait or ``None`` to wait forever
Returns:
process return code; or ``0`` if it wasn't running, or ``1`` if
it managed to exit without a return code
Raises:
subprocess.TimeoutExpired: if the process continues to run
|
juraj-google-style
|
def list_storage_accounts_rg(access_token, subscription_id, rgname):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.Storage/storageAccounts',
'?api-version=', STORAGE_API])
return do_get(endpoint, access_token)
|
List the storage accounts in the specified resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body list of storage accounts.
|
juraj-google-style
|
def get_environ(cls, prefix):
return ((key[len(prefix) + 1:], value)
for key, value in os.environ.items()
if key.startswith('%s_' % prefix))
|
Retrieves environment variables from a namespace.
Args:
prefix (str): The prefix, without a trailing underscore.
Returns:
generator: Pairs of (key, value) for each matching environment variable, with the prefix stripped from the key.
|
juraj-google-style
|
def abs(x):
return math_ops.abs(x)
|
Element-wise absolute value.
Args:
x: Tensor or variable.
Returns:
A tensor.
|
github-repos
|
def verify_ed25519_signature(public_key, contents, signature, message):
try:
public_key.verify(signature, contents)
except InvalidSignature as exc:
raise ScriptWorkerEd25519Error(message % {'exc': str(exc)})
|
Verify that ``signature`` comes from ``public_key`` and ``contents``.
Args:
public_key (Ed25519PublicKey): the key to verify the signature
contents (bytes): the contents that was signed
signature (bytes): the signature to verify
message (str): the error message to raise.
Raises:
ScriptWorkerEd25519Error: on failure
|
juraj-google-style
|
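A small round-trip sketch with the `cryptography` package, which the helper above appears to rely on for `Ed25519PublicKey` and `InvalidSignature`; the key and message here are generated purely for illustration.

```python
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()

contents = b'artifact bytes to protect'
signature = private_key.sign(contents)

# Same check the helper performs: verify() is silent on success and raises
# InvalidSignature on mismatch, which the helper wraps in a scriptworker error.
public_key.verify(signature, contents)
try:
    public_key.verify(signature, b'tampered contents')
except InvalidSignature:
    print('verification failed as expected')
```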
def _set_scripts(self, host_metadata, scripts):
scripts_key = 'deploy-scripts'
if ('ovirt-scripts' in host_metadata):
scripts_key = 'ovirt-scripts'
host_metadata[scripts_key] = scripts
return host_metadata
|
Temporary method to set the host scripts
TODO:
remove once the "ovirt-scripts" option gets deprecated
Args:
host_metadata(dict): host metadata to set scripts in
Returns:
dict: the updated metadata
|
codesearchnet
|
def hamming_distance(str1, str2):
if (len(str1) != len(str2)):
raise VisualizationError('Strings not same length.')
return sum(((s1 != s2) for (s1, s2) in zip(str1, str2)))
|
Calculate the Hamming distance between two bit strings
Args:
str1 (str): First string.
str2 (str): Second string.
Returns:
int: Distance between strings.
Raises:
VisualizationError: Strings not same length
|
codesearchnet
|
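A quick standalone check of the distance on two equal-length bit strings; the qiskit-specific `VisualizationError` is swapped for a plain `ValueError` so the sketch has no dependencies.

```python
def hamming_distance(str1, str2):
    if len(str1) != len(str2):
        raise ValueError('Strings not same length.')
    return sum(s1 != s2 for s1, s2 in zip(str1, str2))

print(hamming_distance('10110', '10011'))  # 2: positions 2 and 4 differ
```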
def copy(self, src, dst, other_system=None):
copy_source = self.get_client_kwargs(src)
copy_destination = self.get_client_kwargs(dst)
with _handle_oss_error():
bucket = self._get_bucket(copy_destination)
bucket.copy_object(
source_bucket_name=copy_source['bucket_name'],
source_key=copy_source['key'],
target_key=copy_destination['key'])
|
Copy object of the same storage.
Args:
src (str): Path or URL.
dst (str): Path or URL.
other_system (pycosio._core.io_system.SystemBase subclass): Unused.
|
juraj-google-style
|
def token_to_id(self, token):
token = self.process_token(token)
return self._token2id.get(token, len(self._token2id) - 1)
|
Get the token_id of given token.
Args:
token (str): token from vocabulary.
Returns:
int: int id of token.
|
juraj-google-style
|
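A minimal sketch of the lookup convention, assuming (as the fallback suggests) that the last vocabulary id is reserved for the unknown token; the vocabulary below is invented.

```python
token2id = {'<pad>': 0, 'hello': 1, 'world': 2, '<unk>': 3}

def token_to_id(token):
    # Unknown tokens fall back to the last id, assumed here to be <unk>.
    return token2id.get(token, len(token2id) - 1)

print(token_to_id('hello'))    # 1
print(token_to_id('missing'))  # 3 -> the <unk> id
```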
def find_config(test_file=None, defaults=None, root=os.curdir):
if (defaults is None):
defaults = ['.benchbuild.yml', '.benchbuild.yaml']
def walk_rec(cur_path, root):
cur_path = (local.path(root) / test_file)
if cur_path.exists():
return cur_path
new_root = (local.path(root) / os.pardir)
return (walk_rec(cur_path, new_root) if (new_root != root) else None)
if (test_file is not None):
return walk_rec(test_file, root)
for test_file in defaults:
ret = walk_rec(test_file, root)
if (ret is not None):
return ret
|
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
test_file: The name of the config file to look for; if None, the defaults are used.
defaults: Default config file names to search for when `test_file` is not given.
root: The directory to start looking in.
Returns:
Path to the default config file, None if we can't find anything.
|
codesearchnet
|
def draw_line(self, x1, y1, x2, y2, color):
check_int_err(lib.lineRGBA(self._ptr, x1, y1, x2, y2, color[0], color[1], color[2], color[3]))
|
Draw a line.
Args:
x1 (int): The x coordinate of the start of the line.
y1 (int): The y coordinate of the start of the line.
x2 (int): The x coordinate of the end of the line.
y2 (int): The y coordinate of the end of the line.
color (Tuple[int, int, int, int]): The color of the line.
Raises:
SDLError: If an error is encountered.
|
juraj-google-style
|
def db004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db004`'.format(value))
self._db004 = value
|
Corresponds to IDD Field `db004`
Dry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)
return gen_state_ops.scatter_add(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)
|
Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered addition has completed.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
|
github-repos
|
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
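A shape-only illustration of the `[CLS] ... [SEP]` layout with stand-in ids; the special-token ids and input ids below are placeholders, not real vocabulary values.

```python
cls_token_id, sep_token_id = 101, 102  # stand-in special-token ids

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    output = [cls_token_id] + token_ids_0 + [sep_token_id]
    if token_ids_1 is not None:
        output += token_ids_1 + [sep_token_id]
    return output

print(build_inputs_with_special_tokens([7, 8, 9]))         # [101, 7, 8, 9, 102]
print(build_inputs_with_special_tokens([7, 8], [20, 21]))  # [101, 7, 8, 102, 20, 21, 102]
```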
def StoreRequestsAndResponses(self,
new_requests=None,
new_responses=None,
requests_to_delete=None):
to_write = {}
if new_requests is not None:
for request, timestamp in new_requests:
subject = request.session_id.Add("state")
queue = to_write.setdefault(subject, {})
queue.setdefault(self.FLOW_REQUEST_TEMPLATE % request.id, []).append(
(request.SerializeToString(), timestamp))
if new_responses is not None:
for response, timestamp in new_responses:
if response.type == rdf_flows.GrrMessage.Type.STATUS:
subject = response.session_id.Add("state")
attribute = self.FLOW_STATUS_TEMPLATE % response.request_id
to_write.setdefault(subject, {}).setdefault(attribute, []).append(
(response.SerializeToString(), timestamp))
subject = self.GetFlowResponseSubject(response.session_id,
response.request_id)
attribute = self.FLOW_RESPONSE_TEMPLATE % (response.request_id,
response.response_id)
to_write.setdefault(subject, {}).setdefault(attribute, []).append(
(response.SerializeToString(), timestamp))
to_delete = {}
if requests_to_delete is not None:
for request in requests_to_delete:
queue = to_delete.setdefault(request.session_id.Add("state"), [])
queue.append(self.FLOW_REQUEST_TEMPLATE % request.id)
queue.append(self.FLOW_STATUS_TEMPLATE % request.id)
for subject in set(to_write) | set(to_delete):
self.MultiSet(
subject,
to_write.get(subject, {}),
to_delete=to_delete.get(subject, []),
sync=True)
|
Stores new flow requests and responses to the data store.
Args:
new_requests: A list of tuples (request, timestamp) to store in the data
store.
new_responses: A list of tuples (response, timestamp) to store in the data
store.
requests_to_delete: A list of requests that should be deleted from the
data store.
|
juraj-google-style
|
def from_snl(cls, snl):
hist = []
for h in snl.history:
d = h.description
d['_snl'] = {'url': h.url, 'name': h.name}
hist.append(d)
return cls(snl.structure, history=hist)
|
Create TransformedStructure from SNL.
Args:
snl (StructureNL): Starting snl
Returns:
TransformedStructure
|
codesearchnet
|
def buckets_insert(self, bucket, project_id=None):
args = {'project': (project_id if project_id else self._project_id)}
data = {'name': bucket}
url = (Api._ENDPOINT + (Api._BUCKET_PATH % ''))
return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
|
Issues a request to create a new bucket.
Args:
bucket: the name of the bucket.
project_id: the project to use when inserting the bucket.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def __init__(self, input_reader=None, output_writer=None):
super(CLITool, self).__init__()
preferred_encoding = locale.getpreferredencoding()
if not preferred_encoding:
preferred_encoding = self._PREFERRED_ENCODING
elif isinstance(preferred_encoding, py2to3.BYTES_TYPE):
preferred_encoding = preferred_encoding.decode('utf-8')
if not input_reader:
input_reader = StdinInputReader(encoding=preferred_encoding)
if not output_writer:
output_writer = StdoutOutputWriter(encoding=preferred_encoding)
self._data_location = None
self._debug_mode = False
self._encode_errors = 'strict'
self._input_reader = input_reader
self._log_file = None
self._output_writer = output_writer
self._preferred_time_zone = None
self._quiet_mode = False
self._views_format_type = views.ViewsFactory.FORMAT_TYPE_CLI
self.list_timezones = False
self.preferred_encoding = preferred_encoding
|
Initializes a command line interface tool.
Args:
input_reader (Optional[CLIInputReader]): input reader, where None
indicates that the stdin input reader should be used.
output_writer (Optional[CLIOutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
juraj-google-style
|
def sync_results(vcs, signature):
results_directory = _get_results_directory(vcs, signature)
if not os.path.exists(results_directory):
raise ResultsNotFoundError
with open(os.path.join(results_directory, 'patterns'), 'r') as f:
patterns = f.read().strip().split()
includes = ['--include={}'.format(x)
for x in patterns]
cmd = ['rsync', '-r'] + includes + ['--exclude=*',
os.path.join(
results_directory, 'results', ''),
os.path.join(vcs.path, '')]
subprocess.check_call(cmd)
|
Sync the saved results for `signature` back to the project.
Args:
vcs (easyci.vcs.base.Vcs)
signature (str)
Raises:
ResultsNotFoundError
|
juraj-google-style
|
def find_triggers(
nodes,
trigger_words,
nosec_lines
):
trigger_nodes = list()
for node in nodes:
if node.line_number not in nosec_lines:
trigger_nodes.extend(iter(label_contains(node, trigger_words)))
return trigger_nodes
|
Find triggers from the trigger_word_list in the nodes.
Args:
nodes(list[Node]): the nodes to find triggers in.
trigger_words(list[Union[Sink, Source]]): list of trigger words to look for.
nosec_lines(set): lines with # nosec whitelisting
Returns:
List of found TriggerNodes
|
juraj-google-style
|
def laid_out_slice_num(self, tensor_shape):
ret = self.slicewise((lambda : tf.to_int32(0)))
tensor_layout = self.tensor_layout(tensor_shape)
for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:
if (mesh_axis is not None):
def my_fn(x, pcoord, mesh_dim_size):
return ((x * mesh_dim_size) + pcoord)
ret = self.slicewise(my_fn, ret, self.laid_out_pcoord(mesh_axis), self.shape[mesh_axis].size)
return ret
|
A LaidOutTensor with an int32 scalar, identical for identical slices.
This is useful for synchronizing random operations.
Args:
tensor_shape: a TensorShape
Returns:
a LaidOutTensor where each slice is an integer scalar.
|
codesearchnet
|
def infer_types(source, options):
with io.wrap_pytype_exceptions(PytypeError, filename=options.input):
return traces.trace(source, options)
|
Infer types for the provided source.
Args:
source: Text, the source code to analyze.
options: pytype.config.Options, the options to pass onto Pytype.
Returns:
source.Code object with information gathered by Pytype.
|
github-repos
|
def to_dense_one_hot(labels, class_count):
if (not isinstance(class_count, tf.compat.integral_types)):
raise TypeError('class_count must be an integer type.')
if (labels.dtype.base_dtype not in (tf.int32, tf.int64)):
raise TypeError(('Labels must be an integer: %s' % labels.dtype))
if (labels.get_shape().ndims != 1):
raise ValueError(('Labels must be a rank 1 tensor: %s' % labels.get_shape()))
dtype = labels.dtype.base_dtype
class_tensor = tf.convert_to_tensor(class_count, dtype=dtype, name='class_count')
batch = tf.gather(tf.shape(labels), 0)
count = tf.expand_dims(tf.range(0, limit=batch), 1)
labels = tf.expand_dims(labels, 1)
batch = tf.gather(tf.shape(labels), 0)
if (dtype != tf.int32):
count = tf.cast(count, dtype)
batch = tf.cast(batch, dtype)
result = tf.sparse_to_dense(tf.concat([count, labels], 1), tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0), 1.0, 0.0)
result.set_shape([labels.get_shape().dims[0], class_count])
return result
|
Converts a vector that specified one-hot per batch into a dense version.
Args:
labels: The labels input.
class_count: The number of classes as an int.
Returns:
One dense vector for each item in the batch.
Raises:
ValueError: If labels is not rank 1.
TypeError: If class_count is not an integer or labels is not an integer
Tensor.
|
codesearchnet
|
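For intuition only, a NumPy restatement of the same transformation; the TensorFlow version above additionally validates dtypes and ranks and builds the result as a sparse-to-dense op.

```python
import numpy as np

def to_dense_one_hot_np(labels, class_count):
    result = np.zeros((len(labels), class_count), dtype=np.float32)
    result[np.arange(len(labels)), labels] = 1.0
    return result

print(to_dense_one_hot_np(np.array([1, 0, 3]), 4))
# [[0. 1. 0. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]
```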
def save_source(driver, name):
source = driver.page_source
file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR', os.getcwd()), '{name}.html'.format(name=name))
try:
with open(file_name, 'wb') as output_file:
output_file.write(source.encode('utf-8'))
except Exception:
msg = u'Could not save the browser page source to {}.'.format(file_name)
LOGGER.warning(msg)
|
Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None
|
codesearchnet
|
def ModuleHelp(self, module):
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
|
Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
|
codesearchnet
|
def runcoro(async_function):
future = _asyncio.run_coroutine_threadsafe(async_function, client.loop)
result = future.result()
return result
|
Runs an asynchronous function without needing to use await - useful for lambda
Args:
async_function (Coroutine): The asynchronous function to run
|
juraj-google-style
|
def get_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesGetRequest(projectId=project_id, datasetId=dataset_id, tableId=table_id)
response = self.client.tables.Get(request)
return response
|
Lookup a table's metadata object.
Args:
project_id: table lookup parameter
dataset_id: table lookup parameter
table_id: table lookup parameter
Returns:
bigquery.Table instance
Raises:
HttpError: if lookup failed.
|
github-repos
|
def parse_node(self, node):
spec = super(CamundaProcessParser, self).parse_node(node)
spec.data = self._parse_input_data(node)
spec.data['lane_data'] = self._get_lane_properties(node)
spec.defines = spec.data
service_class = node.get(full_attr('assignee'))
if service_class:
self.parsed_nodes[node.get('id')].service_class = node.get(full_attr('assignee'))
return spec
|
Overrides ProcessParser.parse_node
Parses and attaches the inputOutput tags that are created by Camunda Modeller.
Args:
node: xml task node
Returns:
TaskSpec
|
juraj-google-style
|
def flaskify(response, headers=None, encoder=None):
status_code = response.status
data = (response.errors or response.message)
mimetype = 'text/plain'
if (isinstance(data, list) or isinstance(data, dict)):
mimetype = 'application/json'
data = json.dumps(data, cls=encoder)
return flask.Response(response=data, status=status_code, headers=headers, mimetype=mimetype)
|
Format the response to be consumeable by flask.
The api returns mostly JSON responses. The format method converts the dicts
into a json object (as a string), and the right response is returned (with
the valid mimetype, charset and status.)
Args:
response (Response): The dictionary object to convert into a json
object. If the value is a string, a dictionary is created with the
key "message".
headers (dict): optional headers for the flask response.
encoder (Class): The class of the encoder (if any).
Returns:
flask.Response: The flask response with formatted data, headers, and
mimetype.
|
codesearchnet
|
def conv_elems_1d(x, factor, out_depth=None):
out_depth = (out_depth or x.get_shape().as_list()[(- 1)])
x = tf.expand_dims(x, 1)
x = layers().Conv2D(filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding='valid', data_format='channels_last')(x)
x = tf.squeeze(x, 1)
return x
|
Decrease the length and change the dimensionality.
Merge/restore/compress factors positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
|
codesearchnet
|
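A shape-only sketch of the same idea using plain Keras layers instead of tensor2tensor's `layers()` wrapper (an assumption made for the sake of a runnable snippet): a non-overlapping strided 1 x factor convolution compresses the length axis.

```python
import tensorflow as tf

batch_size, length, depth, factor, out_depth = 2, 12, 8, 3, 16
x = tf.random.normal([batch_size, length, depth])
x = tf.expand_dims(x, 1)  # [batch, 1, length, depth]
x = tf.keras.layers.Conv2D(filters=out_depth, kernel_size=(1, factor),
                           strides=(1, factor), padding='valid',
                           data_format='channels_last')(x)
x = tf.squeeze(x, 1)      # [batch, length // factor, out_depth]
print(x.shape)            # (2, 4, 16)
```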
def tpu_device_ordinal_at_coordinates(self, device_coordinates):
return self._topology_devices[tuple(device_coordinates)]
|
Returns the TensorFlow device number at `device_coordinates`.
Args:
device_coordinates: An integer sequence describing a device's physical
coordinates in the TPU fabric.
Returns:
The TensorFlow device number, within the task, of the device attached
at those physical coordinates.
|
github-repos
|
def __init__(self, config=None):
self.http = urllib3.PoolManager()
self.serving_port = 8080
self.config = config
self.serving_port = get_config_value('local.serving_port', config) or 8080
|
Initializes a LocalSageMakerRuntimeClient
Args:
config (dict): Optional configuration for this client. In particular only
the local port is read.
|
juraj-google-style
|
def _stop_profiler(self, save=True):
if not self._profiler_started:
return
try:
profiler.stop(save=save)
except errors.UnavailableError as e:
logging.error('Failed to stop profiler: %s', e.message)
finally:
self._profiler_started = False
|
Stops the profiler if currently active.
Args:
save: Whether to save the profiler results to TensorBoard.
|
github-repos
|
def __init__(self, year, month, day_of_month):
super(DateTimeEpoch, self).__init__()
self.day_of_month = day_of_month
self.month = month
self.year = year
|
Initializes a date time epoch.
Args:
year (int): year that is the start of the epoch e.g. 1970.
month (int): month that is the start of the epoch, where 1 represents
January.
day_of_month (int): day of the month that is the start of the epoch,
where 1 represents the first day.
|
juraj-google-style
|
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
super(MacUserPlugin, self).Process(
parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
|
Check if it is a valid MacOS system account plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
|
juraj-google-style
|
def to_dict(self):
entity_dict = {}
for (field, val) in six.iteritems(self._fields):
if field.multiple:
if val:
val = [_dictify(field, x) for x in val]
else:
val = []
else:
val = _dictify(field, val)
if ((val is not None) and (val != [])):
entity_dict[field.key_name] = val
self._finalize_dict(entity_dict)
return entity_dict
|
Convert to a ``dict``
Subclasses can override this function.
Returns:
Python dict with keys set from this Entity.
|
codesearchnet
|
def get_conf(conf, sect, opt):
argu = getattr(args, ('mambupy_' + opt.lower()))
if (not argu):
envir = os.environ.get(('MAMBUPY_' + opt.upper()))
if (not envir):
try:
return conf.get(sect, opt)
except NoSectionError:
return default_configs[opt]
return envir
return argu
|
Gets a config 'opt' from 'conf' file, under section 'sect'.
If no 'opt' exists under 'sect', it looks for option on the default_configs
dictionary
If there exists an environmental variable named MAMBUPY_{upper_case_opt},
it overrides whatever the conf files or default_configs dict says.
But if you send a command line argument named mambupy_{lower_case_opt},
it overrides anything else.
Args:
conf (ConfigParser): ConfigParser that reads from certain config file (INI
format)
sect (string): section under the config file
opt (string): option to read
Returns:
string: configuration option. If not found on conf, returns a value from
default_configs dict. If environmental variable exists with name
MAMBUPY_{upper_case_opt} it overrides anything else
|
codesearchnet
|
def _validate_config(config):
required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]
for key in required_keys:
if (key not in config):
raise Error('Required key %s missing from config %s', (key, config))
|
Verifies that a config dict for an attenuator device is valid.
Args:
config: A dict that is the configuration for an attenuator device.
Raises:
attenuator.Error: A config is not valid.
|
codesearchnet
|
def _prepare_resource_chunks(self, resources, resource_delim=','):
return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
|
Some VirusTotal API methods accept multiple resources per call, so this
method prepares a list of concatenated resources according to the
maximum number of resources per request.
Args:
resources: a list of the resources.
resource_delim: a string used to separate the resources.
Default value is a comma.
Returns:
A list of the concatenated resources.
|
codesearchnet
|
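A self-contained sketch of the chunking behaviour, assuming `_prepare_resource_chunk` simply joins `_resources_per_req` items with the delimiter (that helper is not shown in the row above); the hash list is invented.

```python
def prepare_resource_chunks(resources, resource_delim=',', resources_per_req=4):
    return [
        resource_delim.join(resources[pos:pos + resources_per_req])
        for pos in range(0, len(resources), resources_per_req)
    ]

hashes = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
print(prepare_resource_chunks(hashes))
# ['h1,h2,h3,h4', 'h5,h6'] -> two API requests instead of six
```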
def get_drift_corrected_structures(self, start=None, stop=None, step=None):
coords = np.array(self.structure.cart_coords)
species = self.structure.species_and_occu
lattices = self.lattices
nsites, nsteps, dim = self.corrected_displacements.shape
for i in range(start or 0, stop or nsteps, step or 1):
latt = lattices[0] if len(lattices) == 1 else lattices[i]
yield Structure(
latt, species,
coords + self.corrected_displacements[:, i, :],
coords_are_cartesian=True)
|
Returns an iterator for the drift-corrected structures. Use of
iterator is to reduce memory usage as # of structures in MD can be
huge. You don't often need all the structures all at once.
Args:
start, stop, step (int): applies a start/stop/step to the iterator.
Faster than applying it after generation, as it reduces the
number of structures created.
|
juraj-google-style
|
def find(cls, session, resource_id, include=None):
url = session._build_url(cls._resource_path(), resource_id)
params = build_request_include(include, None)
process = cls._mk_one(session, include=include)
return session.get(url, CB.json(200, process), params=params)
|
Retrieve a single resource.
This should only be called from sub-classes.
Args:
session(Session): The session to find the resource in
resource_id: The ``id`` for the resource to look up
Keyword Args:
include: Resource classes to include
Returns:
Resource: An instance of a resource, or throws a
:class:`NotFoundError` if the resource can not be found.
|
juraj-google-style
|
def read(self, filename, binary_mode=False, size=None, offset=None):
s3 = boto3.resource("s3")
bucket, path = self.bucket_and_path(filename)
args = {}
endpoint = 0
if size is not None or offset is not None:
if offset is None:
offset = 0
endpoint = '' if size is None else (offset + size)
args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
try:
stream = s3.Object(bucket, path).get(**args)['Body'].read()
except botocore.exceptions.ClientError as exc:
if exc.response['Error']['Code'] == '416':
if size is not None:
client = boto3.client("s3")
obj = client.head_object(Bucket=bucket, Key=path)
content_length = obj['ContentLength']
endpoint = min(content_length, offset + size)
if offset == endpoint:
stream = b''
else:
args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
stream = s3.Object(bucket, path).get(**args)['Body'].read()
else:
raise
if binary_mode:
return bytes(stream)
else:
return stream.decode('utf-8')
|
Reads contents of a file to a string.
Args:
filename: string, a path
binary_mode: bool, read as binary if True, otherwise text
size: int, number of bytes or characters to read, otherwise
read all the contents of the file from the offset
offset: int, offset into file to read from, otherwise read
from the very beginning
Returns:
Subset of the contents of the file as a string or bytes.
|
juraj-google-style
|
def get_all_boards(*args, **kwargs):
https = kwargs.get('https', (args[1] if (len(args) > 1) else False))
url_generator = Url(None, https)
_fetch_boards_metadata(url_generator)
return get_boards(_metadata.keys(), *args, **kwargs)
|
Returns every board on 4chan.
Returns:
dict of :class:`basc_py4chan.Board`: All boards.
|
codesearchnet
|
def logs(self, container, stdout=True, stderr=True, stream=False, timestamps=False, tail='all', since=None, follow=None, until=None):
if (follow is None):
follow = stream
params = {'stderr': ((stderr and 1) or 0), 'stdout': ((stdout and 1) or 0), 'timestamps': ((timestamps and 1) or 0), 'follow': ((follow and 1) or 0)}
if ((tail != 'all') and ((not isinstance(tail, int)) or (tail < 0))):
tail = 'all'
params['tail'] = tail
if (since is not None):
if isinstance(since, datetime):
params['since'] = utils.datetime_to_timestamp(since)
elif (isinstance(since, int) and (since > 0)):
params['since'] = since
else:
raise errors.InvalidArgument('since value should be datetime or positive int, not {}'.format(type(since)))
if (until is not None):
if utils.version_lt(self._version, '1.35'):
raise errors.InvalidVersion('until is not supported for API version < 1.35')
if isinstance(until, datetime):
params['until'] = utils.datetime_to_timestamp(until)
elif (isinstance(until, int) and (until > 0)):
params['until'] = until
else:
raise errors.InvalidArgument('until value should be datetime or positive int, not {}'.format(type(until)))
url = self._url('/containers/{0}/logs', container)
res = self._get(url, params=params, stream=stream)
output = self._get_result(container, stream, res)
if stream:
return CancellableStream(output, res)
else:
return output
|
Get logs from a container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
container (str): The container to get logs from
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def days_in_leap_years_between(start_date, end_date):
def days_in_leap_years_since_1jan0001(date):
prev_year = date.year() - 1
leap_years_before = prev_year // 4 - prev_year // 100 + prev_year // 400
n_leap_days = leap_years_before * 366
days_in_cur_year = date.day_of_year() - 1
n_leap_days += tf.where(is_leap_year(date.year()), days_in_cur_year, 0)
return n_leap_days
return days_in_leap_years_since_1jan0001(end_date) - days_in_leap_years_since_1jan0001(start_date)
|
Calculates number of days between two dates that fall on leap years.
'start_date' is included and 'end_date' is excluded from the period.
For example, for dates `2019-12-24` and `2024-2-10` the result is
406: 366 days in 2020, 31 in Jan 2024 and 9 in Feb 2024.
If `end_date` is earlier than `start_date`, the result will be negative or
zero.
Args:
start_date: DateTensor.
end_date: DateTensor compatible with `start_date`.
Returns:
Tensor of type 'int32'.
|
github-repos
|
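A pure-Python reference for the docstring example (2019-12-24 to 2024-2-10), independent of the TensorFlow `DateTensor` machinery above; it simply counts the days whose year is a leap year.

```python
import datetime

def is_leap_year(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

def days_in_leap_years_between(start, end):
    # start is included, end is excluded, matching the docstring above.
    return sum(
        1
        for ordinal in range(start.toordinal(), end.toordinal())
        if is_leap_year(datetime.date.fromordinal(ordinal).year)
    )

print(days_in_leap_years_between(datetime.date(2019, 12, 24),
                                 datetime.date(2024, 2, 10)))  # 406
```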
def normalize_url(url):
uri = urlparse(url)
query = (uri.query or '')
pairs = parse_qsl(query)
decoded_pairs = [(unquote(key), value) for (key, value) in pairs]
encoded_pairs = [(quote(key), value) for (key, value) in decoded_pairs]
normalized_query = urlencode(encoded_pairs)
return ParseResult(scheme=uri.scheme, netloc=uri.netloc, path=uri.path, params=uri.params, query=normalized_query, fragment=uri.fragment).geturl()
|
Returns the given URL with all query keys properly escaped.
Args:
url (str): The URL to normalize.
Returns:
str: The normalized URL.
|
codesearchnet
|
def _data_to_json(data):
if (type(data) not in [str, unicode]):
data = json.dumps(data)
return data
|
Convert to json if it isn't already a string.
Args:
data (str): data to convert to json
|
codesearchnet
|
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
out = []
for i, hidden_state in enumerate(hidden_states):
hidden_state = hidden_state[:, 1:]
batch_size, _, num_channels = hidden_state.shape
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out
|
Args:
hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
|
github-repos
|
def content_matchs(tag_content, content_transformer=None):
def content_matchs_closure(element):
if not element.isTag():
return False
cont = element.getContent()
if content_transformer:
cont = content_transformer(cont)
return tag_content == cont
return content_matchs_closure
|
Generate function, which checks whether the content of the tag matchs
`tag_content`.
Args:
tag_content (str): Content of the tag which will be matched thru whole
DOM.
content_transformer (fn, default None): Function used to transform all
tags before matching.
This function can be used as parameter for .find() method in HTMLElement.
|
juraj-google-style
|
def __init__(self, retry_definition):
logger.debug("starting")
if isinstance(retry_definition, dict):
self.max = retry_definition.get('max', None)
self.sleep = retry_definition.get('sleep', 0)
self.stop_on = retry_definition.get('stopOn', None)
self.retry_on = retry_definition.get('retryOn', None)
else:
logger.error(f"retry decorator definition incorrect.")
raise PipelineDefinitionError("retry decorator must be a dict "
"(i.e a map) type.")
logger.debug("done")
|
Initialize the class. No duh, huh.
You can happily expect the initializer to initialize all
member attributes.
Args:
retry_definition: dict. This is the actual retry definition as it
exists in the pipeline yaml.
|
juraj-google-style
|
def run_plugins(context_obj, boto3_clients):
def print_if_verbose(message):
if context_obj.verbose:
print(message)
service_name = os.path.basename(sys.argv[0]).replace('.py', '')
try:
import plugins
except ImportError:
print_if_verbose('no plugins detected.')
return
else:
for (plugin_importer, plugin_name, plugin_ispkg) in pkgutil.iter_modules(plugins.__path__):
if plugin_ispkg:
plugin_package = importlib.import_module('plugins.{}'.format(plugin_name))
for (importer, modname, ispkg) in pkgutil.iter_modules(plugin_package.__path__):
plugin_module = importlib.import_module('plugins.{}.{}'.format(plugin_name, modname))
for (name, obj) in inspect.getmembers(plugin_module):
if (inspect.isclass(obj) and (obj.__name__ == 'EFPlugin')):
plugin_class = getattr(plugin_module, name)
plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)
if (plugin_instance.service == service_name):
print_if_verbose("plugin '{}' loaded".format(plugin_name))
if (not context_obj.commit):
print_if_verbose('dryrun: skipping plugin execution.')
else:
try:
plugin_instance.run()
except AttributeError:
print("error executing plugin '{}'".format(modname))
|
Executes all loaded plugins designated for the service calling the function.
Args:
context_obj (obj:EFContext): The EFContext object created by the service.
boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
|
codesearchnet
|
def _ParseFileEntry(self, knowledge_base, file_entry):
file_object = file_entry.GetFileObject()
try:
self._ParseFileData(knowledge_base, file_object)
finally:
file_object.close()
|
Parses a file entry for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def add(self, private_key):
if (not isinstance(private_key, PaillierPrivateKey)):
raise TypeError(('private_key should be of type PaillierPrivateKey, not %s' % type(private_key)))
self.__keyring[private_key.public_key] = private_key
|
Add a key to the keyring.
Args:
private_key (PaillierPrivateKey): a key to add to this keyring.
|
codesearchnet
|
def parse_results_mol2(mol2_outpath):
docked_ligands = pd.DataFrame()
lines = [line.strip() for line in open(mol2_outpath, 'r')]
props = {}
for i, line in enumerate(lines):
if line.startswith('
ligand = line.strip().strip('
line = lines[i + 1]
props = {}
props['Ligand'] = ligand
if line.startswith('
splitter = line.strip().strip('
props[splitter[0]] = float(splitter[1])
if line.startswith('@<TRIPOS>MOLECULE'):
if props:
docked_ligands = docked_ligands.append(props, ignore_index=True)
return docked_ligands
|
Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
|
juraj-google-style
|
def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor:
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
|
Prepare the head mask if needed.
Args:
head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (`int`):
The number of hidden layers in the model.
is_attention_chunked (`bool`, *optional*, defaults to `False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
`torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
`[None]` for each layer.
|
github-repos
|
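A shape-only sketch of the broadcasting convention, assuming PyTorch is available: a 1-D `[num_heads]` mask is expanded to one `[1, num_heads, 1, 1]` entry per layer, which is what the 5-D conversion effectively produces.

```python
import torch

num_hidden_layers, num_heads = 4, 12
head_mask = torch.ones(num_heads)  # keep every head in every layer

mask_5d = head_mask[None, None, :, None, None].expand(num_hidden_layers, -1, -1, -1, -1)
print(mask_5d.shape)               # torch.Size([4, 1, 12, 1, 1])

# When no mask is supplied, the helper just returns one None per layer:
print([None] * num_hidden_layers)  # [None, None, None, None]
```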
def fit(self, X, *args, **kwargs):
self.constant_value = self._get_constant_value(X)
if (self.constant_value is None):
if self.unfittable_model:
self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)
else:
self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)
for name in self.METHOD_NAMES:
attribute = getattr(self.__class__, name)
if isinstance(attribute, str):
setattr(self, name, getattr(self.model, attribute))
elif (attribute is None):
setattr(self, name, missing_method_scipy_wrapper((lambda x: x)))
else:
self._replace_constant_methods()
self.fitted = True
|
Fit scipy model to an array of values.
Args:
X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d
Returns:
None
|
codesearchnet
|
class QuantAct(nn.Module):
def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
super().__init__()
self.activation_bit = activation_bit
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
self.per_channel = per_channel
self.percentile = False
self.act_function = SymmetricQuantFunction.apply
if not self.per_channel:
self.register_buffer('x_min', torch.zeros(1))
self.register_buffer('x_max', torch.zeros(1))
self.register_buffer('act_scaling_factor', torch.zeros(1))
self.x_min -= 1e-05
self.x_max += 1e-05
else:
raise NotImplementedError('per-channel mode is not currently supported for activation.')
def __repr__(self):
return f'{self.__class__.__name__}(activation_bit={self.activation_bit}, quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, Act_max: {self.x_max.item():.2f})'
def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):
x_act = x if identity is None else identity + x
if self.training:
assert not self.percentile, 'percentile mode is not currently supported for activation.'
assert not self.per_channel, 'per-channel mode is not currently supported for activation.'
x_min = x_act.data.min()
x_max = x_act.data.max()
assert x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0, 'NaN detected when computing min/max of the activation'
if self.x_min.min() > -1.1e-05 and self.x_max.max() < 1.1e-05:
self.x_min = self.x_min + x_min
self.x_max = self.x_max + x_max
elif self.act_range_momentum == -1:
self.x_min = torch.min(self.x_min, x_min)
self.x_max = torch.max(self.x_max, x_max)
else:
self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
if not self.quant_mode:
return (x_act, None)
x_min = self.x_min if specified_min is None else specified_min
x_max = self.x_max if specified_max is None else specified_max
self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel)
if pre_act_scaling_factor is None:
quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
else:
quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor)
correct_output_scale = self.act_scaling_factor.view(-1)
return (quant_act_int * correct_output_scale, self.act_scaling_factor)
|
Quantizes the given activation.
Args:
activation_bit (`int`):
Bitwidth for the quantized activation.
act_range_momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
channel_len (`int`, *optional*):
Specify the channel length when *per_channel* is set to `True`.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
|
github-repos
|
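A self-contained sketch of the range tracking and symmetric scaling that `QuantAct.forward` describes; `symmetric_scale` below is a simplified stand-in for `symmetric_linear_quantization_params`, and the 8-bit width, momentum and random batch are assumptions.
import torch

def symmetric_scale(num_bits, x_min, x_max):
    # map max(|x_min|, |x_max|) onto the largest signed integer representable with num_bits
    n = 2 ** (num_bits - 1) - 1
    return torch.clamp(torch.max(x_min.abs(), x_max.abs()), min=1e-8) / n

momentum = 0.95
x_min = torch.zeros(1) - 1e-05
x_max = torch.zeros(1) + 1e-05
x = torch.randn(4, 8)                                    # hypothetical activation batch
x_min = x_min * momentum + x.min() * (1 - momentum)      # EMA range update, as in forward()
x_max = x_max * momentum + x.max() * (1 - momentum)
scale = symmetric_scale(8, x_min, x_max)
x_int = torch.clamp(torch.round(x / scale), -(2 ** 7), 2 ** 7 - 1)  # quantized integers
x_q = x_int * scale                                      # dequantized activation, as returned by forward()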
def action_size(self) -> Sequence[Sequence[int]]:
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_size(fluents, ordering)
|
The size of each action fluent in canonical order.
Returns:
Sequence[Sequence[int]]: A tuple of tuples of integers
representing the shape and size of each fluent.
|
codesearchnet
|
def signature(cert, sig, body):
body = six.b(body)
sig = base64.decodestring(sig)
padder = padding.PKCS1v15()
public_key = cert.public_key()
try:
public_key.verify(sig, body, padder, hashes.SHA1())
return True
except InvalidSignature:
warnings.warn('Signature verification failed.')
return False
|
Validate data request signature.
See `validate.request` for additional info.
Args:
cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon
signing certificate.
sig: str. Signature header value sent by request.
body: str. HTTPS request body.
Returns:
bool: True if valid, False otherwise.
|
juraj-google-style
|
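A self-contained sketch of the same PKCS#1 v1.5 / SHA-1 verification path, using a freshly generated RSA key in place of the Amazon certificate so it runs on its own; the request body and key size are illustrative.
import base64
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
body = b'{"version": "1.0"}'                              # stands in for the HTTPS request body
sig = base64.b64encode(key.sign(body, padding.PKCS1v15(), hashes.SHA1()))  # header-style base64 value
try:
    key.public_key().verify(base64.b64decode(sig), body, padding.PKCS1v15(), hashes.SHA1())
    print('valid')
except InvalidSignature:
    print('invalid')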
def _ws_on_error(self, ws: websocket.WebSocketApp, error: Exception):
self.logger.error(f'Got error from websocket connection: {str(error)}')
|
Callback for receiving errors from the websocket connection
Args:
ws: websocket connection
error: exception raised
|
juraj-google-style
|
def _AttemptAutoDetectTagFile(self, analysis_mediator):
self._autodetect_tag_file_attempt = True
if (not analysis_mediator.data_location):
return False
operating_system = analysis_mediator.operating_system.lower()
filename = self._OS_TAG_FILES.get(operating_system, None)
if (not filename):
return False
logger.info('Using auto detected tag file: {0:s}'.format(filename))
tag_file_path = os.path.join(analysis_mediator.data_location, filename)
self.SetAndLoadTagFile(tag_file_path)
return True
|
Detects which tag file is most appropriate.
Args:
analysis_mediator (AnalysisMediator): analysis mediator.
Returns:
bool: True if a tag file is autodetected.
|
codesearchnet
|
def gets(self, key, default=None, cas_default=None):
defaults = (default, cas_default)
return self._fetch_cmd(b'gets', [key], True).get(key, defaults)
|
The memcached "gets" command for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
cas_default: same behaviour as default argument.
Returns:
A tuple of (value, cas)
or (default, cas_defaults) if the key was not found.
|
codesearchnet
|
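A usage sketch, assuming this method lives on a pymemcache-style client; the server address and keys are placeholders.
from pymemcache.client.base import Client

client = Client(('localhost', 11211))                     # hypothetical memcached server
client.set(b'greeting', b'hello')
value, cas = client.gets(b'greeting')                     # (b'hello', <cas token>)
missing, cas = client.gets(b'absent', default=b'', cas_default=None)  # (b'', None) when the key is not found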
class RemoteMonitor(Callback):
def __init__(self, root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None, send_as_json=False):
super(RemoteMonitor, self).__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
if isinstance(v, (np.ndarray, np.generic)):
send[k] = v.item()
else:
send[k] = v
try:
if self.send_as_json:
requests.post(self.root + self.path, json=send, headers=self.headers)
else:
requests.post(self.root + self.path, {self.field: json.dumps(send)}, headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor root server at ' + str(self.root))
|
Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If `send_as_json=True`, the content type of the request will be
`"application/json"`.
Otherwise the serialized JSON will be sent within a form.
Args:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. send_as_json is set to False).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as `"application/json"`.
|
github-repos
|
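A usage sketch, assuming the Keras callback API; the listener URL, model and data are illustrative, and an unreachable server only triggers the warning shown in `on_epoch_end`.
import numpy as np
import tensorflow as tf

monitor = tf.keras.callbacks.RemoteMonitor(root='http://localhost:9000', send_as_json=True)
model = tf.keras.Sequential([tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
x, y = np.random.rand(32, 4), np.random.rand(32, 1)
model.fit(x, y, epochs=2, callbacks=[monitor])            # POSTs epoch-end logs to root + path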
def upload_file(self, local_file, dest_path, mimetype):
self.__validate_storage_path(dest_path)
if dest_path.endswith('/'):
raise StorageArgumentException('Must specify target file name in dest_path argument')
if local_file.endswith(os.path.sep):
raise StorageArgumentException('Must specify source file name in local_file argument, directory upload not supported')
new_file = self.api_client.create_file(name=dest_path.split('/').pop(), content_type=mimetype, parent=self.get_parent(dest_path)['uuid'])
etag = self.api_client.upload_file_content(new_file['uuid'], source=local_file)
new_file['etag'] = etag
return new_file
|
Upload local file content to a storage service destination folder.
Args:
local_file(str): path to the local source file
dest_path(str):
absolute Storage service path; the '/project' prefix is essential and the
suffix should be the name the file will have in the destination folder,
i.e.: /project/folder/.../file_name
mimetype(str): sets the contentType attribute
Returns:
The created file entity as a dict, including its 'uuid' and 'etag'
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def nearest_neighbor(self, x, means):
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
scalar_prod = tf.matmul(
tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
dist = x_norm_sq + tf.transpose(
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
return nearest_hot
|
Find the nearest element in means to elements in x.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means of shape [num_blocks, block_v_size, block_dim].
Returns:
Tensor with nearest element in mean encoded in one-hot notation.
|
juraj-google-style
|
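A NumPy sketch of the squared-distance identity the code exploits, ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x.m, for a single block with hard assignment; the shapes and random data are illustrative.
import numpy as np

x = np.random.rand(5, 16)                     # 5 latent vectors, block_dim = 16
means = np.random.rand(32, 16)                # block_v_size = 32 embedding means
dist = (x ** 2).sum(-1, keepdims=True) + (means ** 2).sum(-1) - 2 * x @ means.T
nearest_idx = np.argmin(dist, axis=-1)        # hard assignment, equivalent to argmax(-dist)
nearest_hot = np.eye(32)[nearest_idx]         # one-hot codes of shape (5, 32)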
def qualified_name(self):
idxstr = ('' if (self.index is None) else str(self.index))
return ('%s[%s]' % (self.qualified_package_name, idxstr))
|
Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]".
|
codesearchnet
|
def flatten(self, in_place=True):
new_dataset = TaskData()
for (i, dataset) in enumerate(self._datasets):
if (i != self._default_index):
new_dataset.merge(dataset)
new_dataset.merge(self.default_dataset)
new_aliases = {alias: 0 for (alias, _) in self._aliases.items()}
if in_place:
self._datasets = [new_dataset]
self._aliases = new_aliases
self._default_index = 0
else:
return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))
|
Merge all datasets into a single dataset.
The default dataset is the last dataset to be merged, as it is considered to be
the primary source of information and should overwrite all existing fields with
the same key.
Args:
in_place (bool): Set to ``True`` to replace the existing datasets with the
merged one. If set to ``False``, will return a new MultiTaskData
object containing the merged dataset.
Returns:
MultiTaskData: If the in_place flag is set to False.
|
codesearchnet
|
def AddColumn(self, column, default="", col_index=-1):
if column in self.table:
raise TableError("Column %r already in table." % column)
if col_index == -1:
self._table[0][column] = column
for i in range(1, len(self._table)):
self._table[i][column] = default
else:
self._table[0].Insert(column, column, col_index)
for i in range(1, len(self._table)):
self._table[i].Insert(column, default, col_index)
|
Appends a new column to the table.
Args:
column: A string, name of the column to add.
default: Default value for entries. Defaults to ''.
col_index: Integer index for where to insert new column.
Raises:
TableError: Column name already exists.
|
juraj-google-style
|
def bitwise_right_shift(x, y):
if any_symbolic_tensors((x, y)):
return BitwiseRightShift().symbolic_call(x, y)
return backend.numpy.bitwise_right_shift(x, y)
|
Shift the bits of an integer to the right.
Bits are shifted to the right by `y` positions. Because the internal representation of
numbers is in binary format, this operation is equivalent to dividing `x` by
`2**y`.
Args:
x: Input integer tensor.
y: Input integer tensor.
Returns:
Result tensor.
|
github-repos
|
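A short worked example of the equivalence described above; exposing the op through `keras.ops` is an assumption about the public namespace.
from keras import ops

x = ops.convert_to_tensor([52, 100], dtype='int32')
y = ops.convert_to_tensor(2, dtype='int32')
print(ops.bitwise_right_shift(x, y))          # [13, 25], the same as x // 2**2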
def search(cls, term, fields=()):
if (not any((cls._meta.search_fields, fields))):
raise AttributeError("A list of searchable fields must be provided in the class's search_fields or provided to this function in the `fields` kwarg.")
if (not fields):
fields = cls._meta.search_fields
query = cls.select()
like_term = ''.join((term, '%'))
full_like_term = ''.join(('%', term, '%'))
order_by = []
clauses = []
for field_name in fields:
field = getattr(cls, field_name)
clauses.append((((field == term) | (field ** like_term)) | (field ** full_like_term)))
order_by.append(case(None, (((field == term), 0), ((field ** like_term), 1), ((field ** full_like_term), 2)), default=3).asc())
query = query.where(reduce(operator.or_, clauses))
query = query.order_by(*order_by)
return query
|
Generic SQL search function that uses SQL ``LIKE`` to search the
database for matching records. The records are sorted by their
relevancy to the search term.
The query searches and sorts on the following criteria, in order, where
the target string is ``exactly``:
1. Straight equality (``x = 'exactly'``)
2. Right hand ``LIKE`` (``x LIKE 'exact%'``)
3. Substring ``LIKE`` (``x LIKE %act%``)
Args:
term (str): The search term to apply to the query.
Keyword Args:
fields (list|tuple|None): An optional list of fields to apply the
search to. If not provided, the class variable
``Meta.search_fields`` will be used by default.
Returns:
peewee.SelectQuery: An unexecuted query for the records.
Raises:
AttributeError: Raised if `search_fields` isn't defined in the
class and `fields` aren't provided for the function.
|
codesearchnet
|
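A self-contained sqlite3 sketch of the LIKE matching and relevancy ordering the docstring describes, without peewee; the table and rows are illustrative.
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE item (name TEXT)')
con.executemany('INSERT INTO item VALUES (?)',
                [('exactly',), ('exact match',), ('not exactly it',), ('other',)])
term, like_term, full_like_term = 'exactly', 'exactly%', '%exactly%'
rows = con.execute(
    'SELECT name FROM item WHERE name = ? OR name LIKE ? OR name LIKE ? '
    'ORDER BY CASE WHEN name = ? THEN 0 WHEN name LIKE ? THEN 1 WHEN name LIKE ? THEN 2 ELSE 3 END',
    (term, like_term, full_like_term, term, like_term, full_like_term)).fetchall()
print(rows)                                   # [('exactly',), ('not exactly it',)]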
def add_electrode(self, electrode, label=None):
if not label:
label = "Electrode {}".format(len(self._electrodes) + 1)
self._electrodes[label] = electrode
|
Add an electrode to the plot.
Args:
electrode: An electrode. All electrodes satisfying the
AbstractElectrode interface should work.
label: A label for the electrode. If None, defaults to a counting
system, i.e. 'Electrode 1', 'Electrode 2', ...
|
juraj-google-style
|
def encrypt(self, message, public_key):
max_str_len = (rsa.common.byte_size(public_key.n) - 11)
if (len(message) > max_str_len):
message = textwrap.wrap(message, width=max_str_len)
else:
message = [message]
enc_msg = []
for line in message:
enc_line = rsa.encrypt(line, public_key)
enc_line_converted = binascii.b2a_base64(enc_line)
enc_msg.append(enc_line_converted)
enc_msg = json.dumps(enc_msg)
return enc_msg
|
Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
message. Only the paired private key can decrypt it.
Returns:
A json string of the list of encrypted lines of the message.
|
codesearchnet
|
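A small sketch of the size limit the method works around: PKCS#1 v1.5 padding costs 11 bytes, so a key with an n-byte modulus can encrypt at most n - 11 bytes per call; the 512-bit key is for illustration only.
import rsa
import rsa.common

public_key, private_key = rsa.newkeys(512)                # 64-byte modulus (demo-sized key)
max_len = rsa.common.byte_size(public_key.n) - 11         # 53 plaintext bytes per chunk
ciphertext = rsa.encrypt(b'x' * max_len, public_key)      # fits into a single chunk
print(max_len, len(ciphertext))                           # 53 64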
def _create_mlir_loc(self, loc):
if loc is not None and loc.loc.filename:
file_name = os.path.basename(loc.loc.filename)
return 'loc("{}":{}:{})'.format(file_name, loc.loc.lineno, loc.loc.col_offset)
else:
return 'loc(unknown)'
|
Creates mlir location from autograph ORIGIN value.
Args:
loc: OriginInfo
Returns:
A serialized mlir location string.
|
github-repos
|
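A runnable sketch of the location string this helper produces, using a flattened stand-in for the nested autograph OriginInfo value; the file name and position are illustrative.
import os

class Loc:                                    # simplified stand-in for OriginInfo's location
    def __init__(self, filename, lineno, col_offset):
        self.filename, self.lineno, self.col_offset = filename, lineno, col_offset

loc = Loc('/tmp/model.py', 42, 4)
file_name = os.path.basename(loc.filename)
print('loc("{}":{}:{})'.format(file_name, loc.lineno, loc.col_offset))   # loc("model.py":42:4)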
def _base_expansion_size(num, bases):
return np.floor(np.log(num) / np.log(bases)) + 1
|
Computes the number of terms in the place value expansion.
Let num = a0 + a1 b + a2 b^2 + ... + ak b^k be the place value expansion of
`num` in base b (ak <> 0). This function computes and returns `k+1` for each
base `b` specified in `bases`.
This can be inferred from the base `b` logarithm of `num` as follows:
$$k + 1 = Floor(log_b(num)) + 1 = Floor(log(num) / log(b)) + 1$$
Args:
num: Scalar numpy array of dtype either `float32` or `float64`. The number
to compute the base expansion size of.
bases: Numpy array of the same dtype as num. The bases to compute the size
against.
Returns:
Tensor of same dtype and shape as `bases` containing the size of num when
written in that base.
|
github-repos
|
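A quick numeric check of the formula; the values are illustrative.
import numpy as np

num = np.float64(255.0)
bases = np.array([2.0, 10.0, 16.0])
print(np.floor(np.log(num) / np.log(bases)) + 1)   # [8. 3. 2.]: 255 needs 8 bits, 3 decimal digits, 2 hex digits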