code | docstring | source |
---|---|---|
def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting pooling ...')
if names == 'short':
tf_name = 'P' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if 'kernel_shape' in params:
height, width = params['kernel_shape']
else:
height, width = params['kernel_size']
if 'strides' in params:
stride_height, stride_width = params['strides']
else:
stride_height, stride_width = params['stride']
if 'pads' in params:
padding_h, padding_w, _, _ = params['pads']
else:
padding_h, padding_w = params['padding']
input_name = inputs[0]
pad = 'valid'
if height % 2 == 1 and width % 2 == 1 and \
height // 2 == padding_h and width // 2 == padding_w and \
stride_height == 1 and stride_width == 1:
pad = 'same'
else:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding2D(
padding=(padding_h, padding_w),
name=padding_name
)
layers[padding_name] = padding_layer(layers[inputs[0]])
input_name = padding_name
pooling = keras.layers.AveragePooling2D(
pool_size=(height, width),
strides=(stride_height, stride_width),
padding=pad,
name=tf_name,
data_format='channels_first'
)
layers[scope_name] = pooling(layers[input_name]) | Convert Average pooling.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | juraj-google-style |
def __init__(self, interval=3600):
self.interval = interval
self.start_time = datetime.datetime.now()
self.chk_counter = 0 | Initializes the handler with an interval.
Args:
interval (int): Interval at which to checkpoint in seconds.
Defaults to 3600 (1 hr). | juraj-google-style |
def inv_logistic(y: Union[float, np.ndarray],
k: float,
theta: float) -> Optional[float]:
if y is None or k is None or theta is None:
return None
return (np.log((1 / y) - 1) / -k) + theta | r"""
Inverse standard logistic function:
.. math::
x = ( log( \frac {1} {y} - 1) / -k ) + \theta
Args:
y: :math:`y`
k: :math:`k`
theta: :math:`\theta`
Returns:
:math:`x` | juraj-google-style |
def sign(self, private_keys):
if ((private_keys is None) or (not isinstance(private_keys, list))):
raise TypeError('`private_keys` must be a list instance')
def gen_public_key(private_key):
public_key = private_key.get_verifying_key().encode()
return public_key.decode()
key_pairs = {gen_public_key(PrivateKey(private_key)): PrivateKey(private_key) for private_key in private_keys}
tx_dict = self.to_dict()
tx_dict = Transaction._remove_signatures(tx_dict)
tx_serialized = Transaction._to_str(tx_dict)
for (i, input_) in enumerate(self.inputs):
self.inputs[i] = self._sign_input(input_, tx_serialized, key_pairs)
self._hash()
return self | Fulfills a previous Transaction's Output by signing Inputs.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256
Furthermore, note that all keys required to fully sign the
Transaction have to be passed to this method. Passing only a
subset of the required keys will cause this method to fail.
Args:
private_keys (:obj:`list` of :obj:`str`): A complete list of
all private keys needed to sign all Fulfillments of this
Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction` | codesearchnet |
def begin_abort(self, root_pipeline_key, abort_message):
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Rollback()
if pipeline_record.status == _PipelineRecord.ABORTED:
logging.warning(
'Tried to abort root pipeline ID "%s"; already in state: %s',
root_pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
if pipeline_record.abort_requested:
logging.warning(
'Tried to abort root pipeline ID "%s"; abort signal already sent.',
root_pipeline_key.name())
raise db.Rollback()
pipeline_record.abort_requested = True
pipeline_record.abort_message = abort_message
pipeline_record.put()
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
return True
return db.run_in_transaction(txn) | Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise. | juraj-google-style |
def base_name_from_image(image):
m = re.match('^(.+/)?([^:/]+)(:[^:]+)?$', image)
algo_name = (m.group(2) if m else image)
return algo_name | Extract the base name of the image to use as the 'algorithm name' for the job.
Args:
image (str): Image name.
Returns:
str: Algorithm name, as extracted from the image name. | codesearchnet |
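For illustration only, a standalone copy of the regex above exercised on a few hypothetical image names (the ECR-style URI is made up):

```python
import re

def base_name_from_image(image):
    # Same regex as above: optional "repo/" prefix, a name, and an optional ":tag".
    m = re.match('^(.+/)?([^:/]+)(:[^:]+)?$', image)
    return m.group(2) if m else image

print(base_name_from_image('123456789012.dkr.ecr.us-west-2.amazonaws.com/my-algo:latest'))  # my-algo
print(base_name_from_image('xgboost:1'))  # xgboost
print(base_name_from_image('xgboost'))    # xgboost
```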
def get_stored_version(connection):
if connection.engine.name == 'sqlite':
version = connection.execute('PRAGMA user_version').fetchone()[0]
if version == 0:
raise VersionIsNotStored
return version
elif connection.engine.name == 'postgresql':
try:
r = connection\
.execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\
.fetchone()
if not r:
raise VersionIsNotStored
version = r[0]
except ProgrammingError:
raise VersionIsNotStored
return version
else:
raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name)) | Returns database version.
Args:
connection (sqlalchemy connection):
Raises: VersionIsNotStored: assumes the user_version pragma (sqlite case) and user_version table (postgresql case)
exist because they were created together with the database.
Returns:
int: version of the database. | juraj-google-style |
def get_metrics_namespace(self) -> str:
return 'BeamML_TF_Tensor' | Returns:
A namespace for metrics collected by the RunInference transform. | github-repos |
def __contains__(self, func):
return any((func is mw.func) or (mw.is_subchain and func in mw.func)
for mw in self.mw_list) | Returns whether the function is stored anywhere in the middleware chain.
This runs recursively though any subchains.
Args:
func (callable): A function which may be present in the chain
Returns:
bool: True if func is a function contained anywhere in the chain. | juraj-google-style |
def _remove_lines(self, lines, sublist_lengths, num_to_remove):
curr = 0
result = []
for offset in sublist_lengths:
end = curr + offset
start = min(curr + num_to_remove, end)
result += lines[start:end]
curr += offset
return result | Utility function to remove num_to_remove lines from each sublist.
Args:
lines: list of items.
sublist_lengths: list of integers representing length of sublist
corresponding to each source file.
num_to_remove: number of lines to remove from each sublist.
Returns:
remaining lines. | github-repos |
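A minimal, self-contained sketch of the same logic as a free function, with a small worked example (the sample lines and lengths are invented):

```python
def remove_lines(lines, sublist_lengths, num_to_remove):
    # Free-function copy of the method above.
    curr = 0
    result = []
    for offset in sublist_lengths:
        end = curr + offset
        start = min(curr + num_to_remove, end)
        result += lines[start:end]
        curr += offset
    return result

# 'a'..'c' come from file 1 and 'd'..'e' from file 2; drop the first line of each.
print(remove_lines(['a', 'b', 'c', 'd', 'e'], [3, 2], 1))  # ['b', 'c', 'e']
```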
def add_transcript(self, transcript):
logger.debug('Adding transcript {0} to variant {1}'.format(transcript, self['variant_id']))
self['transcripts'].append(transcript) | Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary | codesearchnet |
def create_assembly(self, did, wid, name='My Assembly'):
payload = {
'name': name
}
return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload) | Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data | juraj-google-style |
def cds_score(self, x_te, y_te):
if type(x_te) == np.ndarray:
x_te, y_te = pd.Series(x_te.reshape(-1)), pd.Series(y_te.reshape(-1))
xd, yd = discretized_sequences(x_te, y_te, self.ffactor, self.maxdev)
cx = Counter(xd)
cy = Counter(yd)
yrange = sorted(cy.keys())
ny = len(yrange)
py = np.array([cy[i] for i in yrange], dtype=float)
py = py / py.sum()
pyx = []
for a in cx:
if cx[a] > self.minc:
yx = y_te[xd == a]
if count_unique(y_te) > len_discretized_values(y_te, "Numerical", self.ffactor, self.maxdev):
yx = (yx - np.mean(yx)) / np.std(y_te)
yx = discretized_sequence(yx, "Numerical", self.ffactor, self.maxdev, norm=False)
cyx = Counter(yx.astype(int))
pyxa = np.array([cyx[i] for i in discretized_values(y_te, "Numerical", self.ffactor, self.maxdev)],
dtype=float)
else:
cyx = Counter(yx)
pyxa = [cyx[i] for i in yrange]
pyxax = np.array([0] * (ny - 1) + pyxa + [0] * (ny - 1), dtype=float)
xcorr = [sum(py * pyxax[i:i + ny]) for i in range(2 * ny - 1)]
imax = xcorr.index(max(xcorr))
pyxa = np.array([0] * (2 * ny - 2 - imax) + pyxa + [0] * imax, dtype=float)
assert pyxa.sum() == cx[a]
pyxa = pyxa / pyxa.sum()
pyx.append(pyxa)
if len(pyx) == 0:
return 0
pyx = np.array(pyx)
pyx = pyx - pyx.mean(axis=0)
return np.std(pyx) | Computes the cds statistic from variable 1 to variable 2
Args:
x_te (numpy.ndarray): Variable 1
y_te (numpy.ndarray): Variable 2
Returns:
float: CDS fit score | juraj-google-style |
def remove_list_duplicates(lista, unique=False):
result = []
allready = []
for elem in lista:
if elem not in result:
result.append(elem)
else:
allready.append(elem)
if unique:
for elem in allready:
result = list(filter((elem).__ne__, result))
return result | Remove duplicated elements in a list.
Args:
lista: List with elements to clean duplicates. | juraj-google-style |
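A standalone copy of the function above, showing how the `unique` flag changes the result (the sample data is invented):

```python
def remove_list_duplicates(lista, unique=False):
    result = []
    allready = []
    for elem in lista:
        if elem not in result:
            result.append(elem)
        else:
            allready.append(elem)
    if unique:
        # Drop every element that appeared more than once.
        for elem in allready:
            result = list(filter((elem).__ne__, result))
    return result

data = [1, 2, 2, 3, 1, 4]
print(remove_list_duplicates(data))               # [1, 2, 3, 4]
print(remove_list_duplicates(data, unique=True))  # [3, 4]
```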
def __init__(self, skype=None):
self.skype = skype
self.synced = False
self.cache = {} | Create a new container object. The :attr:`synced` state and internal :attr:`cache` are initialised here.
Args:
skype (Skype): parent Skype instance | juraj-google-style |
def __init__(self, ad):
super(Sl4aClient, self).__init__(app_name=_APP_NAME, ad=ad)
self._ad = ad
self.ed = None
self._adb = ad.adb | Initializes an Sl4aClient.
Args:
ad: AndroidDevice object. | juraj-google-style |
def filesizes(images):
while True:
img = (yield marv.pull(images))
if (img is None):
break
(yield marv.push(img.size)) | Stat filesize of files.
Args:
images: stream of marv image files
Returns:
Stream of filesizes | codesearchnet |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(GetAttributeListRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._unique_identifier = None
self.is_oversized(local_buffer) | Read the data encoding the GetAttributeList request payload and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0. | juraj-google-style |
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
if isinstance(rationale, basestring) and max_length is not None and len(rationale) > max_length:
return rationale[0:max_length], True
else:
return rationale, False | Truncates the rationale for analytics event emission if necessary
Args:
rationale (string): the string value of the rationale
max_length (int): the max length for truncation
Returns:
truncated_value (string): the possibly truncated version of the rationale
was_truncated (bool): returns true if the rationale is truncated | juraj-google-style |
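A hedged sketch of the same logic using Python 3's `str` in place of `basestring`; the `max_length` default below is an arbitrary stand-in for `MAX_RATIONALE_SIZE_IN_EVENT`:

```python
def truncate_rationale(rationale, max_length=32):
    # Same logic as above, with str instead of basestring and an arbitrary
    # default max_length standing in for MAX_RATIONALE_SIZE_IN_EVENT.
    if isinstance(rationale, str) and max_length is not None and len(rationale) > max_length:
        return rationale[0:max_length], True
    return rationale, False

print(truncate_rationale('short answer'))            # ('short answer', False)
print(truncate_rationale('x' * 100, max_length=10))  # ('xxxxxxxxxx', True)
```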
def greedy_coloring(adj):
coloring = {}
colors = {}
possible_colors = {n: set(range(len(adj))) for n in adj}
while possible_colors:
n = min(possible_colors, key=lambda n: len(possible_colors[n]))
color = min(possible_colors[n])
coloring[n] = color
if color not in colors:
colors[color] = {n}
else:
colors[color].add(n)
for neighbor in adj[n]:
if neighbor in possible_colors and color in possible_colors[neighbor]:
possible_colors[neighbor].remove(color)
del possible_colors[n]
return coloring, colors | Determines a vertex coloring.
Args:
adj (dict): The edge structure of the graph to be colored.
`adj` should be of the form {node: neighbors, ...} where
neighbors is a set.
Returns:
dict: the coloring {node: color, ...}
dict: the colors {color: [node, ...], ...}
Note:
This is a greedy heuristic: the resulting coloring is not
necessarily minimal. | juraj-google-style |
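The same greedy heuristic, run on a small invented 4-cycle to show the returned coloring:

```python
def greedy_coloring(adj):
    coloring = {}
    colors = {}
    possible_colors = {n: set(range(len(adj))) for n in adj}
    while possible_colors:
        # Always color the most constrained node next.
        n = min(possible_colors, key=lambda n: len(possible_colors[n]))
        color = min(possible_colors[n])
        coloring[n] = color
        colors.setdefault(color, set()).add(n)
        for neighbor in adj[n]:
            if neighbor in possible_colors and color in possible_colors[neighbor]:
                possible_colors[neighbor].remove(color)
        del possible_colors[n]
    return coloring, colors

# A 4-cycle needs two colors, and the heuristic finds a 2-coloring here.
adj = {0: {1, 3}, 1: {0, 2}, 2: {1, 3}, 3: {0, 2}}
coloring, colors = greedy_coloring(adj)
print(coloring)  # e.g. {0: 0, 1: 1, 2: 0, 3: 1}
print(colors)    # e.g. {0: {0, 2}, 1: {1, 3}}
```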
def _load_credentials_from_file(filename):
if (not os.path.exists(filename)):
raise exceptions.DefaultCredentialsError('File {} was not found.'.format(filename))
with io.open(filename, 'r') as file_obj:
try:
info = json.load(file_obj)
except ValueError as caught_exc:
new_exc = exceptions.DefaultCredentialsError('File {} is not a valid json file.'.format(filename), caught_exc)
six.raise_from(new_exc, caught_exc)
credential_type = info.get('type')
if (credential_type == _AUTHORIZED_USER_TYPE):
from google.auth import _cloud_sdk
try:
credentials = _cloud_sdk.load_authorized_user_credentials(info)
except ValueError as caught_exc:
msg = 'Failed to load authorized user credentials from {}'.format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
_warn_about_problematic_credentials(credentials)
return (credentials, None)
elif (credential_type == _SERVICE_ACCOUNT_TYPE):
from google.oauth2 import service_account
try:
credentials = service_account.Credentials.from_service_account_info(info)
except ValueError as caught_exc:
msg = 'Failed to load service account credentials from {}'.format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
return (credentials, info.get('project_id'))
else:
raise exceptions.DefaultCredentialsError('The file {file} does not have a valid type. Type is {type}, expected one of {valid_types}.'.format(file=filename, type=credential_type, valid_types=_VALID_TYPES)) | Loads credentials from a file.
The credentials file must be a service account key or stored authorized
user credentials.
Args:
filename (str): The full path to the credentials file.
Returns:
Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
credentials and the project ID. Authorized user credentials do not
have the project ID information.
Raises:
google.auth.exceptions.DefaultCredentialsError: if the file is in the
wrong format or is missing. | codesearchnet |
class PipedPipelineDataFormat(PipelineDataFormat):
def __iter__(self):
for line in sys.stdin:
if '\t' in line:
line = line.split('\t')
if self.column:
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
else:
yield line
def save(self, data: dict):
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError('When using piped input on pipeline outputting large object requires an output file path. Please provide such output path through --output argument.')
return super().save_binary(data) | Read data from piped input to the python process. For multi columns data, columns should separated by
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (`str`): Where to save the outgoing data.
input_path (`str`): Where to look for the input data.
column (`str`): The column to read.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to overwrite the `output_path`. | github-repos |
def write(self, output='jsonstat'):
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'") | Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe, depending on the
'output' parameter. | juraj-google-style |
def ParseTextToDicts(self, *args, **kwargs):
result_lists = self.ParseText(*args, **kwargs)
result_dicts = []
for row in result_lists:
result_dicts.append(dict(zip(self.header, row)))
return result_dicts | Calls ParseText and turns the result into list of dicts.
List items are dicts of rows, dict key is column header and value is column
value.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of dicts. | juraj-google-style |
def GetFileSystemReferenceCount(self, path_spec):
identifier = self._GetFileSystemCacheIdentifier(path_spec)
cache_value = self._file_system_cache.GetCacheValue(identifier)
if not cache_value:
return None
return cache_value.reference_count | Retrieves the reference count of a cached file system object.
Args:
path_spec (PathSpec): path specification.
Returns:
int: reference count or None if there is no file system object for
the corresponding path specification cached. | juraj-google-style |
def ReceiveMessagesRelationalFlows(self, client_id, messages):
now = time.time()
unprocessed_msgs = []
message_handler_requests = []
dropped_count = 0
for (session_id, msgs) in iteritems(collection.Group(messages, operator.attrgetter('session_id'))):
leftover_msgs = self.HandleWellKnownFlows(msgs)
for msg in leftover_msgs:
if ((msg.auth_state != msg.AuthorizationState.AUTHENTICATED) and (msg.session_id != self.unauth_allowed_session_id)):
dropped_count += 1
continue
if (session_id in queue_manager.session_id_map):
message_handler_requests.append(rdf_objects.MessageHandlerRequest(client_id=msg.source.Basename(), handler_name=queue_manager.session_id_map[session_id], request_id=msg.response_id, request=msg.payload))
else:
unprocessed_msgs.append(msg)
if dropped_count:
logging.info('Dropped %d unauthenticated messages for %s', dropped_count, client_id)
if unprocessed_msgs:
flow_responses = []
for message in unprocessed_msgs:
flow_responses.append(rdf_flow_objects.FlowResponseForLegacyResponse(message))
data_store.REL_DB.WriteFlowResponses(flow_responses)
for msg in unprocessed_msgs:
if (msg.type == rdf_flows.GrrMessage.Type.STATUS):
stat = rdf_flows.GrrStatus(msg.payload)
if (stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED):
crash_details = rdf_client.ClientCrash(client_id=client_id, session_id=msg.session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now())
events.Events.PublishEvent('ClientCrash', crash_details, token=self.token)
if message_handler_requests:
data_store.REL_DB.WriteMessageHandlerRequests(message_handler_requests)
logging.debug('Received %s messages from %s in %s sec', len(messages), client_id, (time.time() - now)) | Receives and processes messages for flows stored in the relational db.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | codesearchnet |
def Insert(self, request, global_params=None):
config = self.GetMethodConfig('Insert')
return self._RunMethod(config, request, global_params=global_params) | Creates a new, empty table in the dataset.
Args:
request: (BigqueryTablesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message. | github-repos |
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
artifact_filters = cls._ParseStringOption(options, 'artifact_filter_string')
artifact_filters_file = cls._ParseStringOption(
options, 'artifact_filters_file')
filter_file = cls._ParseStringOption(options, 'file_filter')
if artifact_filters and artifact_filters_file:
raise errors.BadConfigOption(
'Please only specify artifact definition names in a file '
'or on the command line.')
if (artifact_filters_file or artifact_filters) and filter_file:
raise errors.BadConfigOption(
'Please do not specify both artifact definitions and legacy filters.')
if artifact_filters_file and os.path.isfile(artifact_filters_file):
with open(artifact_filters_file) as file_object:
file_content = file_object.read()
artifact_filters = file_content.splitlines()
elif artifact_filters:
artifact_filters = [name.strip() for name in artifact_filters.split(',')]
setattr(configuration_object, '_artifact_filters', artifact_filters) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the required artifact definitions are not defined. | juraj-google-style |
def invitation_backend(backend=None, namespace=None):
backend = backend or ORGS_INVITATION_BACKEND
class_module, class_name = backend.rsplit(".", 1)
mod = import_module(class_module)
return getattr(mod, class_name)(namespace=namespace) | Returns a specified invitation backend
Args:
backend: dotted path to the invitation backend class
namespace: URL namespace to use
Returns:
an instance of an InvitationBackend | juraj-google-style |
def __init__(self, direction, edge_name, optional=False, within_optional_scope=False):
super(Traverse, self).__init__(
direction, edge_name, optional=optional, within_optional_scope=within_optional_scope)
self.direction = direction
self.edge_name = edge_name
self.optional = optional
self.within_optional_scope = within_optional_scope
self.validate() | Create a new Traverse block in the given direction and across the given edge.
Args:
direction: string, 'in' or 'out'
edge_name: string obeying variable name rules (see validate_safe_string).
optional: optional bool, specifying whether the traversal to the given location
is optional (i.e. non-filtering) or mandatory (filtering).
Returns:
new Traverse object | juraj-google-style |
def main(argv=None):
args = parse_mobly_cli_args(argv)
test_class = _find_test_class()
if args.list_tests:
_print_test_names(test_class)
sys.exit(0)
test_configs = config_parser.load_test_config_file(args.config, args.test_bed)
tests = None
if args.tests:
tests = args.tests
console_level = logging.DEBUG if args.verbose else logging.INFO
ok = True
for config in test_configs:
runner = TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name)
with runner.mobly_logger(console_level=console_level):
runner.add_test_class(config, test_class, tests)
try:
runner.run()
ok = runner.results.is_all_pass and ok
except signals.TestAbortAll:
pass
except Exception:
logging.exception('Exception when executing %s.', config.testbed_name)
ok = False
if not ok:
sys.exit(1) | Execute the test class in a test module.
This is the default entry point for running a test script file directly.
In this case, only one test class in a test script is allowed.
To make your test script executable, add the following to your file:
.. code-block:: python
from mobly import test_runner
...
if __name__ == '__main__':
test_runner.main()
If you want to implement your own cli entry point, you could use function
execute_one_test_class(test_class, test_config, test_identifier)
Args:
argv: A list that is then parsed as cli args. If None, defaults to cli
input. | github-repos |
def nb_r_deriv(r, data_row):
n = len(data_row)
d = sum(digamma(data_row + r)) - n*digamma(r) + n*np.log(r/(r+np.mean(data_row)))
return d | Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
r (float): the R paramemter in the NB distribution
data_row (array): 1d array of length cells | juraj-google-style |
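A sketch (added here, not from the source project) that checks the analytic derivative against a finite difference of a negative-binomial log-likelihood; the `(r, mean)` parametrization with the mean held at the sample mean is an assumption consistent with the formula above:

```python
import numpy as np
from scipy.special import digamma, gammaln

def nb_r_deriv(r, data_row):
    # Same formula as the function above.
    n = len(data_row)
    return sum(digamma(data_row + r)) - n * digamma(r) + n * np.log(r / (r + np.mean(data_row)))

def nb_loglik(r, data_row):
    # NB log-likelihood with the mean fixed at the sample mean (assumed parametrization).
    m = np.mean(data_row)
    return np.sum(gammaln(data_row + r) - gammaln(r) - gammaln(data_row + 1)
                  + r * np.log(r / (r + m)) + data_row * np.log(m / (r + m)))

data = np.array([0.0, 1.0, 3.0, 2.0, 5.0, 0.0, 1.0])
r, eps = 2.0, 1e-5
numeric = (nb_loglik(r + eps, data) - nb_loglik(r - eps, data)) / (2 * eps)
print(nb_r_deriv(r, data), numeric)  # the two values should agree closely
```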
def transformer_text_encoder(inputs,
target_space,
hparams,
name=None):
with tf.variable_scope(name, default_name="transformer_text_encoder"):
inputs = common_layers.flatten4d3d(inputs)
[
encoder_input,
encoder_self_attention_bias,
ed,
] = transformer_layers.transformer_prepare_encoder(
inputs, target_space=target_space, hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
encoder_output = transformer_layers.transformer_encoder(
encoder_input, encoder_self_attention_bias, hparams)
return encoder_output, ed | Transformer text encoder over inputs with unmasked full attention.
Args:
inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
target_space: int. Used for encoding inputs under a target space id.
hparams: HParams.
name: string, variable scope.
Returns:
encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
for any padded tokens. | juraj-google-style |
def insert(self, index, item):
if not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__) | Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored. | juraj-google-style |
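A minimal stand-in class (hypothetical, not the original) with the same insert logic, raising `TypeError` in place of the project-specific `WrongListItemType`:

```python
class HomogeneousList(list):
    """Hypothetical stand-in that only stores items of a single type."""

    def insert(self, index, item):
        # Same checks as the method above, but raising TypeError instead of
        # the project's WrongListItemType exception.
        if not self:
            list.append(self, item)
        elif item.__class__ == self[0].__class__:
            list.insert(self, index, item)
        else:
            raise TypeError('expected %s, got %s'
                            % (self[0].__class__.__name__, item.__class__.__name__))

items = HomogeneousList()
items.insert(0, 1)
items.insert(0, 2)        # fine: both ints
try:
    items.insert(0, 'x')  # rejected: str does not match int
except TypeError as exc:
    print(exc)
```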
def from_audio_encoder_config(cls, audio_encoder_config: PretrainedConfig, **kwargs):
return cls(audio_encoder_config=audio_encoder_config.to_dict(), **kwargs) | Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration.
Returns:
[`MoshiConfig`]: An instance of a configuration object | github-repos |
def ensure_resource_data(self, update_data=False):
if not any(key in self.data for key in self.UNIQUE_IDENTIFIERS):
raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)
if not update_data:
return
resource_data = None
if 'uri' in self.UNIQUE_IDENTIFIERS and self.data.get('uri'):
resource_data = self._helper.do_get(self.data['uri'])
else:
for identifier in self.UNIQUE_IDENTIFIERS:
identifier_value = self.data.get(identifier)
if identifier_value:
result = self.get_by(identifier, identifier_value)
if result and isinstance(result, list):
resource_data = result[0]
break
if resource_data:
self.data.update(resource_data)
else:
raise exceptions.HPOneViewResourceNotFound(RESOURCE_DOES_NOT_EXIST) | Retrieves data from OneView and updates resource object.
Args:
update_data: Flag to update resource data when it is required. | juraj-google-style |
def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
return incremental_indices + self.padding_idx | Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: tf.Tensor
Returns: tf.Tensor | github-repos |
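The same logic as a free function, on a tiny invented batch; `padding_idx=1` follows the fairseq/RoBERTa convention referenced in the docstring:

```python
import tensorflow as tf

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    # Free-function copy of the method above.
    mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=input_ids.dtype)
    incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
    return incremental_indices + padding_idx

# Padding tokens keep position id 1; real tokens are numbered 2, 3, 4, ...
input_ids = tf.constant([[5, 7, 9, 1, 1]])
print(create_position_ids_from_input_ids(input_ids, padding_idx=1).numpy())
# [[2 3 4 1 1]]
```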
def get_change_point_config(params: Dict[str, Any]) -> ChangePointConfig:
return ChangePointConfig(min_runs_between_change_points=params.get('min_runs_between_change_points', constants._DEFAULT_MIN_RUNS_BETWEEN_CHANGE_POINTS), num_runs_in_change_point_window=params.get('num_runs_in_change_point_window', constants._DEFAULT_NUM_RUMS_IN_CHANGE_POINT_WINDOW)) | Args:
params: Dict containing parameters to run change point analysis.
Returns:
ChangePointConfig object containing change point analysis parameters. | github-repos |
def __init__(self, file_object, delete_tempfile=True, journal_mode="DELETE"):
self.file_object = file_object
self.journal_mode = journal_mode
if hasattr(self.file_object, "name"):
self.name = self.file_object.name
self._delete_file = False
else:
self._delete_file = delete_tempfile
with tempfile.NamedTemporaryFile(delete=False) as fd:
self.name = fd.name
data = file_object.read(65536)
while data:
fd.write(data)
data = file_object.read(65536) | Init.
Args:
file_object: A file like object.
delete_tempfile: If we create a tempfile, should we delete it when
we're done.
journal_mode: If set to "WAL" a "Write-Ahead Log" is created. | juraj-google-style |
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):
if (tensors_to_log is None):
tensors_to_log = _TENSORS_TO_LOG
return tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=every_n_iter) | Function to get LoggingTensorHook.
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout. | codesearchnet |
def recoverURL(self, url):
self.setUserAgent()
if ('https://' in url):
self.setProxy(protocol='https')
else:
self.setProxy(protocol='http')
if ('.onion' in url):
try:
pass
except:
pass
url = url.replace('.onion', '.onion.cab')
try:
recurso = self.br.open(url)
except:
return None
html = recurso.read()
return html | Public method to recover a resource.
Args:
-----
url: The URL to be collected.
Returns:
--------
Returns a resource that has to be read, for instance, with html = self.br.read() | codesearchnet |
def skip(self, count, name=None) -> 'DatasetV2':
from tensorflow.python.data.ops import skip_op
return skip_op._skip(self, count, name) | Creates a `Dataset` that skips `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.skip(7)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[7, 8, 9]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above. | github-repos |
def create(self, request, desc, files, public=False):
request.data = json.dumps({
"description": desc,
"public": public,
"files": files,
})
return self.send(request).json()['html_url'] | Creates a gist
Arguments:
request: an initial request object
desc: the gist description
files: a list of files to add to the gist
public: a flag to indicate whether the gist is public or not
Returns:
The URL to the newly created gist. | juraj-google-style |
def compute_centroid(points):
lats = [p[1] for p in points]
lons = [p[0] for p in points]
return Point(np.mean(lats), np.mean(lons), None) | Computes the centroid of set of points
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`Point` | codesearchnet |
def get_asn_verbose_dns(self, asn=None):
if asn[0:2] != 'AS':
asn = 'AS{0}'.format(asn)
zone = '{0}.asn.cymru.com'.format(asn)
try:
log.debug('ASN verbose query for {0}'.format(zone))
data = self.dns_resolver.query(zone, 'TXT')
return str(data[0])
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers,
dns.resolver.NoAnswer, dns.exception.Timeout) as e:
raise ASNLookupError(
'ASN lookup failed (DNS {0}) for {1}.'.format(
e.__class__.__name__, asn)
)
except:
raise ASNLookupError(
'ASN lookup failed for {0}.'.format(asn)
) | The function for retrieving the information for an ASN from
Cymru via port 53 (DNS). This is needed since IP to ASN mapping via
Cymru DNS does not return the ASN Description like Cymru Whois does.
Args:
asn (:obj:`str`): The AS number (required).
Returns:
str: The raw ASN data.
Raises:
ASNLookupError: The ASN lookup failed. | juraj-google-style |
def _initialize_global_state(self, redis_address, redis_password=None, timeout=20):
self.redis_client = services.create_redis_client(redis_address, redis_password)
start_time = time.time()
num_redis_shards = None
redis_shard_addresses = []
while ((time.time() - start_time) < timeout):
num_redis_shards = self.redis_client.get('NumRedisShards')
if (num_redis_shards is None):
print('Waiting longer for NumRedisShards to be populated.')
time.sleep(1)
continue
num_redis_shards = int(num_redis_shards)
if (num_redis_shards < 1):
raise Exception('Expected at least one Redis shard, found {}.'.format(num_redis_shards))
redis_shard_addresses = self.redis_client.lrange('RedisShards', start=0, end=(- 1))
if (len(redis_shard_addresses) != num_redis_shards):
print('Waiting longer for RedisShards to be populated.')
time.sleep(1)
continue
break
if ((time.time() - start_time) >= timeout):
raise Exception('Timed out while attempting to initialize the global state. num_redis_shards = {}, redis_shard_addresses = {}'.format(num_redis_shards, redis_shard_addresses))
self.redis_clients = []
for shard_address in redis_shard_addresses:
self.redis_clients.append(services.create_redis_client(shard_address.decode(), redis_password)) | Initialize the GlobalState object by connecting to Redis.
It's possible that certain keys in Redis may not have been fully
populated yet. In this case, we will retry this method until they have
been populated or we exceed a timeout.
Args:
redis_address: The Redis address to connect.
redis_password: The password of the redis server. | codesearchnet |
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False, output_router_logits: bool=False) -> torch.Tensor:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ff_layer_norm(hidden_states)
if self.is_sparse:
hidden_states, router_states = self.ffn(hidden_states, attention_mask)
else:
hidden_states, router_states = (self.ffn(hidden_states), None)
hidden_states = self.ff_dropout(hidden_states)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
if output_router_logits:
outputs += (router_states,)
return outputs | Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. | github-repos |
def _ReadRecordAttributeValueOffset(self, file_object, file_offset, number_of_attribute_values):
offsets_data_size = (number_of_attribute_values * 4)
offsets_data = file_object.read(offsets_data_size)
context = dtfabric_data_maps.DataTypeMapContext(values={'number_of_attribute_values': number_of_attribute_values})
data_type_map = self._GetDataTypeMap('keychain_record_attribute_value_offsets')
try:
attribute_value_offsets = self._ReadStructureFromByteStream(offsets_data, file_offset, data_type_map, context=context)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map record attribute value offsets data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
return attribute_value_offsets | Reads the record attribute value offsets.
Args:
file_object (file): file-like object.
file_offset (int): offset of the record attribute values offsets relative
to the start of the file.
number_of_attribute_values (int): number of attribute values.
Returns:
keychain_record_attribute_value_offsets: record attribute value offsets.
Raises:
ParseError: if the record attribute value offsets cannot be read. | codesearchnet |
def get_events_for_subscription(access_token, subscription_id, start_timestamp):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/microsoft.insights/eventtypes/management/values?api-version=',
INSIGHTS_API, '&$filter=eventTimestamp ge \'', start_timestamp, '\''])
return do_get(endpoint, access_token) | Get the insights evens for a subsctipion since the specific timestamp.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.
Returns:
HTTP response. JSON body of insights events. | juraj-google-style |
def build_phenotype(phenotype_id, adapter):
phenotype_obj = {}
phenotype = adapter.hpo_term(phenotype_id)
if phenotype:
phenotype_obj['phenotype_id'] = phenotype['hpo_id']
phenotype_obj['feature'] = phenotype['description']
return phenotype | Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
) | juraj-google-style |
def get_properties(properties_file='raw.properties.json', env=None, region=None):
with open(properties_file, 'rt') as file_handle:
properties = json.load(file_handle)
env_properties = properties.get(env, properties)
contents = env_properties.get(region, env_properties)
LOG.debug('Found properties for %s:\n%s', env, contents)
return contents | Get contents of _properties_file_ for the _env_.
Args:
properties_file (str): File name of `create-configs` JSON output.
env (str): Environment to read optionally.
region (str): Region to get specific configs for.
Returns:
dict: JSON loaded Application properties for _env_.
None: Given _env_ was not found in `create-configs` JSON output. | juraj-google-style |
def synthesize(self, duration, tick_frequency):
sr = self.samplerate.samples_per_second
tick = np.random.uniform(low=-1., high=1., size=int(sr * .1))
tick *= np.linspace(1, 0, len(tick))
samples = np.zeros(int(sr * (duration / Seconds(1))))
ticks_per_second = Seconds(1) / tick_frequency
step = int(sr // ticks_per_second)
for i in range(0, len(samples), step):
size = len(samples[i:i + len(tick)])
samples[i:i + len(tick)] += tick[:size]
return AudioSamples(samples, self.samplerate) | Synthesize periodic "ticks", generated from white noise and an envelope
Args:
duration (numpy.timedelta64): The total duration of the sound to be
synthesized
tick_frequency (numpy.timedelta64): The frequency of the ticking
sound | juraj-google-style |
def _api_scrape(json_inp, ndx):
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['rowSet']
except KeyError:
headers = json_inp['resultSet']['headers']
values = json_inp['resultSet']['rowSet']
if HAS_PANDAS:
return DataFrame(values, columns=headers)
else:
return [dict(zip(headers, value)) for value in values] | Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values from the page | codesearchnet |
def _guess_format_from_extension(ext):
ext = ext.strip('.')
formats = []
for fmt in FILE_FORMATS:
if (ext in FILE_FORMATS[fmt]):
formats.append(fmt)
if ((formats == []) or (len(formats) > 1)):
return False
return formats[0] | Guess the appropriate data type from file extension.
Arguments:
ext: The file extension (period optional)
Returns:
String. The format (without leading period),
or False if none was found or couldn't be guessed | codesearchnet |
def block_view(self, mri):
controller = self.get_controller(mri)
block = controller.block_view(weakref.proxy(self))
return block | Get a view of a block
Args:
mri: The mri of the controller hosting the block
Returns:
Block: The block we control | codesearchnet |
def get_model_details(self, model_name):
full_name = model_name
if (not model_name.startswith('projects/')):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
return self._api.projects().models().get(name=full_name).execute() | Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details. | codesearchnet |
def render_dictionary(data, headers=None):
return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers)) | Return a dictionary list formatted as a HTML table.
Args:
data: the dictionary list
headers: the keys in the dictionary to use as table columns, in order. | juraj-google-style |
def create_index(index_name, index_config, client):
client.create(index=index_name, body=index_config) | Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client | codesearchnet |
def StartMergeTaskStorage(self, task):
if (self._storage_type != definitions.STORAGE_TYPE_SESSION):
raise IOError('Unsupported storage type.')
if (not self._merge_task_storage_path):
raise IOError('Missing merge task storage path.')
merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)
if (not os.path.isfile(merge_storage_file_path)):
raise IOError('Merge task storage path is not a file.')
return self._CreateTaskStorageMergeReader(merge_storage_file_path) | Starts a merge of a task storage with the session storage.
Args:
task (Task): task.
Returns:
StorageMergeReader: storage merge reader of the task storage.
Raises:
IOError: if the storage file cannot be opened or
if the storage type is not supported or
if the temporary path for the task storage does not exist or
if the temporary path for the task storage doe not refers to a file.
OSError: if the storage file cannot be opened or
if the storage type is not supported or
if the temporary path for the task storage does not exist or
if the temporary path for the task storage doe not refers to a file. | codesearchnet |
def post(cls, payload):
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = cls.set_id_in_fkeys(payload)
payload = cls.check_boolean_fields(payload)
payload = cls.add_model_name_to_payload(payload)
payload = cls.prepost_hooks(payload)
cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if not res.ok:
cls.log_error(res.text)
res_json = res.json()
if "exception" in res_json:
exc_type = res_json["exception"]
if exc_type == "ActiveRecord::RecordNotUnique":
raise RecordNotUnique()
res.raise_for_status()
res = res.json()
cls.log_post(res)
cls.debug_logger.debug("Success")
return res | Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`Requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique. | juraj-google-style |
def get_dimension_index(self, dimension):
if isinstance(dimension, int):
if ((dimension < (self.ndims + len(self.vdims))) or (dimension < len(self.dimensions()))):
return dimension
else:
return IndexError('Dimension index out of bounds')
dim = dimension_name(dimension)
try:
dimensions = (self.kdims + self.vdims)
return [i for (i, d) in enumerate(dimensions) if (d == dim)][0]
except IndexError:
raise Exception(('Dimension %s not found in %s.' % (dim, self.__class__.__name__))) | Get the index of the requested dimension.
Args:
dimension: Dimension to look up by name or by index
Returns:
Integer index of the requested dimension | codesearchnet |
def process_subj_or_pred(self, component: Union[(URIRef, str)]) -> URIRef:
if ('http' in component):
prefix = self.find_prefix(component)
if prefix:
self.process_prefix(prefix)
return URIRef(component)
elif (':' in component):
(presumed_prefix, info) = component.split(':', 1)
namespace: Union[(Namespace, None)] = self.process_prefix(presumed_prefix)
if (not namespace):
exit((component + ": qname namespace does't exist yet."))
return namespace[info]
exit((component + ': is not a valid subject or predicate')) | Adds viable uri from iri or expands viable qname to iri to be triple ready
You need a viable qualified name (qname) in order to use one. You can make it
viable either by adding the namespace beforehand with add_namespace(s), or by relying
on it already being preloaded in the local common_namespaces.
Args:
component: entity subject or predicate to be expanded or have its uri saved.
Returns:
rdflib URIRef ready subject or predicate to be put into a triple.
Raises:
SystemExit: When expecting a qname to be expanded, but is not valid or if
component is not a qualified name or a iri. | codesearchnet |
def read_analysis(self, file_handle):
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
file_handle.seek(start, 0)
self._analysis = file_handle.read(end - start)
else:
self._analysis = None | Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data | juraj-google-style |
def initialize(self, map_arr, start_point_label='S', end_point_label='G', wall_label='#', agent_label='@'):
np.set_printoptions(threshold=np.inf)
self.__agent_label = agent_label
self.__map_arr = map_arr
self.__start_point_label = start_point_label
start_arr_tuple = np.where((self.__map_arr == self.__start_point_label))
(x_arr, y_arr) = start_arr_tuple
self.__start_point_tuple = (x_arr[0], y_arr[0])
self.__end_point_label = end_point_label
end_arr_tuple = np.where((self.__map_arr == self.__end_point_label))
(x_arr, y_arr) = end_arr_tuple
self.__end_point_tuple = (x_arr[0], y_arr[0])
self.__wall_label = wall_label
for x in range(self.__map_arr.shape[1]):
for y in range(self.__map_arr.shape[0]):
if (((x, y) == self.__start_point_tuple) or ((x, y) == self.__end_point_tuple)):
continue
arr_value = self.__map_arr[y][x]
if (arr_value == self.__wall_label):
continue
self.save_r_df((x, y), float(arr_value)) | Initialize map of maze and setup reward value.
Args:
map_arr: Map. the 2d- `np.ndarray`.
start_point_label: Label of start point.
end_point_label: Label of end point.
wall_label: Label of wall.
agent_label: Label of agent. | codesearchnet |
def _detect(self):
results = []
for contract in self.contracts:
shadows = self.detect_builtin_shadowing_definitions(contract)
if shadows:
for shadow in shadows:
shadow_type = shadow[0]
shadow_object = shadow[1]
local_variable_parent = shadow[2]
local_variable_path = (contract.name + '.')
if (local_variable_parent is not None):
local_variable_path += (local_variable_parent.name + '.')
local_variable_path += shadow_object.name
info = '{} ({} @ {}) shadows built-in symbol "{}"\n'.format(local_variable_path, shadow_type, shadow_object.source_mapping_str, shadow_object.name)
json = self.generate_json_result(info)
if (shadow_type in [self.SHADOWING_FUNCTION, self.SHADOWING_MODIFIER, self.SHADOWING_EVENT]):
self.add_function_to_json(shadow_object, json)
elif (shadow_type in [self.SHADOWING_STATE_VARIABLE, self.SHADOWING_LOCAL_VARIABLE]):
self.add_variable_to_json(shadow_object, json)
results.append(json)
return results | Detect shadowing of built-in symbols
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'} | codesearchnet |
def print_tensor(self, args, screen_info=None):
parsed = self._arg_parsers['print_tensor'].parse_args(args)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(screen_info)
highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)
tensor_name, tensor_slicing = command_parser.parse_tensor_name_with_slicing(parsed.tensor_name)
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)
if self._debug_dump.loaded_partition_graphs() and (not self._debug_dump.node_exists(node_name)):
output = cli_shared.error('Node "%s" does not exist in partition graphs' % node_name)
_add_main_menu(output, node_name=None, enable_list_tensors=True, enable_print_tensor=False)
return output
watch_keys = self._debug_dump.debug_watch_keys(node_name)
if output_slot is None:
output_slots = set()
for watch_key in watch_keys:
output_slots.add(int(watch_key.split(':')[1]))
if len(output_slots) == 1:
output_slot = list(output_slots)[0]
else:
lines = ['Node "%s" generated debug dumps from %s output slots:' % (node_name, len(output_slots)), 'Please specify the output slot: %s:x.' % node_name]
output = debugger_cli_common.RichTextLines(lines)
_add_main_menu(output, node_name=node_name, enable_list_tensors=True, enable_print_tensor=False)
return output
matching_data = []
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
if datum.output_slot == output_slot:
matching_data.append(datum)
if not matching_data:
output = cli_shared.error('Tensor "%s" did not generate any dumps.' % parsed.tensor_name)
elif len(matching_data) == 1:
if parsed.number <= 0:
output = cli_shared.format_tensor(matching_data[0].get_tensor(), matching_data[0].watch_key, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, include_numeric_summary=parsed.numeric_summary, write_path=parsed.write_path)
else:
output = cli_shared.error('Invalid number (%d) for tensor %s, which generated one dump.' % (parsed.number, parsed.tensor_name))
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
else:
if parsed.number < 0:
lines = ['Tensor "%s" generated %d dumps:' % (parsed.tensor_name, len(matching_data))]
font_attr_segs = {}
for i, datum in enumerate(matching_data):
rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
lines.append('#%d [%.3f ms] %s' % (i, rel_time, datum.watch_key))
command = 'print_tensor %s -n %d' % (parsed.tensor_name, i)
font_attr_segs[len(lines) - 1] = [(len(lines[-1]) - len(datum.watch_key), len(lines[-1]), debugger_cli_common.MenuItem(None, command))]
lines.append('')
lines.append('You can use the -n (--number) flag to specify which dump to print.')
lines.append('For example:')
lines.append(' print_tensor %s -n 0' % parsed.tensor_name)
output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)
elif parsed.number >= len(matching_data):
output = cli_shared.error('Specified number (%d) exceeds the number of available dumps (%d) for tensor %s' % (parsed.number, len(matching_data), parsed.tensor_name))
else:
output = cli_shared.format_tensor(matching_data[parsed.number].get_tensor(), matching_data[parsed.number].watch_key + ' (dump #%d)' % parsed.number, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, write_path=parsed.write_path)
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
return output | Command handler for print_tensor.
Print value of a given dumped tensor.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object. | github-repos |
def move_file(src, dest):
try:
os.replace(src, dest)
except Exception as ex_replace:
logger.error(f"error moving file {src} to "
f"{dest}. {ex_replace}")
raise | Move source file to destination.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. | juraj-google-style |
def load_pkl(filenames):
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
times = []
for name in filenames:
name = str(name)
with open(name, 'rb') as file:
loaded_obj = pickle.load(file)
if not isinstance(loaded_obj, Times):
raise TypeError("At least one loaded object is not a Times data object.")
times.append(loaded_obj)
return times if len(times) > 1 else times[0] | Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object. | juraj-google-style |
def begin_operation(self, conn_or_internal_id, op_name, callback, timeout):
data = {'id': conn_or_internal_id, 'callback': callback, 'operation_name': op_name}
action = ConnectionAction('begin_operation', data, timeout=timeout, sync=False)
self._actions.put(action) | Begin an operation on a connection
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
op_name (string): The name of the operation that we are starting (stored in
the connection's microstate)
callback (callable): Callback to call when this disconnection attempt either
succeeds or fails
timeout (float): How long to allow this connection attempt to proceed
without timing it out (in seconds) | codesearchnet |
def individual(self, ind_id=None):
for ind_obj in self.individual_objs:
if ind_obj.ind_id == ind_id:
return ind_obj
return None | Return a individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual) | juraj-google-style |
def on(self, event_name, *args, **kwargs):
def decorator(f):
self.add_event_handler(event_name, f, *args, **kwargs)
return f
return decorator | Decorator shortcut for add_event_handler.
Args:
event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.Events` or
any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.
*args: optional args to be passed to `handler`.
**kwargs: optional keyword args to be passed to `handler`. | juraj-google-style |
def NCHW_VECT_CToNHWC(input_shape_or_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, tensor_lib.Tensor)
input_shape: list[int] = input_shape_or_tensor.shape.as_list() if is_tensor else input_shape_or_tensor
if input_shape[-1] != 4:
raise ValueError('Last dimension of NCHW_VECT_C must be 4.')
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape | Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4. | github-repos |
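A shape-only sketch of the list branch above (the helper name is hypothetical), showing how `[N, C/4, H, W, 4]` folds back into `[N, H, W, C]`:

```python
def nchw_vect_c_shape_to_nhwc(input_shape):
    # Shape-only path of the function above: move the channel-group axis to the
    # end and fold the trailing 4 back into the channel dimension.
    permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
    if input_shape[-1] != 4:
        raise ValueError('Last dimension of NCHW_VECT_C must be 4.')
    permutation = permutations[len(input_shape)]
    nhwc_shape = [input_shape[a] for a in permutation[:-1]]
    nhwc_shape[-1] *= input_shape[-1]
    return nhwc_shape

# NCHW_VECT_C stores channels as C/4 groups of 4: here N=8, C=64, H=W=32.
print(nchw_vect_c_shape_to_nhwc([8, 16, 32, 32, 4]))  # [8, 32, 32, 64]
```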
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = []
opt_args = []
mult_args = []
for arg in args:
if arg.get("multiple", False):
if arg["type"] in ["Function", "Modifier"]:
mult_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
mult_args.append(arg["type"])
elif arg.get("optional", False) and arg.get("position", False):
if arg["type"] in ["Function", "Modifier"]:
pos_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
pos_args.append(arg["type"])
elif arg.get("optional", False):
if arg["type"] in ["Function", "Modifier"]:
opt_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
opt_args.append(arg["type"])
else:
if arg["type"] in ["Function", "Modifier"]:
req_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
req_args.append(arg["type"])
spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy(
req_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy(
pos_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy(
opt_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i][
"mult_args"
] = copy.deepcopy(mult_args)
return spec_dict | Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict | juraj-google-style |
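A sketch of the spec_dict shape this traversal expects, inferred from the code above; the function name and argument entries are illustrative, not taken from a real BEL specification:

spec_dict = {
    "functions": {
        "signatures": {
            "complex": {
                "signatures": [
                    {
                        "arguments": [
                            {"type": "NSArg"},                                         # required
                            {"type": "StrArg", "optional": True},                      # optional
                            {"type": "NSArg", "optional": True, "position": True},     # positional
                            {"type": "Modifier", "multiple": True, "values": ["loc"]}  # multiple
                        ]
                    }
                ]
            }
        }
    }
}

enhanced = enhance_function_signatures(spec_dict)
sig = enhanced["functions"]["signatures"]["complex"]["signatures"][0]
# sig["req_args"] == ["NSArg"], sig["opt_args"] == ["StrArg"],
# sig["pos_args"] == ["NSArg"], sig["mult_args"] == ["loc"]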
def gfortran_search_path(library_dirs):
cmd = ("gfortran", "-print-search-dirs")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return_code = process.wait()
if return_code != 0:
return library_dirs
cmd_output = process.stdout.read().decode("utf-8")
search_lines = cmd_output.strip().split("\n")
library_lines = [
line[len(FORTRAN_LIBRARY_PREFIX) :]
for line in search_lines
if line.startswith(FORTRAN_LIBRARY_PREFIX)
]
if len(library_lines) != 1:
msg = GFORTRAN_MISSING_LIBS.format(cmd_output)
print(msg, file=sys.stderr)
return library_dirs
library_line = library_lines[0]
accepted = set(library_dirs)
for part in library_line.split(os.pathsep):
full_path = os.path.abspath(part.strip())
if os.path.isdir(full_path):
accepted.add(full_path)
else:
msg = GFORTRAN_BAD_PATH.format(full_path)
print(msg, file=sys.stderr)
return sorted(accepted) | Get the library directory paths for ``gfortran``.
Looks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``
and then parses the paths. If this fails for any reason, this method will
print an error and return ``library_dirs``.
Args:
library_dirs (List[str]): Existing library directories.
Returns:
List[str]: The library directories for ``gfortran``. | juraj-google-style |
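For reference, the line being parsed looks roughly like the comment below (directories vary by system; FORTRAN_LIBRARY_PREFIX is the documented ``libraries: =`` marker):

# gfortran -print-search-dirs emits, among other lines:
#   libraries: =/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/x86_64-linux-gnu/
# Existing directories from that line are merged with the incoming list:
dirs = gfortran_search_path(["/usr/local/lib"])
print(dirs)  # sorted union of the given dirs and gfortran's library dirs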
def copy(source, destination):
if os.path.isdir(source):
return __copytree(source, destination)
else:
return __copyfile2(source, destination) | Copy file or directory.
Args:
source (str): Source file or directory
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise. | juraj-google-style |
def _check_version(self, root):
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
raise UnsupportedVersionError(
message=error.format(version, supported),
expected=supported,
found=version
) | Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError | juraj-google-style |
def validate_seeded_answers(answers, options, algo):
if (algo['name'] == 'simple'):
return validate_seeded_answers_simple(answers, options, algo)
elif (algo['name'] == 'random'):
return validate_seeded_answers_random(answers)
else:
raise UnknownChooseAnswerAlgorithm() | Validate answers based on selection algorithm
This is called when the instructor sets up the tool and provides seeded answers to the question.
This function validates that the instructor provided enough seeds for a given algorithm,
e.g. we require 1 seed for each option in the simple algorithm and at least 1 seed for the random
algorithm. Otherwise, the first student would not be able to see any answers on the
second step, where they are supposed to compare and review other students' answers.
Args:
answers (list): list of dict that contain seeded answers
options (dict): all options that should exist in the answers
algo (dict): selection algorithm (dict with a 'name' key, e.g. 'simple' or 'random')
Returns:
None if successful, otherwise error message | codesearchnet |
def run(self, steps=None):
try:
while (self.instruction_pointer < len(self.code)):
self.step()
if (steps is not None):
steps -= 1
if (steps == 0):
break
except StopIteration:
pass
except EOFError:
pass
return self | Run threaded code in machine.
Args:
steps: If specified, run that many instructions before
stopping. | codesearchnet |
def is_displayed(target):
is_displayed = getattr(target, 'is_displayed', None)
if not is_displayed or not callable(is_displayed):
raise TypeError('Target has no attribute \'is_displayed\' or not callable')
if not is_displayed():
raise WebDriverException('element not visible') | Assert whether the target is displayed
Args:
target(WebElement): WebElement Object.
Raises:
TypeError if the target has no callable 'is_displayed'; WebDriverException if the element is not visible. | juraj-google-style
def replace_variables(self, text):
variables = {'python-executable': str(((self._venv_path / 'bin') / 'python'))}
return text.format(**variables) | Replace variable placeholders in `text` with values from the virtual env.
The variables are:
- {python-executable}
Args:
text: The text to do replacement in.
Returns: The text after replacement. | codesearchnet |
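A small usage sketch; ``builder`` stands in for whatever object defines replace_variables, and the venv path is illustrative:

# Assuming builder._venv_path == pathlib.Path("/tmp/venv"):
text = "run {python-executable} -m pip list"
print(builder.replace_variables(text))
# -> "run /tmp/venv/bin/python -m pip list"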
def askInitial():
return inquirer.prompt([inquirer.Text('inputPath', message="What's the path of your input file (eg input.csv)"), inquirer.List('year', message='What year are you in', choices=[1, 2, 3, 4]), inquirer.Checkbox('whatToDo', message='What can I do for you (select with your spacebar)', choices=['Get your weighted average', 'Get your rank in the year', 'Reformat results by module and output to csv', 'Plot the results by module'])]) | Asks the user for what it wants the script to do
Returns:
[dictionary] -- answers to the questions | codesearchnet |
def _create_dir_path(self, file_hash, path=None, hash_list=None):
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
if not os.path.exists(path):
os.mkdir(path)
return self._create_dir_path(
file_hash=file_hash,
path=path,
hash_list=hash_list
)
files = os.listdir(path)
if file_hash in files:
return path
if len(files) < self.dir_limit:
return path
return self._create_dir_path(
file_hash=file_hash,
path=os.path.join(path, hash_list.pop(0)),
hash_list=hash_list
) | Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path. | juraj-google-style |
def search(pattern):
def match(napp):
'Whether a NApp metadata matches the pattern.'
username = napp.get('username', napp.get('author'))
strings = (['{}/{}'.format(username, napp.get('name')), napp.get('description')] + napp.get('tags'))
return any((pattern.match(string) for string in strings))
napps = NAppsClient().get_napps()
return [napp for napp in napps if match(napp)] | Search all server NApps matching pattern.
Args:
pattern (str): Python regular expression. | codesearchnet |
def fetched_records(self, max_records=None):
if (max_records is None):
max_records = self.config['max_poll_records']
assert (max_records > 0)
drained = collections.defaultdict(list)
records_remaining = max_records
while (records_remaining > 0):
if (not self._next_partition_records):
if (not self._completed_fetches):
break
completion = self._completed_fetches.popleft()
self._next_partition_records = self._parse_fetched_data(completion)
else:
records_remaining -= self._append(drained, self._next_partition_records, records_remaining)
return (dict(drained), bool(self._completed_fetches)) | Returns previously fetched records and updates consumed offsets.
Arguments:
max_records (int): Maximum number of records returned. Defaults
to max_poll_records configuration.
Raises:
OffsetOutOfRangeError: if no subscription offset_reset_strategy
CorruptRecordException: if message crc validation fails (check_crcs
must be set to True)
RecordTooLargeError: if a message is larger than the currently
configured max_partition_fetch_bytes
TopicAuthorizationError: if consumer is not authorized to fetch
messages from the topic
Returns: (records (dict), partial (bool))
records: {TopicPartition: [messages]}
partial: True if records returned did not fully drain any pending
partition requests. This may be useful for choosing when to
pipeline additional fetch requests. | codesearchnet |
def print_search_results(self, search_results, buf=sys.stdout):
formatted_lines = self.format_search_results(search_results)
pr = Printer(buf)
for txt, style in formatted_lines:
pr(txt, style) | Print formatted search results.
Args:
search_results (list of `ResourceSearchResult`): Search results to format. | juraj-google-style
def _is_magical_field(self, model_instance, field, is_insert: bool):
old_value = getattr(model_instance, field.name, None)
field.pre_save(model_instance, is_insert)
new_value = getattr(model_instance, field.name, None)
return (old_value != new_value) | Verifies whether this field is gonna modify something
on its own.
"Magical" means that a field modifies the field value
during the pre_save.
Arguments:
model_instance:
The model instance the field is defined on.
field:
The field to check for whether it is
magical.
is_insert:
Whether this is an insert.
Returns:
True when this field modifies something. | codesearchnet |
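A concrete illustration of a "magical" field, assuming standard Django model fields in a configured project: ``auto_now`` makes ``pre_save`` assign a fresh timestamp to the instance, so the old/new comparison above differs and the method returns True, while a plain CharField returns False.

from django.db import models

class Article(models.Model):
    title = models.CharField(max_length=100)          # not magical: pre_save just returns the current value
    updated_at = models.DateTimeField(auto_now=True)  # magical: pre_save sets timezone.now() on the instance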
def delete(self, domain, type_name, search_command):
return self._request(domain, type_name, search_command, 'DELETE', None) | Delete entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES. | juraj-google-style |
def get_users(self, capacity=None):
users = list()
usersdicts = self.data.get('users')
if usersdicts is not None:
for userdata in usersdicts:
if capacity is not None and userdata['capacity'] != capacity:
continue
id = userdata.get('id')
if id is None:
id = userdata['name']
user = hdx.data.user.User.read_from_hdx(id, configuration=self.configuration)
user['capacity'] = userdata['capacity']
users.append(user)
return users | Returns the organization's users.
Args:
capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.
Returns:
List[User]: Organization's users. | juraj-google-style |
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_delete(endpoint, access_token) | Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the key vault to delete.
Returns:
HTTP response. 200 OK. | juraj-google-style |
async def setvolume(self, value):
self.logger.debug('volume command')
if (self.state != 'ready'):
return
logger.debug('Volume command received')
if (value == '+'):
if (self.volume < 100):
self.statuslog.debug('Volume up')
self.volume = ((10 * (self.volume // 10)) + 10)  # assumed reconstruction: round down to the nearest 10, then add 10
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = (self.volume / 100)
except AttributeError:
pass
else:
self.statuslog.warning('Already at maximum volume')
elif (value == '-'):
if (self.volume > 0):
self.statuslog.debug('Volume down')
self.volume = ((10 * ((self.volume + 9) // 10)) - 10)  # assumed reconstruction: round up to the nearest 10, then subtract 10
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = (self.volume / 100)
except AttributeError:
pass
else:
self.statuslog.warning('Already at minimum volume')
else:
try:
value = int(value)
except ValueError:
self.statuslog.error('Volume argument must be +, -, or a %')
else:
if (0 <= value <= 200):
self.statuslog.debug('Setting volume')
self.volume = value
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = (self.volume / 100)
except AttributeError:
pass
else:
self.statuslog.error('Volume must be between 0 and 200')
self.write_volume() | The volume command
Args:
value (str): The value to set the volume to | codesearchnet |
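The '+'/'-' branches step the volume in increments of 10 by first rounding to a multiple of 10; the exact formulas above are reconstructed, so treat them as an assumption. For example:

volume = 47
volume_up = (10 * (volume // 10)) + 10          # 47 -> 50
volume_down = (10 * ((volume + 9) // 10)) - 10  # 47 -> 40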
def archive(self, output_path):
if self.path is None:
raise ArgumentError("Cannot archive a recipe yet without a reference to its original yaml file in self.path")
outfile = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)
outfile.write(self.path, arcname="recipe_script.yaml")
written_files = set()
for _factory, args, _resources, files in self.steps:
for arg_name in files:
file_path = args[arg_name]
if file_path in written_files:
continue
if os.path.basename(file_path) != file_path:
raise ArgumentError("Cannot archive a recipe yet that references file not in the same directory as the recipe")
full_path = os.path.join(os.path.dirname(self.path), file_path)
outfile.write(full_path, arcname=file_path)
written_files.add(file_path) | Archive this recipe and all associated files into a .ship archive.
Args:
output_path (str): The path where the .ship file should be saved. | juraj-google-style |
def ReadSystemConfigurationArtifact(
self, system_configuration, session_identifier=CURRENT_SESSION):
if system_configuration.code_page:
try:
self.SetCodepage(system_configuration.code_page)
except ValueError:
logger.warning(
'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(
system_configuration.code_page, self._codepage))
self._hostnames[session_identifier] = system_configuration.hostname
self.SetValue('keyboard_layout', system_configuration.keyboard_layout)
self.SetValue('operating_system', system_configuration.operating_system)
self.SetValue(
'operating_system_product',
system_configuration.operating_system_product)
self.SetValue(
'operating_system_version',
system_configuration.operating_system_version)
if system_configuration.time_zone:
try:
self.SetTimeZone(system_configuration.time_zone)
except ValueError:
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
system_configuration.time_zone, self.timezone.zone))
self._user_accounts[session_identifier] = {
user_account.username: user_account
for user_account in system_configuration.user_accounts} | Reads the knowledge base values from a system configuration artifact.
Note that this overwrites existing values in the knowledge base.
Args:
system_configuration (SystemConfigurationArtifact): system configuration
artifact.
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session. | juraj-google-style |
def populate_ast_nsarg_defaults(ast, belast, species_id=None):
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
r = bel.terms.terms.get_normalized_terms(given_term_id)
ast.canonical = r["canonical"]
ast.decanonical = r["decanonical"]
r = bel.terms.terms.get_terms(ast.canonical)
if len(r) > 0:
ast.species_id = r[0].get("species_id", False)
ast.species_label = r[0].get("species_label", False)
if ast.species_id and species_id is None:
species_id = ast.species_id
belast.species.add((ast.species_id, ast.species_label))
elif ast.species_id and species_id and species_id != ast.species_id:
belast.species_id = False
belast.species_label = False
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_defaults(arg, belast, species_id)
return ast | Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
ast (BEL): BEL AST
Returns:
BEL: BEL AST | juraj-google-style |
def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/loadBalancers/', lb_name,
'?api-version=', NETWORK_API])
return do_put(endpoint, body, access_token) | Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Returns:
HTTP response. Load Balancer JSON body. | juraj-google-style |
def from_dict(cls, d):
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity) | Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object | juraj-google-style |
def configure(self, sbi_config: str):
config_dict = json.loads(sbi_config)
self.debug_stream('SBI configuration:\n%s',
json.dumps(config_dict, indent=2))
try:
sbi = Subarray(self.get_name()).configure_sbi(config_dict)
except jsonschema.exceptions.ValidationError as error:
return json.dumps(dict(path=error.absolute_path.__str__(),
schema_path=error.schema_path.__str__(),
message=error.message), indent=2)
except RuntimeError as error:
return json.dumps(dict(error=str(error)), indent=2)
return 'Accepted SBI: {}'.format(sbi.id) | Configure an SBI for this subarray.
Args:
sbi_config (str): SBI configuration JSON
Returns:
str, | juraj-google-style |
def sanger_variants(self, institute_id=None, case_id=None):
query = {'validation': {'$exists': True}}
if institute_id:
query['institute_id'] = institute_id
if case_id:
query['case_id'] = case_id
return self.variant_collection.find(query) | Return all variants with sanger information
Args:
institute_id(str)
case_id(str)
Returns:
res(pymongo.Cursor): A Cursor with all variants with sanger activity | codesearchnet |
def bessel_k0(x, name=None):
with ops.name_scope(name, 'bessel_k0', [x]):
return gen_special_math_ops.bessel_k0(x) | Computes the Bessel k0 function of `x` element-wise.
Modified Bessel function of order 0.
It is preferable to use the numerically stabler function `k0e(x)` instead.
>>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy()
array([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32)
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.k0
@end_compatibility | github-repos |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
device_cache = match.get('DeviceCache', {})
for device, value in iter(device_cache.items()):
name = value.get('Name', '')
if name:
name = ''.join(('Name:', name))
event_data = plist_event.PlistTimeEventData()
event_data.root = '/DeviceCache'
datetime_value = value.get('LastInquiryUpdate', None)
if datetime_value:
event_data.desc = ' '.join(
filter(None, ('Bluetooth Discovery', name)))
event_data.key = '{0:s}/LastInquiryUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if device in match.get('PairedDevices', []):
event_data.desc = 'Paired:True {0:s}'.format(name)
event_data.key = device
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = value.get('LastNameUpdate', None)
if datetime_value:
event_data.desc = ' '.join(filter(None, ('Device Name Set', name)))
event_data.key = '{0:s}/LastNameUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = value.get('LastServicesUpdate', None)
if datetime_value:
event_data.desc = ' '.join(filter(None, ('Services Updated', name)))
event_data.key = '{0:s}/LastServicesUpdate'.format(device)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts relevant BT entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | juraj-google-style |
def _call_unittest_assertion(assertion_method, *args, msg=None, extras=None, **kwargs):
my_msg = None
try:
assertion_method(*args, **kwargs)
except AssertionError as e:
my_msg = str(e)
if msg:
my_msg = f'{my_msg} {msg}'
if my_msg is not None:
raise signals.TestFailure(my_msg, extras=extras) | Wrapper for converting a unittest assertion into a Mobly one.
Args:
assertion_method: unittest.TestCase assertion method to call.
*args: Positional arguments for the assertion call.
msg: A string that adds additional info about the failure.
extras: An optional field for extra information to be included in
test result.
**kwargs: Keyword arguments for the assertion call. | github-repos |
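A minimal usage sketch; the unittest.TestCase instance here only supplies the assertion methods being wrapped:

import unittest

_case = unittest.TestCase()

# Passes silently when the underlying assertion holds;
# on failure it re-raises the AssertionError as signals.TestFailure,
# appending `msg` to the assertion's own message.
_call_unittest_assertion(_case.assertEqual, 1, 1)
_call_unittest_assertion(_case.assertIn, 'a', 'abc', msg='sanity check')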