code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes)
---|---|---
def remove(self, force=False):
return self.client.api.remove_plugin(self.name, force=force)
|
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def mtf_range(mesh, dim, dtype, name=None):
dim = convert_to_dimension(dim)
with tf.variable_scope(name, default_name='range'):
if (dtype == tf.bfloat16):
tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)
else:
tf_range = tf.range(dim.size, dtype=dtype)
return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))
|
Create a 1d mesh tensor with a range from [0, dim.size).
Call externally as mtf.range()
Args:
mesh: a Mesh
dim: a Dimension
dtype: a tf.DType
name: an optional string
Returns:
a Tensor
|
codesearchnet
|
def action(elem, doc):
if isinstance(elem, pf.CodeBlock):
doc.listings_counter += 1
elems = ([elem] if ('hide' not in elem.classes) else [])
if ('file' in elem.attributes):
elem.text = read_file(elem.attributes['file'])
filename = trimpath(elem.attributes)
prefix = pf.Emph(pf.Str('File:'))
if ('exec' in elem.classes):
if (('interactive' in elem.classes) or (elem.text[:4] == '>>> ')):
elem.text = execute_interactive_code(elem, doc)
else:
result = execute_code_block(elem, doc)
if ('hideimports' in elem.classes):
elem.text = remove_import_statements(elem.text)
if (('plt' in elem.attributes) or ('plt' in elem.classes)):
doc.plot_found = True
result = maybe_center_plot(result)
block = pf.RawBlock(result, format='latex')
else:
block = pf.CodeBlock(result, classes=['changelog'])
elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block]
if ('lines' in elem.attributes):
elem.text = filter_lines(elem.text, elem.attributes['lines'])
label = elem.attributes.get('label', f'cl:{doc.listings_counter}')
if ('caption' in elem.attributes.keys()):
doc.caption_found = True
cap = pf.convert_text(elem.attributes['caption'], output_format='latex')
if ('shortcaption' in elem.attributes.keys()):
shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex')
else:
shortcap = cap
if ('file' in elem.attributes.keys()):
cap += pf.convert_text(f' (`{filename}`)', output_format='latex')
elems = make_codelisting(elems, cap, label, shortcaption=shortcap, above=('capbelow' not in elem.classes))
elif ('caption' in elem.classes):
doc.caption_found = True
cap = ''
if ('file' in elem.attributes.keys()):
cap = pf.convert_text(f'`{filename}`', output_format='latex')
elems = make_codelisting(elems, cap, label, above=('capbelow' not in elem.classes))
elif ('file' in elem.attributes.keys()):
elems.insert(0, pf.Para(prefix, pf.Space, pf.Code(filename)))
return elems
|
Processes pf.CodeBlocks.
For details and a specification of how each command should behave,
check the example files (especially the md and pdf)!
Args:
elem: The element to process.
doc: The document.
Returns:
A changed element or None.
|
codesearchnet
|
def enable_eager_execution_internal(config=None, device_policy=None, execution_mode=None, server_def=None) -> None:
if config is not None and (not isinstance(config, config_pb2.ConfigProto)):
raise TypeError('config must be a tf.ConfigProto, but got %s' % type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError('device_policy must be one of None, DEVICE_PLACEMENT_*')
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError('execution_mode must be one of None, SYNC, ASYNC')
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = _default_graph_stack._global_default_graph is not None
if graph_mode_has_been_used:
raise ValueError('tf.enable_eager_execution must be called at program startup.')
context.default_execution_mode = context.EAGER_MODE
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=server_def))
elif config is not None and config is not context._context._config or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode):
raise ValueError('Trying to change the options of an active eager execution. Context config: %s, specified config: %s. Context device policy: %s, specified device policy: %s. Context execution mode: %s, specified execution mode %s.' % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode))
else:
context._context._thread_local_data.is_eager = True
context.context = context.context_safe
|
Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
|
github-repos
|
def squid_to_guid(squid):
squid_pattern = re.compile(r'^(\w{8})(\w{4})(\w{4})(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)$')
squid_match = squid_pattern.match(squid)
guid = ''
if squid_match is not None:
guid = '{' + \
squid_match.group(1)[::-1]+'-' + \
squid_match.group(2)[::-1]+'-' + \
squid_match.group(3)[::-1]+'-' + \
squid_match.group(4)[::-1]+squid_match.group(5)[::-1] + '-'
for index in range(6, 12):
guid += squid_match.group(index)[::-1]
guid += '}'
return guid
|
Converts a compressed GUID (SQUID) back into a GUID
Args:
squid (str): A valid compressed GUID
Returns:
str: A valid GUID
|
juraj-google-style
|
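A usage sketch for squid_to_guid above (re must be imported for the function itself); the compressed GUID is made up, and the expected output follows directly from the group-wise string reversal in the code:

squid = '04B0ECE51C72A6E4FB22CAA74FADB14D'  # hypothetical SQUID
print(squid_to_guid(squid))
# {5ECE0B40-27C1-4E6A-BF22-AC7AF4DA1BD4}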
def xor_bytes(a, b):
assert isinstance(a, bytes)
assert isinstance(b, bytes)
assert len(a) == len(b)
res = bytearray()
for i in range(len(a)):
res.append(a[i] ^ b[i])
return bytes(res)
|
XOR on two bytes objects
Args:
a (bytes): object 1
b (bytes): object 2
Returns:
bytes: The XOR result
|
juraj-google-style
|
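A minimal usage example for xor_bytes above:

a = b'\x01\x02\xff'
b = b'\xff\x0f\xff'
print(xor_bytes(a, b))  # b'\xfe\r\x00'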
def generate_masks_with_special_tokens_and_transfer_map(input_ids: torch.LongTensor) -> Tuple[Tensor, Tensor]:
batch_size, num_token = input_ids.shape
special_tokens_mask = torch.zeros((batch_size, num_token), device=input_ids.device).bool()
for special_token in SPECIAL_TOKENS:
special_tokens_mask |= input_ids == special_token
idxs = torch.nonzero(special_tokens_mask)
attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(batch_size, 1, 1)
position_ids = torch.zeros((batch_size, num_token), device=input_ids.device)
previous_col = 0
for i in range(idxs.shape[0]):
row, col = idxs[i]
if col == 0 or col == num_token - 1:
attention_mask[row, col, col] = True
position_ids[row, col] = 0
else:
attention_mask[row, previous_col + 1:col + 1, previous_col + 1:col + 1] = True
position_ids[row, previous_col + 1:col + 1] = torch.arange(0, col - previous_col, device=input_ids.device)
previous_col = col
return (attention_mask, position_ids.to(torch.long))
|
Generate attention mask between each pair of special tokens and positional ids.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Returns:
`tuple(torch.Tensor)` comprising attention mask between each special tokens and position_ids:
- **attention_mask** (`torch.BoolTensor` of shape `(batch_size, sequence_length, sequence_length)`)
- **position_ids** (`torch.LongTensor` of shape `(batch_size, sequence_length)`)
|
github-repos
|
def IsOutOfLineMethodDefinition(clean_lines, linenum):
for i in xrange(linenum, max((- 1), (linenum - 10)), (- 1)):
if Match('^([^()]*\\w+)\\(', clean_lines.elided[i]):
return (Match('^[^()]*\\w+::\\w+\\(', clean_lines.elided[i]) is not None)
return False
|
Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
|
codesearchnet
|
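The check above hinges on the '\w+::\w+\(' pattern; a stand-alone re illustration (independent of cpplint's Match helper) of what counts as an out-of-line method definition:

import re

pattern = r'^[^()]*\w+::\w+\('
print(bool(re.match(pattern, 'void Foo::Bar(int x) {')))  # True: qualified name, out-of-line definition
print(bool(re.match(pattern, 'void Bar(int x) {')))       # False: unqualified function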
def adafactor_decay_rate_adam(beta2):
t = tf.to_float(tf.train.get_or_create_global_step()) + 1.0
decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))
return decay
|
Second-moment decay rate like Adam, subsuming the correction factor.
Args:
beta2: a float between 0 and 1
Returns:
a scalar
|
juraj-google-style
|
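The decay rate above folds Adam's bias-correction term into beta2 (it "subsumes the correction factor"); the same formula can be checked in plain Python without TensorFlow:

beta2 = 0.999
for t in (1.0, 10.0, 1000.0):
    decay = beta2 * (1.0 - beta2 ** (t - 1.0)) / (1.0 - beta2 ** t)
    print(t, decay)
# At t=1 the decay is 0.0 (no history yet); it approaches beta2 as t grows.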
def l2_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
|
Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
|
juraj-google-style
|
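A hypothetical usage sketch for the l2_regularizer factory above, assuming a TF1-style graph via tf.compat.v1 (the tensor shape and weight value are illustrative, not from the source):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

weights = tf.placeholder(tf.float32, shape=[10, 10], name='weights')
regularizer = l2_regularizer(weight=1e-4)
penalty = regularizer(weights)  # 1e-4 * tf.nn.l2_loss(weights); typically added to the training loss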
def ExpectedEnginesToBuild(self, run_params):
return [f'TRTEngineOp_{seq_id:03d}' for seq_id in range(len(self.max_batch_sizes))]
|
Checks that the expected engine is built.
Args:
run_params: the run parameters.
Returns:
the expected engines to build.
There shall be engines generated for each maximum batch size.
|
github-repos
|
def start_instance(self):
start_url = self._get_url('start_path')
res = self.rest_client.session.put(start_url, json={})
_handle_http_errors(res)
return res.json()
|
Start the instance for this Streaming Analytics service.
Returns:
dict: JSON response for the instance start operation.
|
codesearchnet
|
def add_output(self, name, value):
self.template.add_output(Output(name, Value=value))
|
Simple helper for adding outputs.
Args:
name (str): The name of the output to create.
value (str): The value to put in the output.
|
codesearchnet
|
def _callback_main(self, call, handler='edit_config', target='running', source='startup'):
try:
if (handler == 'get_config'):
call = ET.tostring(call.getchildren()[0])
return self._mgr.get(filter=('subtree', call))
call = ET.tostring(call)
if (handler == 'get'):
call_element = xml_.to_ele(call)
return ET.fromstring(str(self._mgr.dispatch(call_element)))
if (handler == 'edit_config'):
self._mgr.edit_config(target=target, config=call)
if (handler == 'delete_config'):
self._mgr.delete_config(target=target)
if (handler == 'copy_config'):
self._mgr.copy_config(target=target, source=source)
except (ncclient.transport.TransportError, ncclient.transport.SessionCloseError, ncclient.transport.SSHError, ncclient.transport.AuthenticationError, ncclient.transport.SSHUnknownHostError) as error:
logging.error(error)
raise DeviceCommError
|
Callback for NETCONF calls.
Args:
call: An Element Tree element containing the XML of the NETCONF
call you intend to make to the device.
handler: Type of ncclient call to make.
get_config: NETCONF standard get config.
get: ncclient dispatch. For custom RPCs.
edit_config: NETCONF standard edit.
delete_config: NETCONF standard delete.
copy_config: NETCONF standard copy.
target: Target configuration location for action. Only used for
edit_config, delete_config, and copy_config.
source: Source of configuration information for copying
configuration. Only used for copy_config.
Returns:
None
Raises:
None
|
codesearchnet
|
def __init__(self, structure, defect_site, charge=0.):
self._structure = structure
self._charge = charge
self._defect_site = defect_site
if structure.lattice != defect_site.lattice:
raise ValueError("defect_site lattice must be same as structure lattice.")
|
Initializes an abstract defect
Args:
structure: Pymatgen Structure without any defects
defect_site (Site): site for defect within structure
must have same lattice as structure
charge: (int or float) defect charge
default is zero, meaning no change to NELECT after defect is created in the structure
(assuming use_structure_charge=True in vasp input set)
|
juraj-google-style
|
def dbmin50years(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} needs to be of type float '
'for field `dbmin50years`'.format(value))
self._dbmin50years = value
|
Corresponds to IDD Field `dbmin50years`
50-year return period values for minimum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmin50years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def watch(self, path, recursive=False):
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt:
self.stop_watching()
self._observer.join()
|
Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
|
juraj-google-style
|
def cancelMktDepth(self, contract: Contract, isSmartDepth=False):
ticker = self.ticker(contract)
reqId = self.wrapper.endTicker(ticker, 'mktDepth')
if reqId:
self.client.cancelMktDepth(reqId, isSmartDepth)
else:
self._logger.error(f'cancelMktDepth: No reqId found for contract {contract}')
|
Unsubscribe from market depth data.
Args:
contract: The exact contract object that was used to
subscribe with.
isSmartDepth (bool): True if the subscription was made with SMART depth.
|
codesearchnet
|
def _nested_from_proto(nested_proto, process_leafs):
if (not isinstance(nested_proto, module_pb2.NestedData)):
raise base_errors.ModuleInfoError('Expected module_pb2.NestedData.')
if nested_proto.HasField('value'):
value = nested_proto.value
if (not value):
value = _UnserializableObject()
else:
value = process_leafs(value)
return value
elif nested_proto.HasField('list'):
return [_nested_from_proto(child, process_leafs) for child in nested_proto.list.list]
elif nested_proto.HasField('tuple'):
return tuple((_nested_from_proto(child, process_leafs) for child in nested_proto.tuple.list))
elif nested_proto.HasField('dict'):
return {name: _nested_from_proto(child, process_leafs) for (name, child) in six.iteritems(nested_proto.dict.map)}
elif nested_proto.HasField('named_tuple'):
tmp_dict = {name: _nested_from_proto(child, process_leafs) for (name, child) in six.iteritems(nested_proto.named_tuple.map)}
NamedTuple = collections.namedtuple(nested_proto.named_tuple.name, tmp_dict.keys())
return NamedTuple(**tmp_dict)
elif nested_proto.HasField('special_type'):
if (nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES):
return _UnserializableObject()
type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]
return type_info.from_proto(nested_proto.special_type.object, process_leafs)
else:
raise base_errors.ModuleInfoError('Cannot deserialize a `ModuleInfo` protobuf with no fields.')
|
Deserializes `nested_proto`.
Args:
nested_proto: An instance of `module_pb2.NestedData`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `string`, `tuple`, `dict` or `namedtuple`.
Raises:
base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
|
codesearchnet
|
def tensor_layout(self, tensor_shape, mesh_shape):
ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)
for d in tensor_shape]
not_nones = [a for a in ret if a is not None]
if len(not_nones) != len(set(not_nones)):
raise ValueError(
"Two Tensor Dimensions may not map to the same Mesh Dimension:"
" layout=%s tensor_shape=%s mesh_shape=%s " %
(self, tensor_shape, mesh_shape))
return TensorLayout(ret)
|
Computes TensorLayout given a Tensor Shape and a Mesh Shape.
Args:
tensor_shape: Shape.
mesh_shape: Shape.
Returns:
TensorLayout.
Raises:
ValueError: If two Tensor Dimensions map to the same Mesh Dimensions.
|
juraj-google-style
|
def fresh(t, non_generic):
mappings = {}
def freshrec(tp):
p = prune(tp)
if isinstance(p, TypeVariable):
if is_generic(p, non_generic):
if p not in mappings:
mappings[p] = TypeVariable()
return mappings[p]
else:
return p
elif isinstance(p, dict):
return p
elif isinstance(p, Collection):
return Collection(*[freshrec(x) for x in p.types])
elif isinstance(p, Scalar):
return Scalar([freshrec(x) for x in p.types])
elif isinstance(p, TypeOperator):
return TypeOperator(p.name, [freshrec(x) for x in p.types])
elif isinstance(p, MultiType):
return MultiType([freshrec(x) for x in p.types])
else:
assert False, "missing freshrec case {}".format(type(p))
return freshrec(t)
|
Makes a copy of a type expression.
The type t is copied. The generic variables are duplicated and the
non_generic variables are shared.
Args:
t: A type to be copied.
non_generic: A set of non-generic TypeVariables
|
juraj-google-style
|
def list_current_jobs(self):
jobs = {}
for job in self.scheduler.get_jobs():
if (job.name not in ('schedule_jobs', 'process_status_queue')):
jobs[job.name] = job
return jobs
|
Return a dict of the currently scheduled jobs in APScheduler, keyed by job name
Returns:
`dict` of `str`: :obj:`apscheduler/job:Job`
|
codesearchnet
|
def Detect(self, str_in):
components = SplitIntoComponents(str_in)
extracted_paths = set()
for extractor in self.extractors:
extracted_paths.update(extractor.Extract(components))
results = set(extracted_paths)
for post_processor in self.post_processors:
processed_results = set()
for result in results:
processed_results.update(post_processor.Process(result))
results = processed_results
return results
|
Detects paths in a given string.
Args:
str_in: String where the paths should be detected.
Returns:
A set of paths (as strings) detected inside the given string.
|
codesearchnet
|
def isInstalled(value):
# NOTE: the original string literals were elided in this sample; the shell snippet below is an assumed placeholder that simply tests for the command's presence.
function = 'isinstalled() { command -v "$1" >/dev/null 2>&1 && echo 1 || echo 0; }'
command = '{f}; isinstalled {arg}'.format(f=function, arg=value)
cmd = CommandHelper(command)
cmd.execute()
return "1" in cmd.output
|
Check whether a piece of software is installed on the machine.
Args:
value (str): Software's name
Returns:
bool: True if the software is installed, False otherwise
|
juraj-google-style
|
def stream_reader_statements(stream_arn):
action_type = get_stream_action_type(stream_arn)
arn_parts = stream_arn.split('/')
wildcard_arn_parts = arn_parts[:(- 1)]
wildcard_arn_parts.append('*')
wildcard_arn = '/'.join(wildcard_arn_parts)
return [Statement(Effect=Allow, Resource=[stream_arn], Action=[action_type('DescribeStream'), action_type('GetRecords'), action_type('GetShardIterator')]), Statement(Effect=Allow, Resource=[wildcard_arn], Action=[action_type('ListStreams')])]
|
Returns statements to allow Lambda to read from a stream.
Handles both DynamoDB & Kinesis streams. Automatically figures out the
type of stream, and provides the correct actions from the supplied Arn.
Args:
stream_arn (str): A kinesis or dynamodb stream arn.
Returns:
list: A list of statements.
|
codesearchnet
|
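The ListStreams statement above uses a wildcard resource built by swapping out the final ARN segment; a stand-alone illustration with a made-up DynamoDB stream ARN:

stream_arn = 'arn:aws:dynamodb:us-east-1:123456789012:table/MyTable/stream/2019-01-01T00:00:00.000'
wildcard_arn = '/'.join(stream_arn.split('/')[:-1] + ['*'])
print(wildcard_arn)  # arn:aws:dynamodb:us-east-1:123456789012:table/MyTable/stream/*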
def fastcc_consistent_subset(model, epsilon, solver):
reaction_set = set(model.reactions)
return reaction_set.difference(fastcc(model, epsilon, solver))
|
Return consistent subset of model.
The largest consistent subset is returned as
a set of reaction names.
Args:
model: :class:`MetabolicModel` to solve.
epsilon: Flux threshold value.
solver: LP solver instance to use.
Returns:
Set of reaction IDs in the consistent reaction subset.
|
juraj-google-style
|
def run_validate_program_main(self, program_main):
program_language = self.profile.get('install_json').get('programLanguage', 'python').lower()
if ((program_language == 'python') and (not os.path.isfile('{}.py'.format(program_main)))):
print('{}{}Could not find program main file ({}).'.format(c.Style.BRIGHT, c.Fore.RED, program_main))
sys.exit(1)
|
Validate the program main file exists.
Args:
program_main (str): The executable name.
|
codesearchnet
|
def get_ss_class(pdb_file, dssp_file, chain):
prag = pr.parsePDB(pdb_file)
pr.parseDSSP(dssp_file, prag)
(alpha, threeTen, beta) = get_dssp_ss_content_multiplechains(prag, chain)
if ((alpha == 0) and (beta > 0)):
classification = 'all-beta'
elif ((beta == 0) and (alpha > 0)):
classification = 'all-alpha'
elif ((beta == 0) and (alpha == 0)):
classification = 'mixed'
elif ((float(alpha) / beta) >= 20):
classification = 'all-alpha'
else:
classification = 'mixed'
return classification
|
Define the secondary structure class of a PDB file for a specific chain.
Args:
pdb_file: Path to the PDB file.
dssp_file: Path to the corresponding DSSP file.
chain: Chain identifier(s) to classify.
Returns:
str: One of 'all-alpha', 'all-beta' or 'mixed'.
|
codesearchnet
|
def retry_auth_check(exception):
if isinstance(exception, apiclient.errors.HttpError):
if exception.resp.status in HTTP_AUTH_ERROR_CODES:
_print_error('Retrying...')
return True
return False
|
Specific check for auth error codes.
Return True if we should retry.
False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
|
juraj-google-style
|
def get_vcenter(self, **kwargs):
config = ET.Element('config')
urn = 'urn:brocade.com:mgmt:brocade-vswitch'
ET.SubElement(config, 'vcenter', xmlns=urn)
output = self._callback(config, handler='get_config')
result = []
element = ET.fromstring(str(output))
for vcenter in element.iter(('{%s}vcenter' % urn)):
vc = {}
vc['name'] = vcenter.find(('{%s}id' % urn)).text
vc['url'] = vcenter.find(('{%s}credentials' % urn)).find(('{%s}url' % urn)).text
isactive = vcenter.find(('{%s}activate' % urn))
if (isactive is None):
vc['isactive'] = False
else:
vc['isactive'] = True
result.append(vc)
return result
|
Get vCenter hosts on the switch
Args:
callback (function): A function executed upon completion of the
method.
Returns:
Returns a list of vcenters
Raises:
None
|
codesearchnet
|
def get(self, *index):
assert (self.wrapFunction is not None)
if ((len(index) == 1) and isinstance(index[0], (tuple, list))):
index = index[0]
if (len(index) == 0):
return self.wrapFunction(self._impl.get())
else:
return self.wrapFunction(self._impl.get(Tuple(index)._impl))
|
Get the instance with the specified index.
Returns:
The corresponding instance.
|
codesearchnet
|
def clear(self, size=-1, *, offset=0, chunk=None) -> None:
self.mglo.clear(size, offset, chunk)
|
Clear the content.
Args:
size (int): The size. Value ``-1`` means all.
Keyword Args:
offset (int): The offset.
chunk (bytes): The chunk to use repeatedly.
|
juraj-google-style
|
def MultiDestroyFlowStates(self, session_ids, request_limit=None):
subjects = [session_id.Add("state") for session_id in session_ids]
to_delete = []
deleted_requests = []
for subject, values in self.MultiResolvePrefix(
subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit):
for _, serialized, _ in values:
request = rdf_flow_runner.RequestState.FromSerializedString(serialized)
deleted_requests.append(request)
response_subject = self.GetFlowResponseSubject(request.session_id,
request.id)
to_delete.append(response_subject)
to_delete.append(subject)
self.DeleteSubjects(to_delete, sync=True)
return deleted_requests
|
Deletes all requests and responses for the given flows.
Args:
session_ids: A list of flows to destroy.
request_limit: A limit on the number of requests to delete.
Returns:
A list of requests that were deleted.
|
juraj-google-style
|
def get_template_list(self, page=1, page_size=None, account_id=None, query=None):
request = self._get_request()
parameters = {'page': page, 'page_size': page_size, 'account_id': account_id, 'query': query}
return request.get(self.TEMPLATE_GET_LIST_URL, parameters=parameters)
|
Lists your Templates
Args:
page (int, optional): Page number of the template List to return. Defaults to 1.
page_size (int, optional): Number of objects to be returned per page, must be between 1 and 100, default is 20.
account_id (str, optional): Which account to return Templates for. Must be a team member. Use "all" to indicate all team members. Defaults to your account.
query (str, optional): String that includes search terms and/or fields to be used to filter the Template objects.
Returns:
A ResourceList object
|
codesearchnet
|
def save(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Optional[ops.Operation]:
if options is not None and options.experimental_io_device is not None:
raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')
del options
tensor_names = []
tensors = []
tensor_slices = []
for saveable in self._saveable_objects:
for spec in saveable.specs:
tensor = spec.tensor
if tensor is not None:
if api.device_name() != spec.device:
tensor = api.pack([tensor] * self._mesh.host_mesh().num_local_devices(), layout.Layout.replicated(self._mesh.host_mesh(), rank=tensor.shape.rank))
tensor_names.append(spec.name)
tensors.append(tensor)
tensor_slices.append(spec.slice_spec)
return save_restore.sharded_save(self._mesh, file_prefix, tensor_names, tensor_slices, tensors)
|
Saves the saveable objects to a checkpoint with `file_prefix`.
Also query the generated shards from the distributed DTensor SaveV2 ops and
do a MergeV2 on those. Each op here is backed by a global_barrier to avoid
racing from multiple clients.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
options: Optional `CheckpointOptions` object. This is unused in DTensor.
Returns:
An `Operation`, or None when executing eagerly.
|
github-repos
|
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
if (not quantize):
with open(filename, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
f.flush()
else:
assert (concat_axis in [0, 1])
(dx, dy) = quantize_flow(flow, *args, **kwargs)
dxdy = np.concatenate((dx, dy), axis=concat_axis)
imwrite(dxdy, filename)
|
Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
|
codesearchnet
|
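A hypothetical usage sketch for flowwrite above, assuming the surrounding library (quantize_flow, imwrite) is importable; the flow array is random dummy data:

import numpy as np

flow = np.random.rand(480, 640, 2).astype(np.float32)  # dummy (h, w, 2) flow field
flowwrite(flow, 'out.flo')                  # lossless .flo output
flowwrite(flow, 'out.jpg', quantize=True)   # quantized, dx/dy concatenated into one image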
def apply_mutation(module_path, operator, occurrence):
module_ast = get_ast(module_path, python_version=operator.python_version)
original_code = module_ast.get_code()
visitor = MutationVisitor(occurrence, operator)
mutated_ast = visitor.walk(module_ast)
mutated_code = None
if visitor.mutation_applied:
mutated_code = mutated_ast.get_code()
with module_path.open(mode='wt', encoding='utf-8') as handle:
handle.write(mutated_code)
handle.flush()
return (original_code, mutated_code)
|
Apply a specific mutation to a file on disk.
Args:
module_path: The path to the module to mutate.
operator: The `operator` instance to use.
occurrence: The occurrence of the operator to apply.
Returns: A `(unmutated-code, mutated-code)` tuple. If no mutation was
performed, the `mutated-code` is `None`.
|
codesearchnet
|
def get_estimator(output_dir, train_config, args):
target_name = train_config['target_column']
if is_classification_model(args.model_type) and target_name not in \
train_config['categorical_columns']:
raise ValueError('When using a classification model, the target must be a '
'categorical variable.')
if is_regression_model(args.model_type) and target_name not in \
train_config['numerical_columns']:
raise ValueError('When using a regression model, the target must be a '
'numerical variable.')
if is_dnn_model(args.model_type) and not args.layer_sizes:
raise ValueError('--layer-size* must be used with DNN models')
if is_linear_model(args.model_type) and args.layer_sizes:
raise ValueError('--layer-size* cannot be used with linear models')
feature_columns = _tflearn_features(train_config, args)
config = tf.contrib.learn.RunConfig(
save_checkpoints_secs=args.save_checkpoints_secs)
train_dir = os.path.join(output_dir, 'train')
if args.model_type == 'dnn_regression':
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_regression':
estimator = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'dnn_classification':
estimator = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_classification':
estimator = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
else:
raise ValueError('bad --model-type value')
return estimator
|
Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Models are saved into output_dir/train
train_config: our training config
args: command line parameters
Returns:
TF learn estimator
Raises:
ValueError: if config is wrong.
|
juraj-google-style
|
def _ragged_tensor_to_string(string_tensor, summarize):
if string_tensor.shape.rank == 1:
pieces = string_tensor
else:
pieces = map_fn_lib.map_fn(lambda s: _ragged_tensor_to_string(s, summarize), string_tensor, fn_output_signature=tensor_lib.TensorSpec(None, dtypes.string))
if summarize not in (-1, None):
pieces = cond.cond(_nrows(string_tensor) <= 2 * summarize, lambda: pieces, lambda: array_ops.concat([pieces[:summarize], ['...'], pieces[-summarize:]], axis=0))
return '[' + string_ops.reduce_join(pieces, separator=', ') + ']'
|
Returns a scalar string tensor with the contents of `string_tensor`.
Args:
string_tensor: A potentially ragged tensor with dtype=string.
summarize: Include only the first and last `summarize` elements of each
dimension. If `-1` or `None`, then include all elements.
Returns:
A scalar string Tensor.
|
github-repos
|
def dump(ofp, *pb_objs, **kwargs):
mode = 'wb'
if isinstance(ofp, str):
ostream = open(ofp, mode=mode, **kwargs)
else:
ostream = open(fileobj=ofp, mode=mode, **kwargs)
with ostream:
ostream.write(*pb_objs)
|
Write to a stream.
Args:
ofp (string or file-like object): output stream.
pb_objs (*protobuf.message.Message): list of protobuf message objects
to be written.
|
juraj-google-style
|
def collective_diffusion_coefficient( self ):
if self.has_run:
return self.atoms.collective_dr_squared() / ( 6.0 * self.lattice.time )
else:
return None
|
Returns the collective or "jump" diffusion coefficient, D_J.
Args:
None
Returns:
(Float): The collective diffusion coefficient, D_J.
|
juraj-google-style
|
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
raise NotImplementedError('Must be implemented in subclasses.')
|
Aggregates batch-level results into total results.
Args:
batch_outs: A list of batch-level outputs.
batch_start: The start index of this batch. Always `None` if `use_steps`
is `True`.
batch_end: The end index of this batch. Always `None` if `use_steps` is
`True`.
|
github-repos
|
def __init__(self, dimensions, hidden_size):
super(LearnableMultivariateNormalDiagCell, self).__init__()
self.dimensions = dimensions
self.hidden_size = hidden_size
self.lstm_cell = tf.keras.layers.LSTMCell(hidden_size)
self.output_layer = tf.keras.layers.Dense(2*dimensions)
|
Constructs a learnable multivariate diagonal normal cell.
Args:
dimensions: An integer corresponding to the dimensionality of the
distribution.
hidden_size: Dimensionality of the LSTM function parameters.
|
juraj-google-style
|
def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting transpose ...')
if params['perm'][0] != 0:
if inputs[0] in layers:
print('!!! Cannot permute batch dimension. Result may be wrong !!!')
layers[scope_name] = layers[inputs[0]]
else:
print('Skip weight matrix transpose, result may be wrong.')
else:
if names:
tf_name = 'PERM' + random_string(4)
else:
tf_name = w_name + str(random.random())
permute = keras.layers.Permute(params['perm'][1:], name=tf_name)
layers[scope_name] = permute(layers[inputs[0]])
|
Convert transpose layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def pp_hex(raw, reverse=True):
if not reverse:
return ''.join(['{:02x}'.format(v) for v in bytearray(raw)])
return ''.join(reversed(['{:02x}'.format(v) for v in bytearray(raw)]))
|
Return a pretty-printed (hex style) version of a binary string.
Args:
raw (bytes): any sequence of bytes
reverse (bool): True if output should be in reverse order.
Returns:
Hex string corresponding to input byte sequence.
|
juraj-google-style
|
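A quick example for pp_hex above:

raw = b'\x01\x02\x03'
print(pp_hex(raw))                 # 030201 (reversed byte order, the default)
print(pp_hex(raw, reverse=False))  # 010203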
def _FormatReturnOrExitToken(self, token_data):
error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN')
return {'error': error_string, 'token_status': token_data.status, 'call_status': token_data.return_value}
|
Formats a return or exit token as a dictionary of values.
Args:
token_data (bsm_token_data_exit|bsm_token_data_return32|
bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or
AUT_RETURN64 token data.
Returns:
dict[str, str]: token values.
|
codesearchnet
|
def _extract_params(self, kwargs, hyperparameters):
init_params = dict()
fit_params = dict()
produce_params = dict()
for (name, param) in hyperparameters.get('fixed', dict()).items():
if (name in kwargs):
value = kwargs.pop(name)
elif ('default' in param):
value = param['default']
else:
raise TypeError("{} required argument '{}' not found".format(self.name, name))
init_params[name] = value
for (name, param) in hyperparameters.get('tunable', dict()).items():
if (name in kwargs):
init_params[name] = kwargs.pop(name)
fit_args = [arg['name'] for arg in self.fit_args]
produce_args = [arg['name'] for arg in self.produce_args]
for name in list(kwargs.keys()):
if (name in fit_args):
fit_params[name] = kwargs.pop(name)
elif (name in produce_args):
produce_params[name] = kwargs.pop(name)
if kwargs:
error = "Unexpected hyperparameters '{}'".format(', '.join(kwargs.keys()))
raise TypeError(error)
return (init_params, fit_params, produce_params)
|
Extract init, fit and produce params from kwargs.
The `init_params`, `fit_params` and `produce_params` are extracted
from the passed `kwargs` taking the metadata hyperparameters as a
reference.
During this extraction, make sure that all the required hyperparameters
have been given and that nothing unexpected exists in the input.
Args:
kwargs (dict): dict containing the Keyword arguments that have
been passed to the `__init__` method upon
initialization.
hyperparameters (dict): hyperparameters dictionary, as found in
the JSON annotation.
Raises:
TypeError: A `TypeError` is raised if a required argument is not
found in the `kwargs` dict, or if an unexpected
argument has been given.
|
codesearchnet
|
def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True):
if (not Tags._tags_dict):
if (configuration is None):
configuration = Configuration.read()
with Download(full_agent=configuration.get_user_agent()) as downloader:
if (url is None):
url = configuration['tags_cleanup_url']
Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn)
keys = Tags._tags_dict.keys()
chainerror = False
for (i, tag) in enumerate(keys):
whattodo = Tags._tags_dict[tag]
action = whattodo[u'action']
final_tags = whattodo[u'final tags (semicolon separated)']
for final_tag in final_tags.split(';'):
if (final_tag in keys):
index = list(keys).index(final_tag)
if (index != i):
whattodo2 = Tags._tags_dict[final_tag]
action2 = whattodo2[u'action']
if ((action2 != 'OK') and (action2 != 'Other')):
final_tags2 = whattodo2[u'final tags (semicolon separated)']
if (final_tag not in final_tags2.split(';')):
chainerror = True
if failchained:
logger.error(('Chained rules: %s (%s -> %s) | %s (%s -> %s)' % (action, tag, final_tags, action2, final_tag, final_tags2)))
if (failchained and chainerror):
raise ChainRuleError('Chained rules for tags detected!')
Tags._wildcard_tags = list()
for tag in Tags._tags_dict:
if ('*' in tag):
Tags._wildcard_tags.append(tag)
return (Tags._tags_dict, Tags._wildcard_tags)
|
Get tags cleanup dictionaries
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter).
keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5.
failchained (bool): Fail if chained rules found. Defaults to True.
Returns:
Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False, **kwargs):
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
if self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def download_software_version(version=None, synch=False):
if not version:
raise CommandExecutionError("Version option must not be none.")
if not isinstance(synch, bool):
raise CommandExecutionError("Synch option must be boolean..")
if synch is True:
query = {'type': 'op',
'cmd': '<request><system><software><download><sync-to-peer>yes</sync-to-peer>'
'<version>{0}</version></download></software></system></request>'.format(version)}
else:
query = {'type': 'op',
'cmd': '<request><system><software><download>'
'<version>{0}</version></download></software></system></request>'.format(version)}
return _get_job_results(query)
|
Download software packages by version number.
Args:
version(str): The version of the PANOS file to download.
synch (bool): If true then the file will synch to the peer unit.
CLI Example:
.. code-block:: bash
salt '*' panos.download_software_version 8.0.0
salt '*' panos.download_software_version 8.0.0 True
|
juraj-google-style
|
def __init__(self, n_clusters: int, batch_size: int, is_batched: bool=False):
super().__init__()
self.n_clusters = n_clusters
self.batch_size = batch_size
self.is_batched = is_batched
|
Preprocessing for Clustering Transformation
The clustering transform expects batches for performance reasons,
therefore this batches the data and converts it to numpy arrays,
which are accepted by sklearn. This transform also adds the same key
to all batches, such that only 1 state is created and updated during
clustering updates.
Example Usage::
pcoll | ClusteringPreprocessing(
n_clusters=8,
batch_size=1024,
is_batched=False)
Args:
n_clusters: number of clusters used by the algorithm
batch_size: size of the data batches
is_batched: boolean value that marks if the collection is already
batched and thus doesn't need to be batched by this transform
|
github-repos
|
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
(yield from data_stream.read_file(file=file))
reply = (yield from self._control_stream.read_reply())
self.raise_if_not_match('End stream', ReplyCodes.closing_data_connection, reply)
data_stream.close()
return reply
|
Read from the data stream.
Args:
file: A destination file object or a stream writer.
data_stream: The stream of which to read from.
Coroutine.
Returns:
Reply: The final reply.
|
codesearchnet
|
def _build_zmat(self, construction_table):
c_table = construction_table
default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})
zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
dtype='float', index=c_table.index)
zmat_frame.loc[:, optional_cols] = self.loc[c_table.index,
optional_cols]
zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
zmat_values = self._calculate_zmat_values(c_table)
zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values
zmatrix = Zmat(zmat_frame, metadata=self.metadata,
_metadata={'last_valid_cartesian': self.copy()})
return zmatrix
|
Create the Zmatrix from a construction table.
Args:
construction_table (pd.DataFrame): Construction table.
Returns:
Zmat: A new instance of :class:`Zmat`.
|
juraj-google-style
|
def write(self, data):
self._process.poll()
if self._process.returncode is not None:
raise EOFError('Process ended')
self._process.stdin.write(data)
|
Write bytes to the subprocess' input channel.
Args:
data(bytes): The data to write.
Raises:
EOFError: If the process exited.
|
juraj-google-style
|
def extract_possible_actions(self, state_arr):
agent_x, agent_y = np.where(state_arr[-1] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
possible_action_arr = None
for x, y in [
(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)
]:
next_x = agent_x + x
if next_x < 0 or next_x >= state_arr[-1].shape[1]:
continue
next_y = agent_y + y
if next_y < 0 or next_y >= state_arr[-1].shape[0]:
continue
wall_flag = False
if x > 0:
for add_x in range(1, x):
if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
wall_flag = True
elif x < 0:
for add_x in range(x, 0):
if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
wall_flag = True
if wall_flag is True:
continue
if y > 0:
for add_y in range(1, y):
if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
wall_flag = True
elif y < 0:
for add_y in range(y, 0):
if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
wall_flag = True
if wall_flag is True:
continue
if self.__map_arr[next_x, next_y] == self.WALL:
continue
if (next_x, next_y) in self.__route_memory_list:
continue
next_action_arr = np.zeros((
3 + self.__enemy_num,
state_arr[-1].shape[0],
state_arr[-1].shape[1]
))
next_action_arr[0][agent_x, agent_y] = 1
next_action_arr[1] = self.__map_arr
next_action_arr[-1][next_x, next_y] = 1
for e in range(self.__enemy_num):
enemy_state_arr = np.zeros(state_arr[0].shape)
enemy_state_arr[self.__enemy_pos_list[e][0], self.__enemy_pos_list[e][1]] = 1
next_action_arr[2 + e] = enemy_state_arr
next_action_arr = np.expand_dims(next_action_arr, axis=0)
if possible_action_arr is None:
possible_action_arr = next_action_arr
else:
possible_action_arr = np.r_[possible_action_arr, next_action_arr]
if possible_action_arr is not None:
while possible_action_arr.shape[0] < self.__batch_size:
key = np.random.randint(low=0, high=possible_action_arr.shape[0])
possible_action_arr = np.r_[
possible_action_arr,
np.expand_dims(possible_action_arr[key], axis=0)
]
else:
self.__route_memory_list = self.__route_memory_list[1:]
possible_action_arr = self.extract_possible_actions(state_arr)
return possible_action_arr
|
Extract possible actions.
Args:
state_arr: `np.ndarray` of state.
Returns:
`np.ndarray` of actions.
The shape is:(
`batch size corresponded to each action key`,
`channel that is 1`,
`feature points1`,
`feature points2`
)
|
juraj-google-style
|
def by_geopoint(self, lat, long):
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content)
|
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
|
juraj-google-style
|
def unhide_tool(self, context_name, tool_name):
data = self._context(context_name)
hidden_tools = data["hidden_tools"]
if tool_name in hidden_tools:
hidden_tools.remove(tool_name)
self._flush_tools()
|
Unhide a tool so that it may be exposed in a suite.
Note that unhiding a tool doesn't guarantee it can be seen - a tool of
the same name from a different context may be overriding it.
Args:
context_name (str): Context containing the tool.
tool_name (str): Name of tool to unhide.
|
juraj-google-style
|
def attach_socket(self, **kwargs):
return self.client.api.attach_socket(self.id, **kwargs)
|
Like :py:meth:`attach`, but returns the underlying socket-like object
for the HTTP request.
Args:
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def _show_all(saved_model_dir):
saved_model = saved_model_utils.read_saved_model(saved_model_dir)
for meta_graph_def in sorted(saved_model.meta_graphs, key=lambda meta_graph_def: list(meta_graph_def.meta_info_def.tags)):
tag_set = meta_graph_def.meta_info_def.tags
print("\nMetaGraphDef with tag-set: '%s' contains the following SignatureDefs:" % ', '.join(tag_set))
tag_set = ','.join(tag_set)
signature_def_map = meta_graph_def.signature_def
for signature_def_key in sorted(signature_def_map.keys()):
print("\nsignature_def['" + signature_def_key + "']:")
_show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent=1)
_show_ops_in_metagraph_mgd(meta_graph_def)
_show_defined_functions(saved_model_dir, saved_model.meta_graphs)
|
Prints tag-set, ops, SignatureDef, and Inputs/Outputs of SavedModel.
Prints all tag-set, ops, SignatureDef and Inputs/Outputs information stored in
SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
|
github-repos
|
def remove_child(self, c: 'AbstractSyntaxTree') -> None:
if self._children is None:
raise ValueError(f'No children belonging to {self!r}.')
self._children.remove(c)
c.parent = None
|
Removes a child from the reciever and sets its parent to `None`.
Args:
c: The child to remove. By default, compared using pointer equality.
Raises:
ValueError: In the event that the child does not belong to the underlying
list of children.
|
github-repos
|
def PublishEvent(cls, event_name, msg, token=None):
cls.PublishMultipleEvents({event_name: [msg]}, token=token)
|
Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
msg: The message to send to the event handler.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
|
juraj-google-style
|
def alwaysThrew(self, error_type=None):
if self.callCount == 0:
return False
if not error_type:
return True if len(self.exceptions) == self.callCount else False
else:
return uch.obj_in_list_always(self.exceptions, error_type)
|
Determine whether the specified exception is the only exception ever thrown.
Args:
error_type: The exception type to check for; if None, only checks that
every call threw an exception.
Returns:
bool
|
juraj-google-style
|
def load_hat(self, path):
hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if hat is None:
raise ValueError('No hat image found at `{}`'.format(path))
b, g, r, a = cv2.split(hat)
return cv2.merge((r, g, b, a))
|
Loads the hat from a picture at path.
Args:
path: The path to load from
Returns:
The hat data.
|
juraj-google-style
|
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
if (cifar_version == 'cifar10'):
url = _CIFAR10_URL
train_files = _CIFAR10_TRAIN_FILES
test_files = _CIFAR10_TEST_FILES
prefix = _CIFAR10_PREFIX
image_size = _CIFAR10_IMAGE_SIZE
label_key = 'labels'
elif ((cifar_version == 'cifar100') or (cifar_version == 'cifar20')):
url = _CIFAR100_URL
train_files = _CIFAR100_TRAIN_FILES
test_files = _CIFAR100_TEST_FILES
prefix = _CIFAR100_PREFIX
image_size = _CIFAR100_IMAGE_SIZE
if (cifar_version == 'cifar100'):
label_key = 'fine_labels'
else:
label_key = 'coarse_labels'
_get_cifar(tmp_dir, url)
data_files = (train_files if training else test_files)
(all_images, all_labels) = ([], [])
for filename in data_files:
path = os.path.join(tmp_dir, prefix, filename)
with tf.gfile.Open(path, 'rb') as f:
if six.PY2:
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding='latin1')
images = data['data']
num_images = images.shape[0]
images = images.reshape((num_images, 3, image_size, image_size))
all_images.extend([np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)])
labels = data[label_key]
all_labels.extend([labels[j] for j in range(num_images)])
return image_utils.image_generator(all_images[start_from:(start_from + how_many)], all_labels[start_from:(start_from + how_many)])
|
Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
|
codesearchnet
|
def __init__(self, max_iterations, unroll_loop=False):
assert max_iterations >= 0
self.max_iterations = max_iterations
assert isinstance(unroll_loop, bool)
self.unroll_loop = unroll_loop
super(Iterative, self).__init__()
self.initialize = tf.make_template(name_='initialize', func_=self.tf_initialize)
self.step = tf.make_template(name_='step', func_=self.tf_step)
self.next_step = tf.make_template(name_='next-step', func_=self.tf_next_step)
|
Creates a new iterative solver instance.
Args:
max_iterations: Maximum number of iterations before termination.
unroll_loop: Unrolls the TensorFlow while loop if true.
|
juraj-google-style
|
def email_address(self, address, owner=None, **kwargs):
return EmailAddress(self.tcex, address, owner=owner, **kwargs)
|
Create the Email Address TI object.
Args:
owner: The owner of the indicator (optional).
address: The email address value.
**kwargs: Additional keyword arguments passed through to EmailAddress.
Returns:
EmailAddress: The Email Address TI object.
|
codesearchnet
|
def html_for_modules_method(method_name, *args, **kwargs):
method = getattr(modules, method_name)
value = method(*args, **kwargs)
return KEY_VALUE_TEMPLATE.format(method_name, value)
|
Returns an HTML snippet for a Modules API method.
Args:
method_name: A string containing a Modules API method.
args: Positional arguments to be passed to the method.
kwargs: Keyword arguments to be passed to the method.
Returns:
String HTML representing the Modules API method and value.
|
juraj-google-style
|
def _begin_operation_action(self, action):
conn_key = action.data['id']
callback = action.data['callback']
if self._get_connection_state(conn_key) != self.Idle:
callback(conn_key, self.id, False, 'Cannot start operation, connection is not idle')
return
data = self._get_connection(conn_key)
data['state'] = self.InProgress
data['microstate'] = action.data['operation_name']
data['action'] = action
|
Begin an attempted operation.
Args:
action (ConnectionAction): the action object describing what we are
operating on
|
juraj-google-style
|
def _add_arg_java(self, key, value, mask=False):
if isinstance(value, bool):
value = int(value)
self._data[key] = value
self._args.append('{}{}={}'.format('-D', key, value))
self._args_quoted.append(self.quote('{}{}={}'.format('-D', key, value)))
if mask:
value = ('x' * len(str(value)))
self._args_masked.append('{}{}={}'.format('-D', key, value))
|
Add CLI Arg formatted specifically for Java.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether the value should be masked in the args output.
|
codesearchnet
|
def read_label_list(path):
ll = annotations.LabelList()
for record in read_label_file(path):
ll.add(annotations.Label(record[2], start=record[0], end=record[1]))
return ll
|
Reads labels from an Audacity label file
and returns them wrapped in a :py:class:`audiomate.annotations.LabelList`.
Args:
path (str): Path to the Audacity label file
Returns:
audiomate.annotations.LabelList: Label list containing the labels
|
juraj-google-style
|
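A hypothetical usage sketch for read_label_list above; Audacity label files are tab-separated lines of start time, end time and label text (the file name is made up, and the attribute names follow audiomate's Label as used in the code):

# labels.txt:
# 0.000000<TAB>2.500000<TAB>speech
# 2.500000<TAB>4.000000<TAB>music
label_list = read_label_list('labels.txt')
for label in label_list:  # LabelList iteration over its labels (assumed)
    print(label.start, label.end, label.value)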
def __init__(self, project_name, instance_name, table_name):
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = tf.contrib.cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
|
Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
|
juraj-google-style
|
def tell(self):
self._checkClosed()
return self._position
|
Tell the stream's current offset.
Returns:
current offset in reading this stream.
Raises:
``ValueError``: When this stream is closed.
|
github-repos
|
def get_intersection(self, range_):
result = []
for entry in self.entries:
(package, value) = entry
if (value is None):
continue
if (package.version not in range_):
continue
if isinstance(value, list):
variants = value
entry_ = _PackageEntry(package, variants, self.solver)
result.append(entry_)
continue
if self.solver.package_filter:
rule = self.solver.package_filter.excludes(package)
if rule:
if config.debug_package_exclusions:
print_debug(("Package '%s' was excluded by rule '%s'" % (package.qualified_name, str(rule))))
entry[1] = None
continue
if self.solver.package_load_callback:
self.solver.package_load_callback(package)
variants_ = []
for var in package.iter_variants():
variant = PackageVariant(var, self.solver.building)
variants_.append(variant)
entry[1] = variants_
entry_ = _PackageEntry(package, variants_, self.solver)
result.append(entry_)
return (result or None)
|
Get a list of variants that intersect with the given range.
Args:
range_ (`VersionRange`): Package version range.
Returns:
List of `_PackageEntry` objects.
|
codesearchnet
|
def register_event(self, name, callback, validator):
async def _validate_and_call(message):
payload = message.get('payload')
try:
payload = validator.verify(payload)
except ValidationError:
self._logger.warning('Dropping invalid payload for event %s, payload=%s', name, payload)
return
try:
result = callback(payload)
if inspect.isawaitable(result):
(await result)
except:
self._logger.error('Error calling callback for event %s, payload=%s', name, payload, exc_info=True)
self._manager.every_match(_validate_and_call, type='event', name=name)
|
Register a callback to receive events.
Every event with the matching name will have its payload validated
using validator and then will be passed to callback if validation
succeeds.
Callback may be a plain function or a coroutine function: if the
value it returns is awaitable, it is awaited. Exceptions raised by
the callback are logged and suppressed.
Args:
name (str): The name of the event that we are listening
for
callback (callable): The function that should be called
when a message that matches validator is received.
validator (Verifier): A schema verifier that will
validate the payload of each received message.
|
codesearchnet
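A usage sketch under stated assumptions: hub is an instance of the class above, StateVerifier is a stand-in for whatever Verifier implementation the project provides (it only needs a verify(payload) method), and the event name is made up.

verifier = StateVerifier()  # hypothetical Verifier with a .verify(payload) method

def on_state_change(payload):
    # Runs only for payloads that pass verifier.verify().
    print('state changed:', payload)

hub.register_event('state_change', on_state_change, verifier)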
|
def _peer_get_bfd(self, tx, rx, multiplier):
tx = self._callback(tx, handler='get_config')
rx = self._callback(rx, handler='get_config')
multiplier = self._callback(multiplier, handler='get_config')
tx = pynos.utilities.return_xml(str(tx))
rx = pynos.utilities.return_xml(str(rx))
multiplier = pynos.utilities.return_xml(str(multiplier))
config = pynos.utilities.merge_xml(tx, rx)
return pynos.utilities.merge_xml(config, multiplier)
|
Get and merge the `bfd` config from global BGP.
You should not use this method.
You probably want `BGP.bfd`.
Args:
tx: XML document with the XML to get the transmit interval.
rx: XML document with the XML to get the receive interval.
multiplier: XML document with the XML to get the interval
multiplier.
Returns:
Merged XML document.
Raises:
None
|
juraj-google-style
|
def load_institute(adapter, internal_id, display_name, sanger_recipients=None):
institute_obj = build_institute(internal_id=internal_id, display_name=display_name, sanger_recipients=sanger_recipients)
log.info('Loading institute {0} with display name {1} into database'.format(internal_id, display_name))
adapter.add_institute(institute_obj)
|
Load an institute into the database
Args:
adapter(MongoAdapter)
internal_id(str)
display_name(str)
sanger_recipients(list(email))
|
codesearchnet
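A minimal sketch, assuming adapter is an already-connected MongoAdapter; the institute ID, display name and recipient address are examples only.

load_institute(
    adapter,
    internal_id='cust000',
    display_name='Test Institute',
    sanger_recipients=['clinician@example.com'])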
|
def wait_for_stateful_block_init(context, mri, timeout=DEFAULT_TIMEOUT):
context.when_matches([mri, 'state', 'value'], StatefulStates.READY, bad_values=[StatefulStates.FAULT, StatefulStates.DISABLED], timeout=timeout)
|
Wait until a Block backed by a StatefulController has initialized
Args:
context (Context): The context to use to make the child block
mri (str): The mri of the child block
timeout (float): The maximum time to wait
|
codesearchnet
|
def add_attribute_label(self, attribute_id, label):
if (not self.can_update()):
self._tcex.handle_error(910, [self.type])
return self.tc_requests.add_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner)
|
Adds a security label to an attribute
Args:
attribute_id:
label:
Returns: A response json
|
codesearchnet
|
def _map_args(self, node: 'cfg.CFGNode', args: function.Args) -> 'dict[str, cfg.Variable]':
posargs = [u.AssignToNewVariable(node) for u in args.posargs]
kws = {k: u.AssignToNewVariable(node) for k, u in args.namedargs.items()}
sig = self.signature
callargs = {name: self.ctx.program.NewVariable(default.data, [], node) for name, default in sig.defaults.items()}
positional = dict(zip(sig.param_names, posargs))
posonly_names = set(sig.posonly_params)
for key in set(positional) - posonly_names:
if key in kws:
raise error_types.DuplicateKeyword(sig, args, self.ctx, key)
kwnames = set(kws)
extra_kws = kwnames.difference(sig.param_names + sig.kwonly_params)
if extra_kws and (not sig.kwargs_name):
if function.has_visible_namedarg(node, args, extra_kws):
raise error_types.WrongKeywordArgs(sig, args, self.ctx, extra_kws)
posonly_kws = kwnames & posonly_names
if posonly_kws and (not sig.kwargs_name):
raise error_types.WrongKeywordArgs(sig, args, self.ctx, posonly_kws)
callargs.update(positional)
callargs.update(kws)
for key, kwonly in itertools.chain(self.get_nondefault_params(), ((key, True) for key in sig.kwonly_params)):
if key not in callargs:
if args.starstarargs or (args.starargs and (not kwonly)):
callargs[key] = self.ctx.new_unsolvable(node)
else:
raise error_types.MissingParameter(sig, args, self.ctx, key)
if sig.varargs_name:
varargs_name = sig.varargs_name
extraneous = posargs[self.argcount(node):]
if args.starargs:
if extraneous:
log.warning('Not adding extra params to *%s', varargs_name)
callargs[varargs_name] = args.starargs.AssignToNewVariable(node)
else:
callargs[varargs_name] = self.ctx.convert.build_tuple(node, extraneous)
elif len(posargs) > self.argcount(node):
raise error_types.WrongArgCount(sig, args, self.ctx)
if sig.kwargs_name:
kwargs_name = sig.kwargs_name
if args.starstarargs:
callargs[kwargs_name] = args.starstarargs.AssignToNewVariable(node)
else:
omit = sig.param_names + sig.kwonly_params
k = _instances.Dict(self.ctx)
k.update(node, args.namedargs, omit=omit)
callargs[kwargs_name] = k.to_variable(node)
return callargs
|
Map call args to function args.
This emulates how Python would map arguments of function calls. It takes
care of keyword parameters, default parameters, and *args and **kwargs.
Args:
node: The current CFG node.
args: The arguments.
Returns:
A dictionary, mapping strings (parameter names) to cfg.Variable.
Raises:
function.FailedFunctionCall: If the caller supplied incorrect arguments.
|
github-repos
|
def urlEncodeAndJoin(self, seq, sepr=','):
try:
from urllib.parse import quote_plus as encode
return sepr.join([encode(x, encoding=CHARSET_UTF8) for x in seq])
except ImportError:
from urllib import quote as encode
return sepr.join([i for i in map(lambda x: encode(x), seq)])
|
URL-encode each element of seq and join the results with sepr (i.e. sepr.join(urlencode(x) for x in seq)).
Args:
seq: string list to be urlencoded
sepr: join seq with sepr
Returns:
str
|
juraj-google-style
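Expected behaviour on Python 3, where quote_plus is used; client is a placeholder for whatever object defines the method above.

encoded = client.urlEncodeAndJoin(['a b', 'c&d'], sepr=',')
print(encoded)  # 'a+b,c%26d' (quote_plus encodes the space as '+')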
|
def patchify(self, pixel_values, interpolate_pos_encoding: bool=False):
patch_size, num_channels = (self.config.patch_size, self.config.num_channels)
if shape_list(pixel_values)[1] == num_channels:
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
if not interpolate_pos_encoding:
tf.debugging.assert_equal(shape_list(pixel_values)[1], shape_list(pixel_values)[2], message='Make sure the pixel values have a squared size')
tf.debugging.assert_equal(shape_list(pixel_values)[1] % patch_size, 0, message='Make sure the pixel values have a size that is divisible by the patch size')
tf.debugging.assert_equal(shape_list(pixel_values)[3], num_channels, message='Make sure the number of channels of the pixel values is equal to the one set in the configuration')
batch_size = shape_list(pixel_values)[0]
num_patches_h = shape_list(pixel_values)[1] // patch_size
num_patches_w = shape_list(pixel_values)[2] // patch_size
patchified_pixel_values = tf.reshape(pixel_values, (batch_size, num_patches_h, patch_size, num_patches_w, patch_size, num_channels))
patchified_pixel_values = tf.einsum('nhpwqc->nhwpqc', patchified_pixel_values)
patchified_pixel_values = tf.reshape(patchified_pixel_values, (batch_size, num_patches_h * num_patches_w, patch_size ** 2 * num_channels))
return patchified_pixel_values
|
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)` or `(batch_size, num_channels, height, width)`):
Pixel values.
interpolate_pos_encoding (`bool`, default `False`):
interpolation flag passed during the forward pass.
Returns:
`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
Patchified pixel values.
|
github-repos
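A NumPy sketch of the same shape arithmetic (not the TF implementation above): a 224x224x3 image with a 16-pixel patch size yields 14 x 14 = 196 patches of 16 * 16 * 3 = 768 values each.

import numpy as np

batch, height, width, channels, patch = 2, 224, 224, 3, 16
pixels = np.random.rand(batch, height, width, channels)

# Mirrors the reshape -> einsum -> reshape chain above.
x = pixels.reshape(batch, height // patch, patch, width // patch, patch, channels)
x = np.einsum('nhpwqc->nhwpqc', x)
patches = x.reshape(batch, (height // patch) * (width // patch), patch * patch * channels)
print(patches.shape)  # (2, 196, 768)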
|
def lstat(self, entry_path, dir_fd=None):
entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
return self.filesystem.stat(entry_path, follow_symlinks=False)
|
Return the os.stat-like tuple for entry_path, not following symlinks.
Args:
entry_path: path to filesystem object to retrieve.
dir_fd: If not `None`, the file descriptor of a directory, with
`entry_path` being relative to this directory.
New in Python 3.3.
Returns:
the FakeStatResult object corresponding to `entry_path`.
Raises:
OSError: if the filesystem object doesn't exist.
|
juraj-google-style
|
def __init__(self, prefix):
self.bed = PyPlink(prefix)
self.bim = self.bed.get_bim()
self.fam = self.bed.get_fam()
self.bim["multiallelic"] = False
self.bim.loc[
self.bim.duplicated(["chrom", "pos"], keep=False),
"multiallelic"
] = True
try:
self.fam = self.fam.set_index("iid", verify_integrity=True)
except ValueError:
logging.info(
"Setting the index as 'fid_iid' because the individual IDs "
"are not unique."
)
self.fam["fid_iid"] = [
"{fid}_{iid}".format(fid=fid, iid=iid)
for fid, iid in zip(self.fam.fid, self.fam.iid)
]
self.fam = self.fam.set_index("fid_iid", verify_integrity=True)
|
Binary plink file reader.
Args:
prefix (str): the prefix of the Plink binary files.
|
juraj-google-style
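A construction sketch; the class name is not visible above, so PlinkReader is a placeholder, and 'data/cohort' stands for existing cohort.bed/.bim/.fam files.

reader = PlinkReader('data/cohort')  # placeholder class name

print(reader.bim.head())           # variant table, including the 'multiallelic' flag
print(reader.fam.index.is_unique)  # True: indexed by iid, or fid_iid on collision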
|
def assert_global_step(global_step_tensor):
if not (isinstance(global_step_tensor, variables.Variable) or isinstance(global_step_tensor, tensor.Tensor) or resource_variable_ops.is_resource_variable(global_step_tensor)):
raise TypeError('Existing "global_step" must be a Variable or Tensor: %s.' % global_step_tensor)
if not global_step_tensor.dtype.base_dtype.is_integer:
raise TypeError('Existing "global_step" does not have integer type: %s' % global_step_tensor.dtype)
if global_step_tensor.get_shape().ndims != 0 and global_step_tensor.get_shape().is_fully_defined():
raise TypeError('Existing "global_step" is not scalar: %s' % global_step_tensor.get_shape())
|
Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.
Args:
global_step_tensor: `Tensor` to test.
|
github-repos
|
def set_state(self, vid, value=None, default=False, disable=False):
cmds = self.command_builder('state', value=value, default=default, disable=disable)
return self.configure_vlan(vid, cmds)
|
Configures the VLAN state
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
value (str): The value to set the vlan state to
default (bool): Configures the vlan state to its default value
disable (bool): Negates the vlan state
Returns:
True if the operation was successful otherwise False
|
codesearchnet
|
def tar_add_bytes(tf, filename, bytestring):
if not isinstance(bytestring, bytes):
bytestring = bytestring.encode('ascii')
buff = io.BytesIO(bytestring)
tarinfo = tarfile.TarInfo(filename)
tarinfo.size = len(bytestring)
tf.addfile(tarinfo, buff)
|
Add a file to a tar archive
Args:
tf (tarfile.TarFile): tarfile to add the file to
filename (str): path within the tar file
bytestring (bytes or str): file contents. Must be :class:`bytes` or
ascii-encodable :class:`str`
|
juraj-google-style
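A self-contained usage sketch, assuming tar_add_bytes is importable from the module above; it builds an in-memory tar containing one generated file.

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tf:
    # str input is ascii-encoded by tar_add_bytes before being added.
    tar_add_bytes(tf, 'hello.txt', 'hello world\n')

print(len(buf.getvalue()))  # size of the resulting archive in bytes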
|
def timeRange(
start: datetime.time, end: datetime.time,
step: float) -> Iterator[datetime.datetime]:
assert step > 0
start = _fillDate(start)
end = _fillDate(end)
delta = datetime.timedelta(seconds=step)
t = start
while t < datetime.datetime.now():
t += delta
while t <= end:
waitUntil(t)
yield t
t += delta
|
Iterator that waits periodically until certain time points are
reached while yielding those time points.
Args:
start: Start time, can be specified as datetime.datetime,
or as datetime.time in which case today is used as the date
end: End time, can be specified as datetime.datetime,
or as datetime.time in which case today is used as the date
step (float): The number of seconds of each period
|
juraj-google-style
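A usage sketch assuming timeRange and its helpers (_fillDate, waitUntil) come from the surrounding module; it ticks once a minute between two clock times today, skipping any times already in the past.

import datetime

for t in timeRange(datetime.time(9, 30), datetime.time(10, 0), 60):
    print('tick at', t)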
|
def __call__(self, *args):
if len(self.formatters) == 0:
self.setup(*args)
row_cells = []
if self.rownum:
row_cells.append(0)
if self.timestamp:
row_cells.append(datetime.datetime.now())
if self.time_diff:
row_cells.append(0)
row_cells.extend(args)
if len(row_cells) != len(self.formatters):
raise ValueError('Expected number of columns is {}. Got {}.'.format(
len(self.formatters), len(row_cells)))
line = self.format_row(*row_cells)
self.print_line(line)
|
Prints a formatted row
Args:
args: row cells
|
juraj-google-style
|
def create_query(self, fields=None):
if fields is None:
return Query(self.fields)
non_contained_fields = set(fields) - set(self.fields)
if non_contained_fields:
raise BaseLunrException(
"Fields {} are not part of the index", non_contained_fields
)
return Query(fields)
|
Convenience method to create a Query with the Index's fields.
Args:
fields (iterable, optional): The fields to include in the Query,
defaults to the Index's `all_fields`.
Returns:
Query: With the specified fields or all the fields in the Index.
|
juraj-google-style
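A minimal sketch assuming idx is an existing lunr.py Index with 'title' and 'body' fields.

title_query = idx.create_query(['title'])  # restricted to the title field
full_query = idx.create_query()            # defaults to every indexed field

# Requesting a field the index does not know raises BaseLunrException:
# idx.create_query(['missing_field'])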
|
def make_supercells_with_defects(self, scaling_matrix):
scs = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
scs.append(sc)
for ids, defect_site in enumerate(self._defect_sites):
sc_with_inter = sc.copy()
sc_with_inter.append(
defect_site.species_string,
defect_site.frac_coords,
coords_are_cartesian=False,
validate_proximity=False,
properties=None)
if not sc_with_inter:
raise RuntimeError(
"could not generate supercell with" " interstitial {}".format(
ids + 1))
scs.append(sc_with_inter.copy())
return scs
|
Generate a sequence of supercells
in which each supercell contains a single interstitial,
except for the first supercell in the sequence
which is a copy of the defect-free input structure.
Args:
scaling_matrix (3x3 integer array): scaling matrix
to transform the lattice vectors.
Returns:
scs ([Structure]): sequence of supercells.
|
juraj-google-style
|
def CopyToDict(self):
result_dict = {'labels': self.labels}
if self.comment:
result_dict['comment'] = self.comment
return result_dict
|
Copies the event tag to a dictionary.
Returns:
dict[str, object]: event tag attributes.
|
codesearchnet
|
def convert_timedelta(duration):
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
return days, hours, minutes, seconds
|
Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers)
|
juraj-google-style
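A quick check of the arithmetic above.

import datetime

duration = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)
print(convert_timedelta(duration))  # (1, 2, 3, 4)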
|
def _original_path(self, path):
def components_to_path():
if (len(path_components) > len(normalized_components)):
normalized_components.extend(path_components[len(normalized_components):])
sep = self._path_separator(path)
normalized_path = sep.join(normalized_components)
if (path.startswith(sep) and (not normalized_path.startswith(sep))):
normalized_path = (sep + normalized_path)
return normalized_path
if (self.is_case_sensitive or (not path)):
return path
path_components = self._path_components(path)
normalized_components = []
current_dir = self.root
for component in path_components:
if (not isinstance(current_dir, FakeDirectory)):
return components_to_path()
(dir_name, current_dir) = self._directory_content(current_dir, component)
if ((current_dir is None) or (isinstance(current_dir, FakeDirectory) and (current_dir._byte_contents is None) and (current_dir.st_size == 0))):
return components_to_path()
normalized_components.append(dir_name)
return components_to_path()
|
Return a normalized case version of the given path for
case-insensitive file systems. For case-sensitive file systems,
return path unchanged.
Args:
path: the file path to be transformed
Returns:
A version of path matching the case of existing path elements.
|
codesearchnet
|
def generate_json_schema(cls, schema, context=DEFAULT_DICT):
schema = cls._get_schema(schema)
return cls(context=context).dump(schema).data
|
Generate a JSON Schema from a Marshmallow schema.
Args:
schema (marshmallow.Schema|str): The Marshmallow schema, or the
Python path to one, to create the JSON schema for.
Keyword Args:
context (dict, optional): Context to pass when dumping the
schema. Defaults to an empty dict.
Returns:
dict: The JSON schema in dictionary form.
|
juraj-google-style
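A hedged usage sketch; UserSchema is a made-up marshmallow schema and JSONSchema stands in for the class that defines the classmethod above.

from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.String(required=True)
    age = fields.Integer()

json_schema = JSONSchema.generate_json_schema(UserSchema)  # placeholder class name
print(json_schema)  # dict form of the JSON schema; exact layout depends on the library version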
|
def plot(self, **plot_kwargs: Any) -> None:
fig = plt.figure()
plt.plot(self._rabi_angles, self._excited_state_probs, 'ro-',
figure=fig, **plot_kwargs)
plt.xlabel(r"Rabi Angle (Radian)", figure=fig)
plt.ylabel('Excited State Probability', figure=fig)
fig.show()
|
Plots excited state probability vs the Rabi angle (angle of rotation
around the x-axis).
Args:
**plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.
|
juraj-google-style
|
def Print(self, output_writer):
if self._date_time_ranges:
for date_time_range in self._date_time_ranges:
if date_time_range.start_date_time is None:
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} before {1:s}\n'.format(
date_time_range.time_value, end_time_string))
elif date_time_range.end_date_time is None:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
output_writer.Write('\t{0:s} after {1:s}\n'.format(
date_time_range.time_value, start_time_string))
else:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} between {1:s} and {2:s}\n'.format(
date_time_range.time_value, start_time_string,
end_time_string))
|
Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
|
juraj-google-style
|
def ones(shape, dtype=None, **kwargs):
data = np.ones(shape, dtype)
return dc.array(data, **kwargs)
|
Create an array of given shape and type, filled with ones.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array filled with ones.
|
codesearchnet
|
async def gather(self, *cmds: str) -> Tuple[int]:
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return (await asyncio.gather(*subproc_wait_coros))
|
Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
|
codesearchnet
|
def compile_protofile(proto_file_path):
out_file = tempfile.mkstemp()[1]
try:
subprocess.check_output(['protoc', '--include_source_info',
'--descriptor_set_out', out_file,
proto_file_path])
except subprocess.CalledProcessError as e:
sys.exit('protoc returned status {}'.format(e.returncode))
return out_file
|
Compile proto file to descriptor set.
Args:
proto_file_path: Path to proto file to compile.
Returns:
Path to file containing compiled descriptor set.
Raises:
SystemExit if the compilation fails.
|
juraj-google-style
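A usage sketch; it assumes the protoc binary is on PATH and that example.proto exists.

descriptor_path = compile_protofile('example.proto')

with open(descriptor_path, 'rb') as fh:
    print(len(fh.read()), 'bytes of serialized FileDescriptorSet')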
|
def _click(x, y, button):
if button == 'left':
try:
_sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)
except (PermissionError, OSError):
pass
elif button == 'middle':
try:
_sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)
except (PermissionError, OSError):
pass
elif button == 'right':
try:
_sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)
except (PermissionError, OSError):
pass
else:
assert False, "button argument not in ('left', 'middle', 'right')"
|
Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None
|
juraj-google-style
|