code | docstring | source
---|---|---|
def from_signature(cls, sig: inspect.Signature, name: str, callable_type: CallableType, module_name: Optional[str]=None, qualname: Optional[str]=None, auto_typing: bool=False, docstr: Union[str, utils.DocStr, None]=None, parent_module: Optional[types.ModuleType]=None) -> 'Signature':
args = []
kwonly_args = []
varargs = None
varkw = None
if isinstance(docstr, str):
docstr = utils.DocStr.parse(docstr)
def make_arg_spec(param: inspect.Parameter) -> Argument:
docstr_arg = docstr.parameter(param) if docstr else None
return Argument.from_parameter(param, description=docstr_arg.description if docstr_arg else None, auto_typing=auto_typing, parent_module=parent_module)
for param in sig.parameters.values():
arg_spec = make_arg_spec(param)
if param.kind == inspect.Parameter.POSITIONAL_ONLY or param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
args.append(arg_spec)
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwonly_args.append(arg_spec)
elif param.kind == inspect.Parameter.VAR_POSITIONAL:
varargs = arg_spec
else:
assert param.kind == inspect.Parameter.VAR_KEYWORD, param.kind
varkw = arg_spec
return_value = None
if sig.return_annotation is not inspect.Parameter.empty:
return_value = class_schema.ValueSpec.from_annotation(sig.return_annotation, auto_typing=auto_typing, parent_module=parent_module)
return cls(callable_type=callable_type, name=name, module_name=module_name, qualname=qualname, description=docstr.short_description if docstr else None, args=args, kwonlyargs=kwonly_args, varargs=varargs, varkw=varkw, return_value=return_value)
|
Returns PyGlove signature from Python signature.
Args:
sig: Python signature.
name: Name of the entity (class name or function/method name).
callable_type: the type of this callable.
module_name: Module name of the entity.
qualname: (Optional) qualified name of the entity.
auto_typing: If True, automatically convert argument annotations
to PyGlove ValueSpec objects. Otherwise use pg.typing.Any()
with annotations.
docstr: (Optional) DocStr for this entity.
parent_module: (Optional) Parent module from where the signature is
derived. This is useful to infer classes with forward declarations.
Returns:
A PyGlove Signature object.
|
github-repos
|
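The parameter-kind dispatch above mirrors how the standard library classifies arguments. A minimal sketch (plain `inspect`, with a hypothetical `example` function, not PyGlove code) showing which bucket each kind falls into:

```python
import inspect

def example(a, /, b, *args, c=1, **kwargs):  # hypothetical function (Python 3.8+ syntax)
    pass

for p in inspect.signature(example).parameters.values():
    print(p.name, p.kind)
# a      POSITIONAL_ONLY        -> appended to `args`
# b      POSITIONAL_OR_KEYWORD  -> appended to `args`
# args   VAR_POSITIONAL         -> stored as `varargs`
# c      KEYWORD_ONLY           -> appended to `kwonly_args`
# kwargs VAR_KEYWORD            -> stored as `varkw`
```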
def from_dict(cls, d, identifier_str=None):
def _print_version(value):
return '.'.join(str(x) for x in value)
toks = str(d["serialize_version"]).split('.')
load_ver = tuple(int(x) for x in toks)
curr_ver = ResolvedContext.serialize_version
if load_ver[0] > curr_ver[0]:
msg = ["The context"]
if identifier_str:
msg.append("in %s" % identifier_str)
msg.append("was written by a newer version of Rez. The load may "
"fail (serialize version %d > %d)"
% (_print_version(load_ver), _print_version(curr_ver)))
print(' '.join(msg), file=sys.stderr)
r = ResolvedContext.__new__(ResolvedContext)
r.load_path = None
r.pre_resolve_bindings = None
r.timestamp = d["timestamp"]
r.building = d["building"]
r.caching = d["caching"]
r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]]
r._package_requests = [PackageRequest(x) for x in d["package_requests"]]
r.package_paths = d["package_paths"]
r.rez_version = d["rez_version"]
r.rez_path = d["rez_path"]
r.user = d["user"]
r.host = d["host"]
r.platform = d["platform"]
r.arch = d["arch"]
r.os = d["os"]
r.created = d["created"]
r.verbosity = d.get("verbosity", 0)
r.status_ = ResolverStatus[d["status"]]
r.failure_description = d["failure_description"]
r.solve_time = d["solve_time"]
r.load_time = d["load_time"]
r.graph_string = d["graph"]
r.graph_ = None
r._resolved_packages = []
for d_ in d["resolved_packages"]:
variant_handle = d_
if load_ver < (4, 0):
from rez.utils.backcompat import convert_old_variant_handle
variant_handle = convert_old_variant_handle(variant_handle)
variant = get_variant(variant_handle)
variant.set_context(r)
r._resolved_packages.append(variant)
r.requested_timestamp = d.get("requested_timestamp", 0)
r.parent_suite_path = d.get("parent_suite_path")
r.suite_context_name = d.get("suite_context_name")
r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")]
patch_locks = d.get("patch_locks", {})
r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks.items())
r.from_cache = d.get("from_cache", False)
data = d.get("package_filter", [])
r.package_filter = PackageFilterList.from_pod(data)
data = d.get("package_orderers")
if data:
r.package_orderers = [package_order.from_pod(x) for x in data]
else:
r.package_orderers = None
r.num_loaded_packages = d.get("num_loaded_packages", -1)
if config.context_tracking_host:
data = dict((k, v) for k, v in d.items()
if k in config.context_tracking_context_fields)
r._track_context(data, action="sourced")
return r
|
Load a `ResolvedContext` from a dict.
Args:
d (dict): Dict containing context data.
identifier_str (str): String identifying the context, this is only
used to display in an error string if a serialization version
mismatch is detected.
Returns:
`ResolvedContext` object.
|
juraj-google-style
|
def solve_fba(self, objective):
self._prob.set_objective(self._v_wt[objective])
return self._solve(lp.ObjectiveSense.Maximize)
|
Solve the wild type problem using FBA.
Args:
objective: The objective reaction to be maximized.
Returns:
The LP Result object for the solved FBA problem.
|
codesearchnet
|
def read_tables(fstream):
table = read_table(fstream)
while (table is not None):
(yield table)
table = read_table(fstream)
|
Read all tables from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all tables in the fstream.
|
codesearchnet
|
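The generator above keeps yielding until `read_table` returns `None`. A minimal equivalent sketch using the two-argument form of `iter()`, assuming the same `read_table` helper and `None` sentinel:

```python
def read_tables(fstream):
    # iter(callable, sentinel) calls read_table(fstream) repeatedly and
    # stops as soon as it returns None, yielding every table before that.
    yield from iter(lambda: read_table(fstream), None)
```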
def get_shape(x: tf.Tensor, name: Optional[str]=None) -> Union[tf.TensorShape, types.IntTensor]:
name = 'get_shape' if name is None else name
with tf.name_scope(name):
x = tf.convert_to_tensor(x)
is_fully_defined = x.shape.is_fully_defined()
if is_fully_defined:
return x.shape
return tf.shape(x)
|
Returns the static shape of `x` if it is fully defined, or the dynamic shape otherwise.
#### Example
```python
import tensorflow as tf
import tf_quant_finance as tff
x = tf.zeros([5, 2])
get_shape(x)
# Expected: [5, 2]
```
Args:
x: A tensor of any shape and `dtype`.
name: Python string. The name to give to the ops created by this function.
Default value: `None` which maps to the default name
`get_shape`.
Returns:
A shape of `x`, which is a `TensorShape` if the shape is fully defined, or a `Tensor`
for dynamically shaped `x`.
|
github-repos
|
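A small sketch of the two branches of `get_shape`, assuming TensorFlow 2.x: with a concrete tensor the static `TensorShape` is returned, while inside a `tf.function` traced with an unknown leading dimension the call falls back to the dynamic `tf.shape` tensor.

```python
import tensorflow as tf

x = tf.zeros([5, 2])
print(get_shape(x))  # TensorShape([5, 2]) -- shape is fully defined, so it is static

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
def dynamic_example(t):
    # Leading dimension is unknown at trace time, so this returns tf.shape(t),
    # a rank-1 int32 Tensor evaluated at run time.
    return get_shape(t)
```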
def recipe_to_colab(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None):
colab = Colab(name)
colab.header(name)
colab.paragraph(description)
colab.header('License')
colab.paragraph(textwrap.dedent('\n Copyright 2020 Google LLC,\n\n Licensed under the Apache License, Version 2.0 (the "License");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n https:
colab.header('Disclaimer')
colab.paragraph('This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.')
colab.paragraph(textwrap.dedent('\n This code generated (see starthinker/scripts for possible source):\n - **Command**: "python starthinker_ui/manage.py colab"\n - **Command**: "python starthinker/tools/colab.py [JSON RECIPE]"\n '))
colab.header('1. Install Dependencies')
colab.paragraph('First install the libraries needed to execute recipes, this only needs to be done once, then click play.')
colab.code('!pip install git+https:
colab.header('2. Set Configuration')
colab.paragraph(textwrap.dedent('\n This code is required to initialize the project. Fill in required fields and press play.\n\n 1. If the recipe uses a Google Cloud Project:\n - Set the configuration **project** value to the project identifier from [these instructions](https:
colab.code('from starthinker.util.configuration import Configuration')
colab.code('')
colab.code(textwrap.dedent('\n CONFIG = Configuration(\n project="",\n client={},\n service={},\n user="/content/user.json",\n verbose=True\n )\n '))
fields = json_get_fields(tasks)
if fields:
colab.header('3. Enter %s Recipe Parameters' % name)
colab.list(instructions)
colab.paragraph('Modify the values below for your use case, can be done multiple times, then click play.')
colab.code('FIELDS = %s' % fields_to_string(fields, parameters))
colab.code('\nprint("Parameters Set To: %s" % FIELDS)')
colab.header('%d. Execute %s' % (4 if fields else 3, name))
colab.paragraph('This does NOT need to be modified unless you are changing the recipe, click play.')
colab.code('from starthinker.util.configuration import execute')
colab.code('from starthinker.util.recipe import json_set_fields')
colab.code('')
colab.code('TASKS = %s' % dict_to_string(tasks, skip=('field',)))
colab.code('')
if fields:
colab.code('json_set_fields(TASKS, FIELDS)')
colab.code('')
colab.code('execute(CONFIG, TASKS, force=True)')
return colab.render()
|
Converts a JSON recipe into a Jupyter Notebook for Colabs.
Sets up multiple steps to execute recipe:
1. Install starthinker from repository
2. Get Cloud Project ID.
3. Get Client Credentials (optional if User Credentials exist).
4. Enter Recipe parameters if fields present.
5. Execute recipe tasks.
Args:
* name: (string) The name of the notebook.
* description: (string) A description of the recipe.
* instructions: (string) Recipe manual instructions, for example connecting datastudios.
* tasks: (list) The task JSON to execute.
* parameters: (dict) Values for field parameters in tasks, optional.
* project: (string) The GCP project id.
* client_credentials: (string) The GCP Desktop Client Credentials in JSON string.
* user_credentials: (string) Not used, placeholder.
* service_credentials: (string) Not used, placeholder.
Returns:
* (string) Rendered notebook source code to be written to a ipynb file.
|
github-repos
|
def __init__(self, time_elements_tuple=None):
super(TimeElements, self).__init__()
self._number_of_seconds = None
self._precision = definitions.PRECISION_1_SECOND
self._time_elements_tuple = time_elements_tuple
if time_elements_tuple:
if len(time_elements_tuple) < 6:
raise ValueError((
'Invalid time elements tuple at least 6 elements required, '
'got: {0:d}').format(len(time_elements_tuple)))
self._number_of_seconds = self._GetNumberOfSecondsFromElements(
*time_elements_tuple)
|
Initializes time elements.
Args:
time_elements_tuple (Optional[tuple[int, int, int, int, int, int]]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
Raises:
ValueError: if the time elements tuple is invalid.
|
juraj-google-style
|
def readInput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
self.project_directory = directory
with tmp_chdir(directory):
session.add(self)
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
if (spatialReferenceID is None):
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
self._commit(session, self.COMMIT_ERROR_MESSAGE)
|
Read only input files for a GSSHA project into the database.
Use this method to read a project when only pre-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
|
codesearchnet
|
def function(inputs, outputs, updates=None, name=None, **kwargs):
if ops.executing_eagerly_outside_functions():
if kwargs:
raise ValueError('Session keyword arguments are not supported during eager execution. You passed: %s' % (kwargs,))
if updates:
raise ValueError('`updates` argument is not supported during eager execution. You passed: %s' % (updates,))
from tensorflow.python.keras import models
from tensorflow.python.keras.utils import tf_utils
model = models.Model(inputs=inputs, outputs=outputs)
wrap_outputs = isinstance(outputs, list) and len(outputs) == 1
def func(model_inputs):
outs = model(model_inputs)
if wrap_outputs:
outs = [outs]
return tf_utils.sync_to_numpy_or_python_type(outs)
return func
if kwargs:
for key in kwargs:
if key not in tf_inspect.getfullargspec(session_module.Session.run)[0] and key not in ['inputs', 'outputs', 'updates', 'name']:
msg = 'Invalid argument "%s" passed to K.function with TensorFlow backend' % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, name=name, **kwargs)
|
Instantiates a Keras function.
Args:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
Output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
|
github-repos
|
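A minimal usage sketch of the eager-execution branch, assuming TensorFlow 2.x where `tf.keras.backend.function` is the public entry point for the code above; the symbolic inputs/outputs and the doubling op are illustrative.

```python
import numpy as np
import tensorflow as tf

x = tf.keras.Input(shape=(3,))
y = x * 2.0  # symbolic output built from the input
double = tf.keras.backend.function([x], [y])
double([np.ones((1, 3), dtype=np.float32)])
# -> [array([[2., 2., 2.]], dtype=float32)]
```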
def _handle_offset_response(self, future, response):
timestamp_offset_map = {}
for topic, part_data in response.topics:
for partition_info in part_data:
partition, error_code = partition_info[:2]
partition = TopicPartition(topic, partition)
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
if response.API_VERSION == 0:
offsets = partition_info[2]
assert len(offsets) <= 1, 'Expected OffsetResponse with one offset'
if not offsets:
offset = UNKNOWN_OFFSET
else:
offset = offsets[0]
log.debug("Handling v0 ListOffsetResponse response for %s. "
"Fetched offset %s", partition, offset)
if offset != UNKNOWN_OFFSET:
timestamp_offset_map[partition] = (offset, None)
else:
timestamp, offset = partition_info[2:]
log.debug("Handling ListOffsetResponse response for %s. "
"Fetched offset %s, timestamp %s",
partition, offset, timestamp)
if offset != UNKNOWN_OFFSET:
timestamp_offset_map[partition] = (offset, timestamp)
elif error_type is Errors.UnsupportedForMessageFormatError:
log.debug("Cannot search by timestamp for partition %s because the"
" message format version is before 0.10.0", partition)
elif error_type is Errors.NotLeaderForPartitionError:
log.debug("Attempt to fetch offsets for partition %s failed due"
" to obsolete leadership information, retrying.",
partition)
future.failure(error_type(partition))
return
elif error_type is Errors.UnknownTopicOrPartitionError:
log.warning("Received unknown topic or partition error in ListOffset "
"request for partition %s. The topic/partition " +
"may not exist or the user may not have Describe access "
"to it.", partition)
future.failure(error_type(partition))
return
else:
log.warning("Attempt to fetch offsets for partition %s failed due to:"
" %s", partition, error_type)
future.failure(error_type(partition))
return
if not future.is_done:
future.success(timestamp_offset_map)
|
Callback for the response of the list offset call above.
Arguments:
future (Future): the future to update based on response
response (OffsetResponse): response from the server
Raises:
AssertionError: if response does not match partition
|
juraj-google-style
|
def api_keys(self, serverid, api_key):
if serverid and api_key:
self.can_query = True
self.serverid = int(serverid)
self.api_key = api_key
self.webhook_url = self.__base_url + str(self.serverid) + '/' + self.api_key
|
Load object with id/API pair
Args:
serverid (int): Discord 'guild' webhook is attached to
api_key (`str`:uuid): unique ID for webhook
|
juraj-google-style
|
def generate_name_variations(name):
def _update_name_variations_with_product(set_a, set_b):
name_variations.update([unidecode(((names_variation[0] + separator) + names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower() for names_variation in product(set_a, set_b) for separator in _LASTNAME_NON_LASTNAME_SEPARATORS])
parsed_name = ParsedName.loads(name)
if (len(parsed_name) == 1):
return [parsed_name.dumps().lower()]
name_variations = set()
non_lastnames = [non_lastname for non_lastname in (parsed_name.first_list + parsed_name.suffix_list) if non_lastname]
if ((len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD) or (len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD)):
LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
return [name]
non_lastnames_variations = _generate_non_lastnames_variations(non_lastnames)
lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
_update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
_update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
return list(name_variations)
|
Generate name variations for a given name.
Args:
name (six.text_type): The name whose variations are to be generated.
Returns:
list: All the name variations for the given name.
Notes:
Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
|
codesearchnet
|
def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template):
s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)
try:
s3.head_bucket(Bucket=bucket_name)
except ClientError as ex:
status_code = ex.response['ResponseMetadata']['HTTPStatusCode']
if status_code == 403:
raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(
bucket_name
))
elif status_code == 404:
try:
s3.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': bucket_region
}
)
auditlog(
event='cloudtrail.create_s3_bucket',
actor=cls.ns,
data={
'account': bucket_account.account_name,
'bucket_region': bucket_region,
'bucket_name': bucket_name
}
)
except Exception:
raise Exception('An error occurred while trying to create the bucket, cannot continue')
try:
bucket_acl = template.render(
bucket_name=bucket_name,
account_id=bucket_account.account_number
)
s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)
except Exception as ex:
raise Warning('An error occurred while setting bucket policy: {}'.format(ex))
|
Creates the S3 bucket on the account specified as the destination account for log files
Args:
bucket_name (`str`): Name of the S3 bucket
bucket_region (`str`): AWS Region for the bucket
bucket_account (:obj:`Account`): Account to create the S3 bucket in
template (:obj:`Template`): Jinja2 Template object for the bucket policy
Returns:
`None`
|
juraj-google-style
|
def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:
color = lib.TCOD_image_get_pixel(self.image_c, x, y)
return color.r, color.g, color.b
|
Get the color of a pixel in this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the pixels color value.
Values are in a 0 to 255 range.
|
juraj-google-style
|
def add_defaults(self, ctype: ContentType = None) -> "InstanceNode":
val = self.value
if not (isinstance(val, StructuredValue) and self.is_internal()):
return self
res = self
if isinstance(val, ObjectValue):
if val:
for mn in self._member_names():
m = res._member(mn) if res is self else res.sibling(mn)
res = m.add_defaults(ctype)
res = res.up()
return self.schema_node._add_defaults(res, ctype)
if not val:
return res
en = res[0]
while True:
res = en.add_defaults(ctype)
try:
en = res.next()
except NonexistentInstance:
break
return res.up()
|
Return the receiver with defaults added recursively to its value.
Args:
ctype: Content type of the defaults to be added. If it is
``None``, the content type will be the same as receiver's.
|
juraj-google-style
|
def mnist_model(image, labels, mesh):
batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
row_blocks_dim = mtf.Dimension('row_blocks', 4)
col_blocks_dim = mtf.Dimension('col_blocks', 4)
rows_dim = mtf.Dimension('rows_size', 7)
cols_dim = mtf.Dimension('cols_size', 7)
classes_dim = mtf.Dimension('classes', 10)
one_channel_dim = mtf.Dimension('one_channel', 1)
x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 4, 7, 4, 7, 1]), mtf.Shape([batch_dim, row_blocks_dim, rows_dim, col_blocks_dim, cols_dim, one_channel_dim]))
x = mtf.transpose(x, [batch_dim, row_blocks_dim, col_blocks_dim, rows_dim, cols_dim, one_channel_dim])
fh_dim = mtf.Dimension('fh', 9)
fw_dim = mtf.Dimension('fw', 9)
filters1_dim = mtf.Dimension('filters1', 16)
filters2_dim = mtf.Dimension('filters2', 16)
kernel1 = mtf.get_variable(mesh, 'kernel1', [fh_dim, fw_dim, one_channel_dim, filters1_dim])
kernel2 = mtf.get_variable(mesh, 'kernel2', [fh_dim, fw_dim, filters1_dim, filters2_dim])
f1 = mtf.relu(mtf.conv2d_with_blocks(x, kernel1, strides=[1, 1, 1, 1], padding='SAME', h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim))
f2 = mtf.relu(mtf.conv2d_with_blocks(f1, kernel2, strides=[1, 1, 1, 1], padding='SAME', h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim))
x = mtf.reduce_mean(f2, reduced_dim=filters2_dim)
hidden_dim1 = mtf.Dimension('hidden1', FLAGS.hidden_size)
hidden_dim2 = mtf.Dimension('hidden2', FLAGS.hidden_size)
h1 = mtf.layers.dense(x, hidden_dim1, reduced_dims=x.shape.dims[(- 4):], activation=mtf.relu, name='hidden1')
h2 = mtf.layers.dense(h1, hidden_dim2, activation=mtf.relu, name='hidden2')
logits = mtf.layers.dense(h2, classes_dim, name='logits')
if (labels is None):
loss = None
else:
labels = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), mtf.Shape([batch_dim]))
loss = mtf.layers.softmax_cross_entropy_with_logits(logits, mtf.one_hot(labels, classes_dim), classes_dim)
loss = mtf.reduce_mean(loss)
return (logits, loss)
|
The model.
Args:
image: tf.Tensor with shape [batch, 28*28]
labels: a tf.Tensor with shape [batch] and dtype tf.int32
mesh: a mtf.Mesh
Returns:
logits: a mtf.Tensor with shape [batch, 10]
loss: a mtf.Tensor with shape []
|
codesearchnet
|
def set_charge_and_spin(self, charge, spin_multiplicity=None):
self._charge = charge
nelectrons = 0
for site in self._sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecie):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
|
Set the charge and spin multiplicity.
Args:
charge (int): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
|
juraj-google-style
|
def __init__(self, conf, map_name, automount_mountpoint=None):
super(Cache, self).__init__()
self.log = logging.getLogger(__name__)
self.conf = conf
self.output_dir = conf.get('dir', '.')
self.automount_mountpoint = automount_mountpoint
self.map_name = map_name
if map_name == config.MAP_PASSWORD:
self.data = passwd.PasswdMap()
elif map_name == config.MAP_SSHKEY:
self.data = sshkey.SshkeyMap()
elif map_name == config.MAP_GROUP:
self.data = group.GroupMap()
elif map_name == config.MAP_SHADOW:
self.data = shadow.ShadowMap()
elif map_name == config.MAP_NETGROUP:
self.data = netgroup.NetgroupMap()
elif map_name == config.MAP_AUTOMOUNT:
self.data = automount.AutomountMap()
else:
raise error.UnsupportedMap('Cache does not support %s' % map_name)
|
Initialise the Cache object.
Args:
conf: A dictionary of key/value pairs
map_name: A string representation of the map type
automount_mountpoint: A string containing the automount mountpoint,
used only by automount maps.
Raises:
UnsupportedMap: for map types we don't know about
|
github-repos
|
def _bind_topics(self, topics):
self.client.subscribe(topics.status, self._on_status_message)
self.client.subscribe(topics.tracing, self._on_trace)
self.client.subscribe(topics.streaming, self._on_report)
self.client.subscribe(topics.response, self._on_response_message)
|
Subscribe to all the topics we need to communication with this device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we are connecting to.
|
codesearchnet
|
def to_variable(self, node: 'cfg.CFGNode') -> 'cfg.Variable':
return self.ctx.program.NewVariable([self], source_set=[], where=node)
|
Build a variable out of this abstract value.
Args:
node: The current CFG node.
Returns:
A cfg.Variable.
|
github-repos
|
def _setup_class(self):
class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS, self.TAG)
class_record.test_begin()
self.current_test_info = runtime_test_info.RuntimeTestInfo(STAGE_NAME_SETUP_CLASS, self.log_path, class_record)
expects.recorder.reset_internal_states(class_record)
try:
with self._log_test_stage(STAGE_NAME_SETUP_CLASS):
self.setup_class()
except signals.TestAbortSignal:
raise
except Exception as e:
logging.exception('Error in %s
class_record.test_error(e)
self.results.add_class_error(class_record)
self._exec_procedure_func(self._on_fail, class_record)
class_record.update_record()
self.summary_writer.dump(class_record.to_dict(), records.TestSummaryEntryType.RECORD)
self._skip_remaining_tests(e)
return self.results
if expects.recorder.has_error:
self._exec_procedure_func(self._on_fail, class_record)
class_record.test_error()
class_record.update_record()
self.summary_writer.dump(class_record.to_dict(), records.TestSummaryEntryType.RECORD)
self.results.add_class_error(class_record)
self._skip_remaining_tests(class_record.termination_signal.exception)
return self.results
|
Proxy function to guarantee the base implementation of setup_class
is called.
Returns:
If `self.results` is returned instead of None, this means something
has gone wrong, and the rest of the test class should not execute.
|
github-repos
|
def TensorShapeProtoToList(shape):
return [dim.size for dim in shape.dim]
|
Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
|
github-repos
|
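A small usage sketch, assuming the TensorFlow protobuf bindings are importable as `tensorflow.core.framework.tensor_shape_pb2`:

```python
from tensorflow.core.framework import tensor_shape_pb2

proto = tensor_shape_pb2.TensorShapeProto(
    dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=s) for s in (2, 3, 5)])
TensorShapeProtoToList(proto)  # -> [2, 3, 5]
```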
def BuildCampaignOperations(batch_job_helper, budget_operations, number_of_campaigns=1):
budget_id = budget_operations[0]['operand']['budgetId']
campaign_operations = [{'xsi_type': 'CampaignOperation', 'operand': {'name': ('Batch Campaign
return campaign_operations
|
Builds the operations needed to create a new Campaign.
Note: When the Campaigns are created, they will have a different Id than those
generated here as a temporary Id. This is just used to identify them in the
BatchJobService.
Args:
batch_job_helper: a BatchJobHelper instance.
budget_operations: a list containing the operation that will add the budget
used by these Campaigns.
number_of_campaigns: an int number defining the number of campaigns to be
created.
Returns:
a list containing the operations to create the desired number of Campaigns.
|
codesearchnet
|
def swo_start(self, swo_speed=9600):
if self.swo_enabled():
self.swo_stop()
info = structs.JLinkSWOStartInfo()
info.Speed = swo_speed
res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.START,
ctypes.byref(info))
if res < 0:
raise errors.JLinkException(res)
self._swo_enabled = True
return None
|
Starts collecting SWO data.
Note:
If SWO is already enabled, it will first stop SWO before enabling it
again.
Args:
self (JLink): the ``JLink`` instance
swo_speed (int): the frequency in Hz used by the target to communicate
Returns:
``None``
Raises:
JLinkException: on error
|
juraj-google-style
|
def setup(self,
hunt_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
super(GRRHuntDownloader, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.hunt_id = hunt_id
self.output_path = tempfile.mkdtemp()
|
Initializes a GRR Hunt file collector.
Args:
hunt_id: Hunt ID to download results from.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: comma-separated list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
|
juraj-google-style
|
def __init__(self, email, password):
self.email = email
self.password = password
|
Initialize the AMYLPRED object with your email and password used to login here.
Args:
email (str): Account email
password (str): Account password
|
juraj-google-style
|
def render_to_terminal(self, array, cursor_pos=(0, 0)):
for_stdout = self.fmtstr_to_stdout_xform()
if (not self.hide_cursor):
self.write(self.t.hide_cursor)
(height, width) = (self.t.height, self.t.width)
if ((height != self._last_rendered_height) or (width != self._last_rendered_width)):
self.on_terminal_size_change(height, width)
current_lines_by_row = {}
rows_for_use = list(range(self.top_usable_row, height))
shared = min(len(array), len(rows_for_use))
for (row, line) in zip(rows_for_use[:shared], array[:shared]):
current_lines_by_row[row] = line
if (line == self._last_lines_by_row.get(row, None)):
continue
self.write(self.t.move(row, 0))
self.write(for_stdout(line))
if (len(line) < width):
self.write(self.t.clear_eol)
rest_of_lines = array[shared:]
rest_of_rows = rows_for_use[shared:]
for row in rest_of_rows:
if (self._last_lines_by_row and (row not in self._last_lines_by_row)):
continue
self.write(self.t.move(row, 0))
self.write(self.t.clear_eol)
self.write(self.t.clear_bol)
current_lines_by_row[row] = None
offscreen_scrolls = 0
for line in rest_of_lines:
self.scroll_down()
if (self.top_usable_row > 0):
self.top_usable_row -= 1
else:
offscreen_scrolls += 1
current_lines_by_row = dict((((k - 1), v) for (k, v) in current_lines_by_row.items()))
logger.debug(('new top_usable_row: %d' % self.top_usable_row))
self.write(self.t.move((height - 1), 0))
self.write(for_stdout(line))
current_lines_by_row[(height - 1)] = line
logger.debug(('lines in last lines by row: %r' % self._last_lines_by_row.keys()))
logger.debug(('lines in current lines by row: %r' % current_lines_by_row.keys()))
self._last_cursor_row = max(0, ((cursor_pos[0] - offscreen_scrolls) + self.top_usable_row))
self._last_cursor_column = cursor_pos[1]
self.write(self.t.move(self._last_cursor_row, self._last_cursor_column))
self._last_lines_by_row = current_lines_by_row
if (not self.hide_cursor):
self.write(self.t.normal_cursor)
return offscreen_scrolls
|
Renders array to terminal, returns the number of lines scrolled offscreen
Returns:
Number of times scrolled
Args:
array (FSArray): Grid of styled characters to be rendered.
If array received is of width too small, render it anyway
if array received is of width too large, render it anyway
if array received is of height too small, render it anyway
if array received is of height too large, render it, scroll down,
and render the rest of it, then return how much we scrolled down
|
codesearchnet
|
def json(self) -> list:
json_controls = [control.json() for control in self.controls]
return json_controls
|
Returns list of json compatible states of the RichMessage instance
nested controls.
Returns:
json_controls: Json representation of RichMessage instance
nested controls.
|
codesearchnet
|
def _can_connect(host, port=22):
try:
logger.debug('Testing connection to host %s', host)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host,
port=port)
client.close()
logger.info('Can connect to host %s', host)
return True
except Exception as e:
logger.info('Cannot connect to host %s', host)
logger.info('Connection failed with exception: \n %s', str(e))
return False
|
Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on.
|
juraj-google-style
|
def __getitem__(self, key: Any) -> 'ColumnExpressionBuilder':
item = self._builder[key]
if isinstance(item, expressions.Builder) and self._sealed:
raise self._fhir_path_sealed_error(key)
return ColumnExpressionBuilder._wrap_any(self, item)
|
Redirects to the expressions.Builder to get the item.
Args:
key: the key of the item.
Returns:
A ColumnExpressionBuilder, because the item got from the
expressions.Builder is always the type of Builder.
Raises:
AttributeError: if the FHIR path in this class is already sealed.
TypeError: if getting the key from self._builder fails.
|
github-repos
|
def inference(self, observed_arr):
decoded_arr = self.__encoder_decoder_controller.inference(observed_arr)
encoded_arr = self.__encoder_decoder_controller.get_feature_points()
_ = self.__retrospective_encoder.inference(decoded_arr)
re_encoded_arr = self.__retrospective_encoder.get_feature_points()
self.__inferenced_tuple = (observed_arr, encoded_arr, decoded_arr, re_encoded_arr)
return re_encoded_arr
|
Inference by the model.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced feature points.
|
juraj-google-style
|
def astype(self, col_dtypes, **kwargs):
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
|
Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
|
juraj-google-style
|
def gradient_helper(optimizer, loss, var_list=None):
if (var_list is None):
var_list = tf.compat.v1.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
grads = [pair[0] for pair in grads_and_vars]
return (grads, optimizer.apply_gradients(grads_and_vars))
|
A helper to get the gradients out at each step.
Args:
optimizer: the optimizer op.
loss: the op that computes your loss value.
Returns: the gradient tensors and the train_step op.
|
codesearchnet
|
def save_as(self, new_filename):
xfile._save_file(
self._filename, self._workbookTree, new_filename)
|
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
|
juraj-google-style
|
def to_element(self, include_namespaces=False):
didl_item = DidlItem(
title="DUMMY",
parent_id="DUMMY",
item_id=self.item_id,
desc=self.desc,
resources=self.resources
)
return didl_item.to_element(include_namespaces=include_namespaces)
|
Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: The (XML) Element representation of
this object
|
juraj-google-style
|
def member_command(self, repl_id, member_id, command):
repl = self[repl_id]
result = repl.member_command(member_id, command)
self[repl_id] = repl
return result
|
Apply a command (start, stop, restart) to a member of the replica set.
Args:
repl_id - replica set identity
member_id - member index
command - command: start, stop, restart
Returns True if the operation succeeds, otherwise False.
|
juraj-google-style
|
def trace_region(self, region_index):
cmd = enums.JLinkTraceCommand.GET_REGION_PROPS_EX
region = structs.JLinkTraceRegion()
region.RegionIndex = int(region_index)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(region))
if (res == 1):
raise errors.JLinkException('Failed to get trace region.')
return region
|
Retrieves the properties of a trace region.
Args:
self (JLink): the ``JLink`` instance.
region_index (int): the trace region index.
Returns:
An instance of ``JLinkTraceRegion`` describing the specified region.
|
juraj-google-style
|
def _det_large_enough_mask(x, det_bounds):
return tf.cast(tf.linalg.det(x) > det_bounds, dtype=x.dtype)
|
Returns whether the input matches the given determinant limit.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
det_bounds: A floating-point `Tensor` that must broadcast to shape
`[B1, ..., Bn]`, giving the desired lower bound on the
determinants in `x`.
Returns:
mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each
scalar is 1 if the corresponding matrix had determinant above
the corresponding bound, otherwise 0.
|
juraj-google-style
|
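A minimal sketch of the mask on a batch of diagonal matrices, assuming TensorFlow 2.x; the example values are illustrative.

```python
import tensorflow as tf

# Three 2x2 diagonal matrices with determinants 1.0, 4.0 and 0.25.
x = tf.linalg.diag(tf.constant([[1.0, 1.0], [2.0, 2.0], [0.5, 0.5]]))
_det_large_enough_mask(x, det_bounds=tf.constant(0.5))
# -> [1., 1., 0.]  (only the first two determinants exceed 0.5)
```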
def depth_soil_conductivity(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_soil_conductivity`'.format(value))
self._depth_soil_conductivity = value
|
Corresponds to IDD Field `depth_soil_conductivity`
Args:
value (float): value for IDD Field `depth_soil_conductivity`
Unit: W/m-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def join(self) -> None:
self._server.join()
|
Blocks until the server has shut down.
This is useful when starting a dedicated worker process.
```
worker_server = tf.data.experimental.service.WorkerServer(
port=5051, dispatcher_address="localhost:5050")
worker_server.join()
```
This method currently blocks forever.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
joining the server.
|
github-repos
|
def _write_version(self, data, model):
vdata = {'data': data, 'key': model.key, 'model': model.Meta.bucket_name, 'timestamp': time.time()}
obj = version_bucket.new(data=vdata)
obj.add_index('key_bin', model.key)
obj.add_index('model_bin', vdata['model'])
obj.add_index('timestamp_int', int(vdata['timestamp']))
obj.store()
return obj.key
|
Writes a copy of the object's current state to the write-once mirror bucket.
Args:
data (dict): Model instance's all data for versioning.
model (instance): Model instance.
Returns:
Key of version record.
key (str): Version_bucket key.
|
codesearchnet
|
def floor(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.floor, tf.float32)
|
Returns a TensorFluent for the floor function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the floor function.
|
juraj-google-style
|
def parse_struct(path_dir):
with open(os.path.join(path_dir, "boltztrap.struct"), 'r') as f:
tokens = f.readlines()
return Lattice([[Length(float(tokens[i].split()[j]), "bohr").
to("ang") for j in range(3)] for i in
range(1, 4)]).volume
|
Parses boltztrap.struct file (only the volume)
Args:
path_dir: (str) dir containing the boltztrap.struct file
Returns:
(float) volume
|
juraj-google-style
|
def get_is_group_maintainer(self, grp_name, user):
self.project_service.set_auth(self._token_project)
return self.project_service.get_is_group_maintainer(grp_name, user)
|
Check if the given user is a maintainer of the named group.
Args:
grp_name (string): Name of group.
user (string): User of interest.
Returns:
(bool): False if user is not a maintainer.
|
juraj-google-style
|
def flatten_with_path(structure):
return tree_impl.flatten_with_path(structure)
|
Flattens a possibly nested structure into a list.
This is a variant of flatten() which produces a
list of pairs: `(path, item)`. A path is a tuple of indices and/or keys
which uniquely identifies the position of the corresponding item.
Dictionaries with non-sortable keys are not supported.
Examples:
>>> keras.flatten_with_path([{"foo": 42}])
[((0, 'foo'), 42)]
Args:
structure: An arbitrarily nested structure.
Returns:
A list of `(path, item)` pairs corresponding to the flattened
version of the input `structure`.
|
github-repos
|
def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None, row_splits_dtype=dtypes.int64, flat_values_spec=None):
self._shape = tensor_shape.as_shape(shape)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if flat_values_spec is not None:
if dtype is None:
dtype = flat_values_spec.dtype
elif dtype != flat_values_spec.dtype:
raise ValueError('dtype must be the same as flat_values_spec.dtype')
elif dtype is None:
raise ValueError('At least one of dtype or flat_values_spec must be provided')
self._dtype = dtypes.as_dtype(dtype)
self._flat_values_spec = flat_values_spec
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError('Must specify ragged_rank or a shape with a known rank.')
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError(f'Argument `ragged_rank` must be an int. Received {ragged_rank}.')
if rank is not None:
if ragged_rank >= rank:
raise ValueError(f'Argument `ragged_rank` ({ragged_rank}) must be less than rank ({rank}).')
|
Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If a
shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the number of times the RaggedTensor's
flat_values is partitioned. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be
provided when the flat_values is a CompositeTensor rather than a Tensor.
If both `dtype` and `flat_values_spec` are provided, `dtype` must
be the same as `flat_values_spec.dtype`. (experimental)
|
github-repos
|
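A short usage sketch, assuming TensorFlow 2.x where this spec is exposed as `tf.RaggedTensorSpec`:

```python
import tensorflow as tf

spec = tf.RaggedTensorSpec(shape=[2, None], dtype=tf.int32)
rt = tf.ragged.constant([[1, 2, 3], [4]], dtype=tf.int32)
spec.is_compatible_with(rt)  # -> True
spec.ragged_rank             # -> 1 (defaults to shape rank - 1)
```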
def read_int32(self, little_endian=True):
if little_endian:
endian = '<'
else:
endian = '>'
return self.unpack(('%si' % endian), 4)
|
Read 4 bytes as a signed integer value from the stream.
Args:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int:
|
codesearchnet
|
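The `unpack` call above boils down to `struct` format codes; a minimal sketch of the endianness difference using only the standard library:

```python
import struct

raw = b'\x01\x00\x00\x00'
struct.unpack('<i', raw)[0]  # -> 1          (little-endian, the default above)
struct.unpack('>i', raw)[0]  # -> 16777216   (big-endian)
```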
def delta_hv(scatterer):
Z = scatterer.get_Z()
return np.arctan2((Z[(2, 3)] - Z[(3, 2)]), ((- Z[(2, 2)]) - Z[(3, 3)]))
|
Delta_hv for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
Delta_hv [rad].
|
codesearchnet
|
def GetMountPoint(self, path=None):
path = os.path.abspath(
client_utils.CanonicalPathToLocalPath(path or self.path))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
|
Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None. If path is None the
value in self.path is used.
Returns:
path string of the mount point
|
juraj-google-style
|
def do_phonefy(self, query, **kwargs):
results = []
test = self.check_phonefy(query, kwargs)
if test:
r = {'type': 'i3visio.phone', 'value': ((self.platformName + ' - ') + query), 'attributes': []}
try:
aux = {'type': 'i3visio.uri', 'value': self.createURL(query, mode='phonefy'), 'attributes': []}
r['attributes'].append(aux)
except:
pass
aux = {'type': 'i3visio.platform', 'value': self.platformName, 'attributes': []}
r['attributes'].append(aux)
r['attributes'] += self.process_phonefy(test)
results.append(r)
return results
|
Verifying a phonefy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
|
codesearchnet
|
def Run(script, container=None, exit_on_error=False, gas=Fixed8.Zero(), test_mode=True):
from neo.Core.Blockchain import Blockchain
from neo.SmartContract.StateMachine import StateMachine
from neo.EventHub import events
bc = Blockchain.Default()
accounts = DBCollection(bc._db, DBPrefix.ST_Account, AccountState)
assets = DBCollection(bc._db, DBPrefix.ST_Asset, AssetState)
validators = DBCollection(bc._db, DBPrefix.ST_Validator, ValidatorState)
contracts = DBCollection(bc._db, DBPrefix.ST_Contract, ContractState)
storages = DBCollection(bc._db, DBPrefix.ST_Storage, StorageItem)
script_table = CachedScriptTable(contracts)
service = StateMachine(accounts, validators, assets, contracts, storages, None)
engine = ApplicationEngine(
trigger_type=TriggerType.Application,
container=container,
table=script_table,
service=service,
gas=gas,
testMode=test_mode,
exit_on_error=exit_on_error
)
script = binascii.unhexlify(script)
engine.LoadScript(script)
try:
success = engine.Execute()
engine.testMode = True
service.ExecutionCompleted(engine, success)
except Exception as e:
engine.testMode = True
service.ExecutionCompleted(engine, False, e)
for event in service.events_to_dispatch:
events.emit(event.event_type, event)
return engine
|
Runs a script in a test invoke environment
Args:
script (bytes): The script to run
container (neo.Core.TX.Transaction): [optional] the transaction to use as the script container
Returns:
ApplicationEngine
|
juraj-google-style
|
def save(self, data: List[dict]):
with open(self.output_path, 'w') as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
|
Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].
Args:
data (`List[dict]`): The data to store.
|
github-repos
|
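A minimal sketch of the same `csv.DictWriter` pattern, assuming a writable `out.csv` path; the rows are illustrative.

```python
import csv

data = [{"name": "a", "score": 1}, {"name": "b", "score": 2}]
with open("out.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, list(data[0].keys()))  # header order taken from the first row
    writer.writeheader()
    writer.writerows(data)
# out.csv now contains:
# name,score
# a,1
# b,2
```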
def _check_archive_signature(archive_file: io.BufferedIOBase) -> None:
signature = archive_file.read(8)
if signature != b'!<arch>\n':
raise RuntimeError('Invalid archive file format.')
|
Checks if the file has the correct archive header signature.
The cursor is moved to the first available file header section after
successfully checking the signature.
Args:
archive_file: The archive file object pointing at its beginning.
Raises:
RuntimeError: The archive signature is invalid.
|
github-repos
|
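A small usage sketch with in-memory buffers; the trailing bytes after the signature are placeholders.

```python
import io

buf = io.BytesIO(b'!<arch>\n...first file header would follow here...')
_check_archive_signature(buf)  # passes silently
buf.tell()                     # -> 8, cursor now sits at the first file header

_check_archive_signature(io.BytesIO(b'not an ar file'))  # raises RuntimeError
```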
def __init__(self, job_id=None, future=None):
self._job_id = str(uuid.uuid4()) if job_id is None else job_id
self._future = future
self._is_complete = False
self._errors = None
self._fatal_error = None
self._result = None
self._start_time = datetime.datetime.utcnow()
self._end_time = None
|
Initializes an instance of a Job.
Args:
job_id: a unique ID for the job. If None, a UUID will be generated.
future: the Future associated with the Job, if any.
|
juraj-google-style
|
def get_orientation_error(target_orn, current_orn):
current_orn = np.array(
[current_orn[3], current_orn[0], current_orn[1], current_orn[2]]
)
target_orn = np.array([target_orn[3], target_orn[0], target_orn[1], target_orn[2]])
pinv = np.zeros((3, 4))
pinv[0, :] = [-current_orn[1], current_orn[0], -current_orn[3], current_orn[2]]
pinv[1, :] = [-current_orn[2], current_orn[3], current_orn[0], -current_orn[1]]
pinv[2, :] = [-current_orn[3], -current_orn[2], current_orn[1], current_orn[0]]
orn_error = 2.0 * pinv.dot(np.array(target_orn))
return orn_error
|
Returns the difference between two quaternion orientations as a 3 DOF numpy array.
For use in an impedance controller / task-space PD controller.
Args:
target_orn: 4-dim iterable, desired orientation as a (x, y, z, w) quaternion
current_orn: 4-dim iterable, current orientation as a (x, y, z, w) quaternion
Returns:
orn_error: 3-dim numpy array for current orientation error, corresponds to
(target_orn - current_orn)
|
juraj-google-style
|
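A quick sanity-check sketch: the error between an orientation and itself is zero. The identity quaternion below is illustrative.

```python
import numpy as np

identity = np.array([0.0, 0.0, 0.0, 1.0])   # (x, y, z, w)
get_orientation_error(identity, identity)    # -> array([0., 0., 0.])
```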
def clean_file(c_source, virtualenv_dirname):
with open(c_source, 'r') as file_obj:
contents = file_obj.read().rstrip()
py_version = 'python{}.{}'.format(*sys.version_info[:2])
lib_path = os.path.join('.nox', virtualenv_dirname, 'lib', py_version, 'site-packages', '')
contents = contents.replace(lib_path, '')
lines = contents.split('\n')
with open(c_source, 'w') as file_obj:
for line in lines:
file_obj.write((line.rstrip() + '\n'))
|
Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME}/lib/...``).
|
codesearchnet
|
def load_notebook_node(notebook_path):
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': __version__,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = []
if not hasattr(cell.metadata, 'papermill'):
cell.metadata['papermill'] = dict()
return nb
|
Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
|
juraj-google-style
|
def _ReadLabels(self, artifact_definition_values, artifact_definition, name):
labels = artifact_definition_values.get('labels', [])
undefined_labels = set(labels).difference(self.labels)
if undefined_labels:
raise errors.FormatError(
'Artifact definition: {0:s} found undefined labels: {1:s}.'.format(
name, ', '.join(undefined_labels)))
artifact_definition.labels = labels
|
Reads the optional artifact definition labels.
Args:
artifact_definition_values (dict[str, object]): artifact definition
values.
artifact_definition (ArtifactDefinition): an artifact definition.
name (str): name of the artifact definition.
Raises:
FormatError: if there are undefined labels.
|
juraj-google-style
|
def remove_droplets(self, droplet_ids):
return self.get_data(
"load_balancers/%s/droplets/" % self.id,
type=DELETE,
params={"droplet_ids": droplet_ids}
)
|
Unassign a LoadBalancer.
Args:
droplet_ids (obj:`list` of `int`): A list of Droplet IDs
|
juraj-google-style
|
def register_once(event_name: str, callback: Callable[..., None], info: Hashable) -> None:
ip = IPython.get_ipython()
info = hash(info)
for old_callback in ip.events.callbacks[event_name]:
if getattr(old_callback, '__ecolab_event__', None) == info:
ip.events.unregister(event_name, old_callback)
break
callback.__ecolab_event__ = info
ip.events.register(event_name, callback)
|
Register the IPython event once (replace the previous event if exists).
Alias for `InteractiveShell.events.register` but replaces previous event
if it exists.
This avoids duplicated events after ecolab reload or running cell twice.
Args:
event_name: Forwarded to `ip.events.register`
callback: Forwarded to `ip.events.register`
info: If an event with the same info already exists, it is replaced.
|
github-repos
|
def tox(args=''):
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && python{highest_minor_python} -m tox {args}'))
|
Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
|
codesearchnet
|
def can_encode(nested_structure):
try:
encode_structure(nested_structure)
except NotEncodableError:
return False
return True
|
Determines whether a nested structure can be encoded into a proto.
Args:
nested_structure: Structure to encode.
Returns:
True if the nested structured can be encoded.
|
github-repos
|
def __init__(self, protojson_protocol=None, **kwargs):
super(MessageJSONEncoder, self).__init__(**kwargs)
self.__protojson_protocol = (
protojson_protocol or ProtoJson.get_default())
|
Constructor.
Args:
protojson_protocol: ProtoJson instance.
|
juraj-google-style
|
def replace_666(meta_df, convert_neg_666):
if convert_neg_666:
out_df = meta_df.replace([-666, "-666", -666.0], np.nan)
else:
out_df = meta_df.replace([-666, -666.0], "-666")
return out_df
|
Replace -666, -666.0, and optionally "-666".
Args:
meta_df (pandas df):
convert_neg_666 (bool):
Returns:
out_df (pandas df): updated meta_df
|
juraj-google-style
|
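A minimal sketch of both branches on a toy DataFrame; the column values are illustrative.

```python
import numpy as np
import pandas as pd

meta_df = pd.DataFrame({"a": [-666, 5], "b": ["-666", "x"]})

replace_666(meta_df, convert_neg_666=True)
# numeric -666 and the string "-666" both become NaN

replace_666(meta_df, convert_neg_666=False)
# numeric -666 becomes the string "-666"; existing "-666" strings are left alone
```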
def from_json(cls, json_value: JSONValueType, **kwargs) -> 'JSONConvertible':
assert isinstance(json_value, dict)
init_args = {k: from_json(v, **kwargs) for k, v in json_value.items() if k != JSONConvertible.TYPE_NAME_KEY}
return cls(**init_args)
|
Creates an instance of this class from a plain Python value.
NOTE(daiyip): ``pg.Symbolic`` overrides ``from_json`` class method.
Args:
json_value: JSON value type.
**kwargs: Keyword arguments as flags to control object creation.
Returns:
An instance of cls.
|
github-repos
|
def get(pb_or_dict, key, default=_SENTINEL):
(key, subkey) = _resolve_subkeys(key)
if isinstance(pb_or_dict, Message):
answer = getattr(pb_or_dict, key, default)
elif isinstance(pb_or_dict, collections.Mapping):
answer = pb_or_dict.get(key, default)
else:
raise TypeError('Tried to fetch a key %s on an invalid object; expected a dict or protobuf message.')
if (answer is _SENTINEL):
raise KeyError(key)
if (subkey and (answer is not default)):
return get(answer, subkey, default=default)
return answer
|
Retrieve the given key off of the object.
If a default is specified, return it if the key is not found, otherwise
raise KeyError.
Args:
pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object in question.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set, raises KeyError for not found
values.
Returns:
Any: The return value from the underlying message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If pb_or_dict is not a Message or Mapping.
|
codesearchnet
|
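A short sketch against a plain dict, assuming `_resolve_subkeys` splits a dotted key into a head key and the remainder (that helper is not shown above):

```python
config = {"retry": {"max_attempts": 5}}

get(config, "retry")                       # -> {'max_attempts': 5}
get(config, "retry.max_attempts")          # -> 5, via recursive lookup on the subkey
get(config, "retry.timeout", default=30)   # -> 30 (default instead of KeyError)
```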
def HasIndex(self, index):
for i in self.Items:
if i.index == index:
return True
return False
|
Flag indicating the index exists in any of the spent coin items.
Args:
index (int):
Returns:
|
juraj-google-style
|
def data_to_unicode(self, data):
if isinstance(data, dict):
return {self.to_unicode(k): self.to_unicode(v) for k, v in data.iteritems()}
if isinstance(data, list):
return [self.to_unicode(l) for l in data]
else:
return self.to_unicode(data)
|
Recursively convert a list or dictionary to unicode.
Args:
data: The data to be unicoded.
Returns:
Unicoded data.
|
juraj-google-style
|
def parse(type: Type):
def decorator(parser):
EnvVar.parsers[type] = parser
return parser
return decorator
|
Register a parser for an attribute type.
Parsers will be used to parse `str` type objects from either
the commandline arguments or environment variables.
Args:
type: the type the decorated function will be responsible
for parsing a environment variable to.
|
juraj-google-style
|
def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
cmd = ['docker', 'build', '-t', image_name, image_path]
if dockerfile_path:
cmd.extend(['-f', dockerfile_path])
for (k, v) in (build_args or {}).items():
cmd += ['--build-arg', '{}={}'.format(k, v)]
check_call(cmd)
|
Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`.
|
codesearchnet
|
def decode_base64_dict(data):
b64 = base64.b64decode(data['__ndarray__'])
array = np.copy(np.frombuffer(b64, dtype=data['dtype']))
if len(data['shape']) > 1:
array = array.reshape(data['shape'])
return array
|
Decode a base64 encoded array into a NumPy array.
Args:
data (dict) : encoded array data to decode
Data should have the format encoded by :func:`encode_base64_dict`.
Returns:
np.ndarray
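    Example (round trip built by hand; in practice the dict comes from :func:`encode_base64_dict`):
        import base64
        import numpy as np
        arr = np.arange(6, dtype=np.float64)
        data = {'__ndarray__': base64.b64encode(arr.tobytes()).decode('ascii'),
                'dtype': 'float64', 'shape': [2, 3]}
        decode_base64_dict(data)    # 2x3 float64 array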
|
juraj-google-style
|
def datetime_string(day, month, year, hour, minute):
if hour < 0 or hour > 23: hour = 0
    if minute < 0 or minute > 59: minute = 0
return '%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute)
|
Build a date string using the provided day, month, year numbers.
Automatically adds a leading zero to ``day`` and ``month`` if they only have
one digit.
Args:
day (int): Day number.
month(int): Month number.
year(int): Year number.
hour (int): Hour of the day in 24h format.
minute (int): Minute of the hour.
Returns:
str: Date in the format *YYYY-MM-DDThh:mm:ss*.
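    Example:
        datetime_string(5, 3, 2021, 14, 30)    # '2021-03-05T14:30:00'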
|
juraj-google-style
|
def _download_mlu_data(tmp_dir, data_dir):
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
filename = os.path.basename(_URL)
file_path = os.path.join(tmp_dir, filename)
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/63.0.3239.132 Safari/537.36"}
resp = requests.get(_URL, headers=headers)
with open(file_path, "wb") as f:
f.write(resp.content)
with tarfile.open(file_path, "r:gz") as tar:
tar.extractall(tmp_dir)
return tmp_dir
|
Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
|
juraj-google-style
|
def push_plugin(self, name):
        url = self._url('/plugins/{0}/push', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True)
|
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
|
juraj-google-style
|
class Document(object):
def __init__(self, content: str, type: Union[str, language_v1.Document.Type]='PLAIN_TEXT', language_hint: Optional[str]=None, encoding: Optional[str]='UTF8', from_gcs: bool=False):
self.content = content
self.type = type
self.encoding = encoding
self.language_hint = language_hint
self.from_gcs = from_gcs
@staticmethod
def to_dict(document: 'Document') -> Mapping[str, Optional[str]]:
if document.from_gcs:
dict_repr = {'gcs_content_uri': document.content}
else:
dict_repr = {'content': document.content}
dict_repr.update({'type': document.type, 'language': document.language_hint})
return dict_repr
|
Represents the input to :class:`AnnotateText` transform.
Args:
content (str): The content of the input or the Google Cloud Storage URI
where the file is stored.
type (`Union[str, google.cloud.language_v1.Document.Type]`): Text type.
Possible values are `HTML`, `PLAIN_TEXT`. The default value is
`PLAIN_TEXT`.
language_hint (`Optional[str]`): The language of the text. If not specified,
language will be automatically detected. Values should conform to
ISO-639-1 standard.
encoding (`Optional[str]`): Text encoding. Possible values are: `NONE`,
`UTF8`, `UTF16`, `UTF32`. The default value is `UTF8`.
    from_gcs (bool): Whether the content should be interpreted as a Google Cloud
Storage URI. The default value is :data:`False`.
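    Example (a minimal sketch using plain text content):
        doc = Document('The food was great and the service was friendly.')
        Document.to_dict(doc)
        # {'content': 'The food was great and the service was friendly.',
        #  'type': 'PLAIN_TEXT', 'language': None}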
|
github-repos
|
def GetDecoder(cls, encoding_method):
encoding_method = encoding_method.lower()
decoder = cls._decoders.get(encoding_method, None)
if not decoder:
return None
return decoder()
|
Retrieves the decoder object for a specific encoding method.
Args:
encoding_method (str): encoding method identifier.
Returns:
    Decoder: decoder or None if the encoding method does not exist.
|
juraj-google-style
|
def _ToJSonObj(self, columns_order=None, order_by=()):
if (columns_order is None):
columns_order = [col['id'] for col in self.__columns]
col_dict = dict([(col['id'], col) for col in self.__columns])
col_objs = []
for col_id in columns_order:
col_obj = {'id': col_dict[col_id]['id'], 'label': col_dict[col_id]['label'], 'type': col_dict[col_id]['type']}
if col_dict[col_id]['custom_properties']:
col_obj['p'] = col_dict[col_id]['custom_properties']
col_objs.append(col_obj)
row_objs = []
for (row, cp) in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]['type'])
if (value is None):
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {'v': value[0]}
if ((len(value) > 1) and (value[1] is not None)):
cell_obj['f'] = value[1]
if (len(value) == 3):
cell_obj['p'] = value[2]
else:
cell_obj = {'v': value}
cell_objs.append(cell_obj)
row_obj = {'c': cell_objs}
if cp:
row_obj['p'] = cp
row_objs.append(row_obj)
json_obj = {'cols': col_objs, 'rows': row_objs}
if self.custom_properties:
json_obj['p'] = self.custom_properties
return json_obj
|
Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
|
codesearchnet
|
def console_fill_foreground(con: tcod.console.Console, r: Sequence[int], g: Sequence[int], b: Sequence[int]) -> None:
if ((len(r) != len(g)) or (len(r) != len(b))):
raise TypeError('R, G and B must all have the same size.')
if (isinstance(r, np.ndarray) and isinstance(g, np.ndarray) and isinstance(b, np.ndarray)):
r_ = np.ascontiguousarray(r, dtype=np.intc)
g_ = np.ascontiguousarray(g, dtype=np.intc)
b_ = np.ascontiguousarray(b, dtype=np.intc)
cr = ffi.cast('int *', r_.ctypes.data)
cg = ffi.cast('int *', g_.ctypes.data)
cb = ffi.cast('int *', b_.ctypes.data)
else:
cr = ffi.new('int[]', r)
cg = ffi.new('int[]', g)
cb = ffi.new('int[]', b)
lib.TCOD_console_fill_foreground(_console(con), cr, cg, cb)
|
Fill the foreground of a console with r,g,b.
Args:
con (Console): Any Console instance.
r (Sequence[int]): An array of integers with a length of width*height.
g (Sequence[int]): An array of integers with a length of width*height.
b (Sequence[int]): An array of integers with a length of width*height.
.. deprecated:: 8.4
You should assign to :any:`tcod.console.Console.fg` instead.
|
codesearchnet
|
def make_path(path: PathLike) -> abstract_path.Path:
is_windows = os.name == 'nt'
if isinstance(path, str):
        uri_splits = path.split('://', maxsplit=1)
        if len(uri_splits) > 1:
            return _URI_PREFIXES_TO_CLS[uri_splits[0] + '://'](path)
elif is_windows:
return gpath.WindowsGPath(path)
else:
return gpath.PosixGPath(path)
elif isinstance(path, _PATHLIKE_CLS):
return path
elif isinstance(path, os.PathLike):
path_cls = gpath.WindowsGPath if is_windows else gpath.PosixGPath
return path_cls(path)
else:
raise TypeError(f'Invalid path type: {path!r}')
|
Create a generic `pathlib.Path`-like abstraction.
Depending on the input (e.g. `gs://`, `github://`, `ResourcePath`,...), the
system (Windows, Linux,...), the function will create the right pathlib-like
abstraction.
Args:
path: Pathlike object.
Returns:
path: The `pathlib.Path`-like abstraction.
|
github-repos
|
def _collect_certificate_data(self, enterprise_enrollment):
if (self.certificates_api is None):
self.certificates_api = CertificatesApiClient(self.user)
course_id = enterprise_enrollment.course_id
username = enterprise_enrollment.enterprise_customer_user.user.username
try:
certificate = self.certificates_api.get_course_certificate(course_id, username)
completed_date = certificate.get('created_date')
if completed_date:
completed_date = parse_datetime(completed_date)
else:
completed_date = timezone.now()
is_passing = certificate.get('is_passing')
grade = (self.grade_passing if is_passing else self.grade_failing)
except HttpNotFoundError:
completed_date = None
grade = self.grade_incomplete
is_passing = False
return (completed_date, grade, is_passing)
|
Collect the learner completion data from the course certificate.
Used for Instructor-paced courses.
If no certificate is found, returns completed_date = None and grade = In Progress, on the assumption that a
certificate will eventually be generated.
Args:
enterprise_enrollment (EnterpriseCourseEnrollment): the enterprise enrollment record for which we need to
collect completion/grade data
Returns:
completed_date: Date the course was completed, this is None if course has not been completed.
grade: Current grade in the course.
is_passing: Boolean indicating if the grade is a passing grade or not.
|
codesearchnet
|
def version(msg):
tc = typecode(msg)
if (tc != 31):
raise RuntimeError(('%s: Not a status operation message, expecting TC = 31' % msg))
msgbin = common.hex2bin(msg)
version = common.bin2int(msgbin[72:75])
return version
|
ADS-B Version
Args:
    msg (string): 28-character hexadecimal message string, TC = 31
Returns:
int: version number
|
codesearchnet
|
def _save_private_file(filename, json_contents):
temp_filename = tempfile.mktemp()
file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)
with os.fdopen(file_desc, 'w') as file_handle:
json.dump(json_contents, file_handle, sort_keys=True,
indent=2, separators=(',', ': '))
shutil.move(temp_filename, filename)
|
Saves a file with read-write permissions for the owner only.
Args:
filename: String. Absolute path to file.
json_contents: JSON serializable object to be saved.
|
juraj-google-style
|
def __init__(self, chgcar):
self.chgcar = chgcar
self.structure = chgcar.structure
self.extrema_coords = []
self.extrema_type = None
self._extrema_df = None
self._charge_distribution_df = None
|
Initialization.
Args:
chgcar (pmg.Chgcar): input Chgcar object.
|
juraj-google-style
|
def generate_password(length=32):
    # The rest of the symbol set was truncated in the source; digits plus '!@' are assumed here to keep this runnable.
    return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits + '!@') for _ in range(length))
|
Generate a cryptographically secure random string to use for passwords
Args:
length (int): Length of password, defaults to 32 characters
Returns:
Randomly generated string
|
codesearchnet
|
def _ParseIndexTable(self, file_object):
cache_address_map = self._GetDataTypeMap('uint32le')
file_offset = file_object.get_offset()
cache_address_data = file_object.read(4)
while len(cache_address_data) == 4:
try:
value = self._ReadStructureFromByteStream(
cache_address_data, file_offset, cache_address_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map cache address at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if value:
cache_address = CacheAddress(value)
self.index_table.append(cache_address)
file_offset += 4
cache_address_data = file_object.read(4)
|
Parses the index table.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
ParseError: if the index table cannot be read.
|
juraj-google-style
|
def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
rendered_list = []
i = 0
total_attempts = 0
while True:
if (i >= cnt):
break
if (total_attempts > (cnt * self.unique_attempts_factor)):
raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
s = self.render(**kwargs)
if unique:
if (not (s in rendered_list)):
rendered_list.append(s)
i += 1
else:
rendered_list.append(s)
i += 1
total_attempts += 1
if (progress_callback and callable(progress_callback)):
progress_callback(i, cnt)
return rendered_list
|
Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==1000
|
codesearchnet
|
def get_initialized_tpu_systems():
return _INITIALIZED_TPU_SYSTEMS.copy()
|
Returns all currently initialized tpu systems.
Returns:
A dictionary, with tpu name as the key and the tpu topology as the value.
|
github-repos
|
def _parse_book_links(dom):
links = []
picker = lambda x: x.params.get("class", "").startswith("boxProKnihy")
for el in dom.find(None, fn=picker):
book_ref = el.find("a")
if not book_ref or "href" not in book_ref[0].params:
continue
links.append(book_ref[0].params["href"])
return links
|
Parse links to the details about publications from page with book list.
Args:
dom (obj): HTMLElement container of the page with book list.
Returns:
list: List of strings / absolute links to book details.
|
juraj-google-style
|
def read_raster(raster_file):
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
data = band.ReadAsArray()
xsize = band.XSize
ysize = band.YSize
nodata_value = band.GetNoDataValue()
geotrans = ds.GetGeoTransform()
dttype = band.DataType
srs = osr_SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
if nodata_value is None:
nodata_value = DEFAULT_NODATA
band = None
ds = None
return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)
|
Read raster by GDAL.
Args:
raster_file: raster file path.
Returns:
Raster object.
|
juraj-google-style
|
def get_fixers(self):
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ['*'])
fix_name = fix_mod_path.rsplit('.', 1)[(- 1)]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split('_')
class_name = (self.CLASS_PREFIX + ''.join([p.title() for p in parts]))
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError(("Can't find %s.%s" % (fix_name, class_name)))
fixer = fix_class(self.options, self.fixer_log)
if (fixer.explicit and (self.explicit is not True) and (fix_mod_path not in self.explicit)):
self.log_message('Skipping implicit fixer: %s', fix_name)
continue
self.log_debug('Adding transformation: %s', fix_name)
if (fixer.order == 'pre'):
pre_order_fixers.append(fixer)
elif (fixer.order == 'post'):
post_order_fixers.append(fixer)
else:
raise FixerError(('Illegal fixer order: %r' % fixer.order))
key_func = operator.attrgetter('run_order')
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
|
Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
|
codesearchnet
|
def simulate_phases(self, phase_map: Dict[(Tuple[(int, ...)], float)]):
self._pool.map(_clear_scratch, self._shard_num_args())
for (indices, half_turns) in phase_map.items():
args = self._shard_num_args({'indices': indices, 'half_turns': half_turns})
if (len(indices) == 1):
self._pool.map(_single_qubit_accumulate_into_scratch, args)
elif (len(indices) == 2):
self._pool.map(_two_qubit_accumulate_into_scratch, args)
self._pool.map(_apply_scratch_as_phase, self._shard_num_args())
|
Simulate a set of phase gates on the xmon architecture.
Args:
phase_map: A map from a tuple of indices to a value, one for each
phase gate being simulated. If the tuple key has one index, then
this is a Z phase gate on the index-th qubit with a rotation
angle of pi times the value of the map. If the tuple key has two
indices, then this is a |11> phasing gate, acting on the qubits
at the two indices, and a rotation angle of pi times the value
of the map.
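    Example phase_map (illustrative; `sim` is a hypothetical simulator instance):
        phase_map = {
            (0,): 0.5,     # Z phase of pi * 0.5 on qubit 0
            (0, 1): 0.25,  # |11> phasing of pi * 0.25 on qubits 0 and 1
        }
        sim.simulate_phases(phase_map)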
|
codesearchnet
|
def get_script_module(script_information, package='pylabcontrol', verbose=False):
module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)
return module
|
Wrapper to get the module for a script.
Args:
    script_information: Information about the script. This can be
        - a dictionary
        - a Script instance
        - the name of a Script class
    package (optional): Name of the package to which the script belongs, e.g. pylabcontrol or b26toolkit; only used when script_information is a string.
Returns:
    The module for the script.
|
juraj-google-style
|
def parse_columns(lines):
data = []
index = []
for line in lines:
line = line.rstrip()
if line.startswith("
tmp = __parse_entry(line)
data.append(tmp[1])
index.append(tmp[0])
return DataFrame(data, index=index, columns=['description'])
|
Parse list of lines with columns description from SOFT file.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Columns description.
|
juraj-google-style
|
def virt_customize(self, options):
cmd = ['virt-customize', '-a', self.disk_path]
if 'ssh-inject' in options and not options['ssh-inject']:
options['ssh-inject'] = 'root:file:{}'.format(
self.paths.ssh_id_rsa_pub()
)
options = self.normalize_options(options)
cmd.extend(options)
return Command('virt-customize', cmd)
|
Handler for 'virt-customize'.
Note: if the 'ssh-inject' option was specified without a path to a key,
the prefix's key will be copied to the vm.
Args:
    options (list of str): Options and arguments for 'virt-customize'.
Returns:
    callable: A callable which handles the command.
Raises:
    lago.build.BuildException: If a handler for the command doesn't exist.
|
juraj-google-style
|
def get_HDX_code_from_location(location, locations=None, configuration=None):
if locations is None:
locations = Locations.validlocations(configuration)
locationupper = location.upper()
for locdict in locations:
locationcode = locdict['name'].upper()
if locationupper == locationcode:
return locationcode
for locdict in locations:
if locationupper == locdict['title'].upper():
return locdict['name'].upper()
return None
|
Get HDX code for location
Args:
location (str): Location for which to get HDX code
locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[str]: HDX code or None
|
juraj-google-style
|
def gcd(a, b, name=None):
with ops.name_scope(name, 'gcd', [a, b]):
a = ops.convert_to_tensor(a)
b = ops.convert_to_tensor(b)
a.shape.assert_has_rank(0)
b.shape.assert_has_rank(0)
if not a.dtype.is_integer:
raise ValueError('a must be an integer type. Got: %s' % a.dtype)
if not b.dtype.is_integer:
raise ValueError('b must be an integer type. Got: %s' % b.dtype)
const_a = tensor_util.constant_value(a)
const_b = tensor_util.constant_value(b)
if const_a is not None and const_b is not None:
if sys.version_info.major < 3:
math_gcd = fractions.gcd
else:
math_gcd = math.gcd
return ops.convert_to_tensor(math_gcd(const_a, const_b))
cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
body = lambda a, b: [b, math_ops.mod(a, b)]
a, b = while_loop.while_loop(cond, body, [a, b], back_prop=False)
return a
|
Returns the greatest common divisor via Euclid's algorithm.
Args:
a: The dividend. A scalar integer `Tensor`.
b: The divisor. A scalar integer `Tensor`.
name: An optional name for the operation.
Returns:
A scalar `Tensor` representing the greatest common divisor between `a` and
`b`.
Raises:
ValueError: If `a` or `b` are not scalar integers.
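    Example (an eager-mode sketch):
        import tensorflow as tf
        gcd(tf.constant(12), tf.constant(18))    # tf.Tensor(6, shape=(), dtype=int32)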
|
github-repos
|
def jaccard_sim(features1, features2):
set1 = set(features1)
set2 = set(features2)
try:
return len(set1.intersection(set2))/float(max(len(set1), len(set2)))
except ZeroDivisionError:
return 0
|
Compute similarity between two sets using Jaccard similarity.
Args:
features1: list of PE Symbols.
features2: list of PE Symbols.
Returns:
    float: Similarity score between 0 and 1 (0 if both sets are empty).
|
juraj-google-style
|
def serialize_cert_to_der(cert_obj):
return cert_obj.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.DER)
|
Serialize certificate to DER.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: DER encoded certificate
|
codesearchnet
|
def _read_accept_states(self):
states = []
i = 0
regex = re.compile('[ \t\n\r:,]+')
found = 0
state = 0
mapping = []
cur_line = None
with open(self.outfile) as flex_file:
for cur_line in flex_file:
if cur_line[0:37] == "static yyconst flex_int16_t yy_accept" or cur_line[0:35] == "static const flex_int16_t yy_accept":
found = 1
continue
if found == 1:
if state == 0 and cur_line[0:5] == " {":
mapping.append(0)
state = 1
continue
if state == 1:
if cur_line[0:7] != " } ;":
cur_line = "".join(cur_line.split())
if cur_line == '':
continue
if cur_line[cur_line.__len__() - 1] == ',':
splitted_line = regex.split(
cur_line[:cur_line.__len__() - 1])
else:
splitted_line = regex.split(cur_line)
mapping = mapping + splitted_line
continue
else:
cleared = []
for j in mapping:
cleared.append(int(j))
max_value = max(cleared)
for i in range(0, len(cleared)):
if cleared[i] > 0 and cleared[
i] < (max_value - 1):
states.append(i)
return states
return []
|
Read DFA accepted states from flex compiled file
Args:
None
Returns:
list: The list of accepted states
|
juraj-google-style
|
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
|
Create the Signature TI object.
Args:
    owner: (Optional) Owner of the signature.
    file_content: Content of the signature file.
    file_name: Name of the signature file.
    file_type: Type of the signature file.
    name: Name of the signature.
    **kwargs: Additional keyword arguments passed to the Signature object.
Return:
    Signature: The created Signature TI object.
|
juraj-google-style
|