code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes)
---|---|---|
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):
if (max_results == 0):
max_results = Api._MAX_RESULTS
args = {'project': (project_id if project_id else self._project_id), 'maxResults': max_results}
if (projection is not None):
args['projection'] = projection
if (page_token is not None):
args['pageToken'] = page_token
url = (Api._ENDPOINT + (Api._BUCKET_PATH % ''))
return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)
|
Issues a request to retrieve the list of buckets.
Args:
projection: the projection of the bucket information to retrieve.
max_results: an optional maximum number of objects to retrieve.
page_token: an optional token to continue the retrieval.
project_id: the project whose buckets should be listed.
Returns:
A parsed list of bucket information dictionaries.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def filter(self, versions, key=lambda x: x):
return [x for x in versions if self.check(key(x))]
|
Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
key (callable, optional): Function used to extract a SemanticVersion from each item; defaults to the identity function.
Returns:
list: A list of the SemanticVersion objects that matched this range
|
juraj-google-style
|
def make_tar_stream(build_context, buffer):
tf = tarfile.TarFile(fileobj=buffer, mode='w')
for context_path, fileobj in build_context.items():
if getattr(fileobj, 'localpath', None) is not None:
tf.add(fileobj.localpath, arcname=context_path)
else:
tar_add_bytes(tf, context_path, fileobj.read('rb'))
tf.close()
|
Write a tar stream of the build context to the provided buffer
Args:
build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames to file references
buffer (io.BytesIO): writable binary mode buffer
|
juraj-google-style
|
def remove_interceptor(self, name):
for (index, interceptor) in enumerate(self.interceptors):
matches = ((type(interceptor).__name__ == name) or (getattr(interceptor, 'name') == name))
if matches:
self.interceptors.pop(index)
return True
return False
|
Removes a specific interceptor by name.
Arguments:
name (str): interceptor name to disable.
Returns:
bool: `True` if the interceptor was disabled, otherwise `False`.
|
codesearchnet
|
def deep_update(d, u):
for k, v in u.items():
if isinstance(v, Mapping):
d[k] = deep_update(d.get(k, {}), v)
elif isinstance(v, list):
existing_elements = d.get(k, [])
d[k] = existing_elements + [ele for ele in v if ele not in existing_elements]
else:
d[k] = v
return d
|
Deeply updates a dictionary. List values are concatenated.
Args:
d (dict): First dictionary which will be updated
u (dict): Second dictionary used to extend the first one
Returns:
dict: The merged dictionary
|
juraj-google-style
|
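A minimal usage sketch for `deep_update` above, assuming the function is in scope and that `Mapping` (from `collections.abc`) is imported in its module; the expected output in the comments follows directly from the merge rules in the docstring (nested dicts merged, list elements appended only if not already present).

```python
# Sketch only: assumes deep_update as defined in the entry above is in scope.
base = {"db": {"host": "localhost", "ports": [5432]}, "debug": False}
patch = {"db": {"ports": [5433, 5432], "user": "admin"}, "debug": True}

merged = deep_update(base, patch)
print(merged)
# {'db': {'host': 'localhost', 'ports': [5432, 5433], 'user': 'admin'},
#  'debug': True}
# 5432 is not appended again because existing list elements are skipped.
```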
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
try:
student_item = StudentItem.objects.get(student_id=student_id, course_id=course_id, item_id=item_id)
except StudentItem.DoesNotExist:
return
try:
score = Score.create_reset_score(student_item)
if emit_signal:
score_reset.send(sender=None, anonymous_user_id=student_id, course_id=course_id, item_id=item_id, created_at=score.created_at)
if clear_state:
for sub in student_item.submission_set.all():
sub.status = Submission.DELETED
sub.save(update_fields=['status'])
cache_key = Submission.get_cache_key(sub.uuid)
cache.delete(cache_key)
except DatabaseError:
msg = u'Error occurred while resetting scores for item {item_id} in course {course_id} for student {student_id}'.format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.exception(msg)
raise SubmissionInternalError(msg)
else:
msg = u'Score reset for item {item_id} in course {course_id} for student {student_id}'.format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.info(msg)
|
Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
|
codesearchnet
|
def _get_default_help_message(func, args, description=None, args_help=None):
if description is None:
description = "Argument parsing for %s" % func.__name__
args_help = args_help or {}
for argument in [arg_name for arg_name in args
if arg_name not in args_help]:
args_help[argument] = "Help message for %s" % argument
return (description, args_help)
|
Create a default description for the parser and help messages for the
arguments if they are missing.
Args:
func: the method we are creating a parser for
args: the argument names of the method
description: a potentially existing description created from the
function docstring
args_help: a dict {arg_name: help} with potentially missing arguments
Returns:
a tuple (arg_parse_description, complete_args_help)
|
juraj-google-style
|
def _force_close(self, file_length=None):
if file_length is None:
file_length = self._get_offset_from_gcs() + 1
self._send_data('', 0, file_length)
|
Close this buffer on file_length.
Finalize this upload immediately on file_length.
Contents that are still in memory will not be uploaded.
This is a utility method that does not modify self.
Args:
file_length: file length. Must match what has been uploaded. If None,
it will be queried from GCS.
|
juraj-google-style
|
def compress(d, output, fmt='gz', logger=None):
if not logger:
logger = log.get_logger('s3')
if type(d) not in [list, tuple]:
d = [d, ]
d = [os.path.expanduser(_d) for _d in d]
print_compress_info(d, output, compress, logger)
if fmt.lower() == 'none':
fmt = ''
elif fmt.lower() not in ['gz', 'bz2']:
logger.info('Compression option ("{}") is invalid.\nFalling back to uncompressed.'.format(fmt))
fmt = ''
output = os.path.expanduser(output)
tar = tarfile.open(output, 'w:{}'.format(fmt))
for obj in d:
tar.add(obj)
tar.close()
return output
|
Creates a compressed/uncompressed tar file.
Args:
d: Can be one of three things:
1. the path to a single file, as a string
2. the path to a single directory, as a string
3. an iterable of file or directory paths
output (str): Output file path.
fmt: Compression method. Options are ``'gz'`` (gzip),
``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``.
|
juraj-google-style
|
def show(self, progress, msg=None):
if (self.whole_tag.style.display == 'none'):
self.whole_tag.style.display = 'block'
if (isinstance(progress, int) or isinstance(progress, float)):
percentage = progress
else:
percentage = self.__class__._compute_percentage(progress)
self.tag.class_name = 'progress-bar'
if (percentage < 100):
self.tag.class_name += ' progress-bar-striped active'
else:
msg = 'Hotovo'
self.tag.aria_valuemin = percentage
self.tag.style.width = '{}%'.format(percentage)
if msg:
self.tag.text = msg
|
Show the progress bar and set it to `progress` tuple or value.
Args:
progress (tuple / int / float): Tuple ``(done / len(all))`` or
the direct percentage value as int / float.
msg (str, default None): Alternative background description.
|
codesearchnet
|
def related(self, *, exclude_self=False):
manager = type(self)._default_manager
queryset = manager.related_to(self)
if exclude_self:
queryset = queryset.exclude(id=self.id)
return queryset
|
Get a QuerySet for all trigger log objects for the same connected model.
Args:
exclude_self (bool): Whether to exclude this log object from the result list
|
codesearchnet
|
def __init__(self, message, callback, color=''):
super(ConsolePrompt, self).__init__()
self.daemon = True
self._message = message
self._callback = callback
self._color = color
self._stop_event = threading.Event()
self._answered = False
|
Initializes a ConsolePrompt.
Args:
message: A string to be presented to the user.
callback: A function to be called with the response string.
color: An ANSI color code, or the empty string.
|
juraj-google-style
|
def CreateSignatureScanner(cls, specification_store):
scanner_object = pysigscan.scanner()
for format_specification in specification_store.specifications:
for signature in format_specification.signatures:
pattern_offset = signature.offset
if pattern_offset is None:
signature_flags = pysigscan.signature_flags.NO_OFFSET
elif pattern_offset < 0:
pattern_offset *= -1
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
else:
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
scanner_object.add_signature(
signature.identifier, pattern_offset, signature.pattern,
signature_flags)
return scanner_object
|
Creates a signature scanner for format specifications with signatures.
Args:
specification_store (FormatSpecificationStore): format specifications
with signatures.
Returns:
pysigscan.scanner: signature scanner.
|
juraj-google-style
|
def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
|
Instantiate a [`AlignConfig`] (or a derived class) from align text model configuration and align vision model
configuration.
Returns:
[`AlignConfig`]: An instance of a configuration object
|
github-repos
|
def str2dict_values(str_in):
tmp_dict = str2dict(str_in)
if (tmp_dict is None):
return None
return [tmp_dict[key] for key in sorted((k for k in tmp_dict))]
|
Extracts the values from a string that represents a dict and returns them
sorted by key.
Args:
str_in (string) that contains python dict
Returns:
(list) with values or None if no valid dict was found
Raises:
-
|
codesearchnet
|
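A hypothetical usage sketch for `str2dict_values` above. The helper `str2dict` is not shown in this entry, so the accepted input format here is an assumption; the point illustrated is only the sort-by-key ordering and the `None` fallback.

```python
# Hypothetical: assumes str2dict can recover a dict literal embedded in str_in.
vals = str2dict_values("result: {'b': 2, 'a': 1, 'c': 3}")
print(vals)  # [1, 2, 3] -- values ordered by sorted keys: 'a', 'b', 'c'

print(str2dict_values("no dict here"))  # None (no valid dict found)
```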
def to_zmat(self, buf=None, upper_triangle=True, implicit_index=True, float_format='{:.6f}'.format, overwrite=True, header=False):
out = self.copy()
if implicit_index:
out = out.change_numbering(new_index=range(1, (len(self) + 1)))
if (not upper_triangle):
out = out._remove_upper_triangle()
output = out.to_string(index=(not implicit_index), float_format=float_format, header=header)
if (buf is not None):
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output
|
Write zmat-file
Args:
buf (str): StringIO-like, optional buffer to write to
implicit_index (bool): If implicit_index is set, the zmat indexing
is changed to ``range(1, len(self) + 1)``.
Using :meth:`~chemcoord.Zmat.change_numbering`
In addition, the index is omitted while writing, which means
that the index is given implicitly by the row number.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
overwrite (bool): May overwrite existing files.
Returns:
formatted : string (or unicode, depending on data and options)
|
codesearchnet
|
def create_iam_role(self, account):
try:
iam = self.session.client('iam')
trust = get_template('vpc_flow_logs_iam_role_trust.json').render()
policy = get_template('vpc_flow_logs_role_policy.json').render()
newrole = iam.create_role(Path='/', RoleName=self.role_name, AssumeRolePolicyDocument=trust)['Role']['Arn']
iam.put_role_policy(RoleName=self.role_name, PolicyName='VpcFlowPolicy', PolicyDocument=policy)
self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))
auditlog(event='vpc_flow_logs.create_iam_role', actor=self.ns, data={'account': account.account_name, 'roleName': self.role_name, 'trustRelationship': trust, 'inlinePolicy': policy})
return newrole
except Exception:
self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
|
Create a new IAM role. Returns the ARN of the newly created role
Args:
account (:obj:`Account`): Account where to create the IAM role
Returns:
`str`
|
codesearchnet
|
def load_obj(fn):
position = [np.zeros(3, dtype=np.float32)]
normal = [np.zeros(3, dtype=np.float32)]
uv = [np.zeros(2, dtype=np.float32)]
tuple2idx = OrderedDict()
trinagle_indices = []
input_file = open(fn) if isinstance(fn, str) else fn
for line in input_file:
line = line.strip()
if not line or line[0] == '#':
continue
line = line.split(' ', 1)
tag = line[0]
if len(line) > 1:
line = line[1]
else:
line = ''
if tag == 'v':
position.append(np.fromstring(line, sep=' '))
elif tag == 'vt':
uv.append(np.fromstring(line, sep=' '))
elif tag == 'vn':
normal.append(np.fromstring(line, sep=' '))
elif tag == 'f':
output_face_indices = []
for chunk in line.split():
vt = _parse_vertex_tuple(chunk)
if vt not in tuple2idx:
tuple2idx[vt] = len(tuple2idx)
output_face_indices.append(tuple2idx[vt])
for i in range(1, len(output_face_indices)-1):
for vi in [0, i, i+1]:
trinagle_indices.append(output_face_indices[vi])
outputs = {}
outputs['face'] = np.int32(trinagle_indices)
pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
if np.any(pos_idx):
outputs['position'] = _unify_rows(position)[pos_idx]
if np.any(uv_idx):
outputs['uv'] = _unify_rows(uv)[uv_idx]
if np.any(normal_idx):
outputs['normal'] = _unify_rows(normal)[normal_idx]
return outputs
|
Load 3d mesh from a .obj file.
Args:
fn: Input file name or file-like object.
Returns:
dictionary with the following keys (some of which may be missing):
position: np.float32, (n, 3) array, vertex positions
uv: np.float32, (n, 2) array, vertex uv coordinates
normal: np.float32, (n, 3) array, vertex uv normals
face: np.int32, (k*3,) triangular face indices
|
juraj-google-style
|
def comparator(objective):
if isinstance(objective, Minimum):
return lambda l, r: l < r
else:
return lambda l, r: l > r
|
Higher order function creating a compare function for objectives.
Args:
objective (cipy.algorithms.core.Objective): The objective to create a
compare for.
Returns:
callable: Function accepting two objectives to compare.
Examples:
>>> a = Minimum(0.1)
>>> b = Minimum(0.2)
>>> compare = comparator(a)
>>> comparison = compare(a, b) # False
|
juraj-google-style
|
def highway_core_with_recurrent_dropout(
hidden_size,
num_layers,
keep_prob=0.5,
**kwargs):
core = HighwayCore(hidden_size, num_layers, **kwargs)
return RecurrentDropoutWrapper(core, keep_prob), core
|
Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
A tuple (train_core, test_core) where train_core is a highway core with
recurrent dropout enabled to be used for training and test_core is the
same highway core without recurrent dropout.
|
juraj-google-style
|
def make_one_shot_iterator(self) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:
return self._make_one_shot_iterator()
|
Creates an iterator for elements of this dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not currently support re-initialization. For
that see `make_initializable_iterator`.
Example:
```python
# Building graph ...
dataset = ...
next_value = dataset.make_one_shot_iterator().get_next()
# ... from within a session ...
try:
while True:
value = sess.run(next_value)
...
except tf.errors.OutOfRangeError:
pass
```
Returns:
A `tf.data.Iterator` for elements of this dataset.
|
github-repos
|
def compute_attr_metadata(self, own_attrs: list[Attribute], decorator: str) -> Sequence[Attribute]:
assert decorator in _METADATA_KEYS, f'No metadata key for {decorator}'
key = _METADATA_KEYS[decorator]
attrs = self._get_attrs_from_mro(own_attrs, key)
self.metadata[key] = attrs
return attrs
|
Sets combined metadata based on inherited and own attrs.
Args:
own_attrs: The attrs defined explicitly in this class
decorator: The fully qualified decorator name
Returns:
The list of combined attrs.
|
github-repos
|
def list_keyvaults(access_token, subscription_id, rgname):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token)
|
Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
|
codesearchnet
|
def dummy_inputs(self):
if self.config.use_lang_emb and self.config.n_langs > 1:
return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), 'langs': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}
else:
return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}
|
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
|
github-repos
|
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> list[int]:
if isinstance(tensor, np.ndarray):
return list(tensor.shape)
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
|
Deal with dynamic shape in tensorflow cleanly.
Args:
tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.
Returns:
`List[int]`: The shape of the tensor as a list.
|
github-repos
|
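A small usage sketch for `shape_list` above, assuming TensorFlow is installed and the function is in scope. With fully static shapes (and with NumPy arrays) the result is a plain list of Python ints; inside a `tf.function` with unknown dimensions, the `None` entries are replaced by dynamic shape tensors.

```python
import numpy as np
import tensorflow as tf

# Sketch only: assumes shape_list as defined in the entry above is in scope.
x = tf.zeros((2, 3))
print(shape_list(x))                 # [2, 3]

print(shape_list(np.ones((4, 5))))   # [4, 5] -- NumPy arrays bypass the TF graph
```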
def live(self):
session = self._session
url = '{}/live'.format(self._base_url)
supported_params = frozenset(['filter[port]'])
params = {k: v for (k, v) in iteritems(self._params) if (k in supported_params)}
return session.live(url, self._datapoint_class, {'is_aggregate': self._is_aggregate}, params=params)
|
Get a live stream of timeseries readings.
This returns an Iterable over a live stream of readings. Note
that the result will need to be closed since the system can
not tell when you'll be done with it.
You can either call ``close`` on the endpoint when you're done or
use the context management facilities of the endpoint.
.. code-block:: python
# Fetch a sensor
timeseries = sensor.timeseries()
# ensure live endpoint closed
with timeseries.live() as live:
# Wait for 10 readings
first10 = list(islice(live, 10))
Returns:
An iterable over the live stream of readings.
|
codesearchnet
|
def record_request_completion(self, created_time: float, request_id: str) -> None:
if not _has_opentelemetry:
return
latency_ms = (time.time() - created_time) * 1000.0
try:
self.request_latency_histogram.record(latency_ms)
logger.debug(f'Recorded request completion for {request_id}: {latency_ms:.2f}ms')
except Exception as e:
logger.warning(f'Failed to record request completion metric: {e}')
|
Record metrics about a completed request.
Args:
created_time: The time the request was created
request_id: The ID of the request
|
github-repos
|
def disqualified(self, num, natural=True, **kwargs):
search_type = ('natural' if natural else 'corporate')
baseuri = (self._BASE_URI + 'disqualified-officers/{}/{}'.format(search_type, num))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
Search for disqualified officers by officer ID.
Searches for natural disqualifications by default. Specify
natural=False to search for corporate disqualifications.
Args:
num (str): Officer ID to search on.
natural (Optional[bool]): Natural or corporate search
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
|
codesearchnet
|
def _read_addr_resolve(self, length, htype):
if (htype == 1):
_byte = self._read_fileng(6)
_addr = '-'.join(textwrap.wrap(_byte.hex(), 2))
else:
_addr = self._read_fileng(length)
return _addr
|
Resolve MAC address according to protocol.
Positional arguments:
* length -- int, hardware address length
* htype -- int, hardware type
Returns:
* str -- MAC address
|
codesearchnet
|
def _has_extras(ctx):
if (not ctx.index.entries):
return False
return ((ctx.data_offset > 8) and (ctx.data_offset > (ctx.signatures.offset_end + 8)))
|
Determine if a MAR file has an additional section block or not.
It does this by looking at where file data starts in the file. If this
starts immediately after the signature data, then no additional sections are present.
Args:
ctx (context): construct parsing context
Returns:
True if the MAR file has an additional section block
False otherwise
|
codesearchnet
|
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
size = get_size_dict(size, default_to_square=False)
if 'height' in size and 'width' in size:
output_size = (size['height'], size['width'])
elif 'longest_edge' in size:
output_size = get_resize_output_image_size(image, size['longest_edge'], input_data_format)
else:
raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}")
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
|
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its
longest edge of length `s` while keeping the aspect ratio of the original image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
def with_wget(url_dict=None, target_file=None):
def wget_decorator(cls):
def download_impl(self):
'Download the selected version from the url_dict value.'
t_file = (target_file if target_file else self.SRC_FILE)
t_version = url_dict[self.version]
Wget(t_version, t_file)
@staticmethod
def versions_impl():
'Return a list of versions from the url_dict keys.'
return list(url_dict.keys())
cls.versions = versions_impl
cls.download = download_impl
return cls
return wget_decorator
|
Decorate a project class with wget-based version information.
This adds two attributes to a project class:
- A `versions` method that returns a list of available versions
for this project.
- A `download` method that fetches the selected version of the
project via wget.
Args:
url_dict (dict): A dictionary that assigns a version to a download URL.
target_file (str): An optional path where we should put the clone.
If unspecified, we will use the `SRC_FILE` attribute of
the decorated class.
|
codesearchnet
|
def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False):
if (objlist is None):
return False
if isinstance(obj, six.string_types):
obj_id = obj
elif (isinstance(obj, dict) or isinstance(obj, HDXObject)):
obj_id = obj.get(matchon)
else:
raise HDXError('Type of object not a string, dict or T<=HDXObject')
if (not obj_id):
return False
for (i, objdata) in enumerate(objlist):
objid = objdata.get(matchon)
if (objid and (objid == obj_id)):
if delete:
objlist[i].delete_from_hdx()
del objlist[i]
return True
return False
|
Remove an HDX object from a list within the parent HDX object
Args:
objlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects
obj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary
matchon (str): Field to match on. Defaults to id.
delete (bool): Whether to delete HDX object. Defaults to False.
Returns:
bool: True if object removed, False if not
|
codesearchnet
|
def put(self):
return self.manager.put(id=self.id, name=self.name, description=self.description, whitelisted_container_task_types=self.whitelisted_container_task_types, whitelisted_executable_task_types=self.whitelisted_executable_task_types)
|
Updates this task whitelist on the saltant server.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
|
codesearchnet
|
def port_create_vlan(br, port, id, internal=False):
interfaces = __salt__['network.interfaces']()
if (not (0 <= id <= 4095)):
return False
elif (not bridge_exists(br)):
return False
elif ((not internal) and (port not in interfaces)):
return False
elif (port in port_list(br)):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
|
codesearchnet
|
def configs(self, filters=None):
url = self._url('/configs')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
|
List configs
Args:
filters (dict): A map of filters to process on the configs
list. Available filters: ``names``
Returns (list): A list of configs
|
juraj-google-style
|
def _GenerateNotices(self):
items = []
for e in self._notices:
d = e.GetDictToFormat()
if ('url' in d.keys()):
d['url'] = ('<a href="%(url)s">%(url)s</a>' % d)
items.append(('<li class="notice">%s</li>' % e.FormatProblem(d).replace('\n', '<br>')))
if items:
return ('<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items))
else:
return ''
|
Generate a summary of any notices.
Returns:
The generated HTML as a string.
|
codesearchnet
|
def __getDecision(self, result, multiple=False, **values):
values = self.__toString(values)
__valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
errors = self.__checkDecisionParameters(result, **values)
if errors:
view.Tli.showErrors('ParametersError', errors)
machingData = {}
for line in self.decisions:
match = True
for index in __valueKeyWithHeaderIndex:
if line[index] != __valueKeyWithHeaderIndex[index]:
if line[index] != self.__wildcardSymbol:
match = False
break
if match:
if multiple:
for header in result:
if header not in machingData:
machingData[header] = [line[self.header.index(header)]]
else:
machingData[header].append(line[self.header.index(header)])
else:
for header in result:
machingData[header] = line[self.header.index(header)]
return machingData
if multiple:
if machingData:
return machingData
return dict((key, None) for key in result)
|
The main method for decision picking.
Args:
result (array of str): The values you want returned in the result array.
multiple (boolean, optional): Whether to return multiple results if many matching decisions are found.
**values (dict): What the finder should look for, as (headerString : value) pairs.
Returns: Mapped result values with the found elements for each matching row.
|
juraj-google-style
|
def __init__(self, speaker, audio_format, key, lang="ru-RU", **kwargs):
self.__params = {
"speaker": speaker,
"format": audio_format,
"key": key,
"lang": lang,
}
self.__params.update(kwargs)
self._data = None
|
Class for generate of speech.
Args:
speaker: Speaker.
audio_format: Audio file format.
key: API-key for Yandex speech kit.
lang (optional): Language. Defaults to "ru-RU".
emotion (optional): The color of the voice. Defaults to "normal".
speed (optional): Speech tempo. Defaults to 1.0.
|
juraj-google-style
|
def get_registered_object(name, custom_objects=None, module_objects=None):
custom_objects_scope_dict = global_state.get_global_attribute('custom_objects_scope_dict', {})
if name in custom_objects_scope_dict:
return custom_objects_scope_dict[name]
elif name in GLOBAL_CUSTOM_OBJECTS:
return GLOBAL_CUSTOM_OBJECTS[name]
elif custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
|
Returns the class associated with `name` if it is registered with Keras.
This function is part of the Keras serialization and deserialization
framework. It maps strings to the objects associated with them for
serialization/deserialization.
Example:
```python
def from_config(cls, config, custom_objects=None):
if 'my_custom_object_name' in config:
config['hidden_cls'] = tf.keras.saving.get_registered_object(
config['my_custom_object_name'], custom_objects=custom_objects)
```
Args:
name: The name to look up.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, custom_objects is provided by the user.
module_objects: A dictionary of custom objects to look the name up in.
Generally, module_objects is provided by midlevel library
implementers.
Returns:
An instantiable class associated with `name`, or `None` if no such class
exists.
|
github-repos
|
def _grouper(iterable, n, fillvalue=0):
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
|
Collect data into fixed-length chunks or blocks.
Args:
iterable: The iterable to be chunked.
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
|
juraj-google-style
|
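A quick usage sketch for `_grouper` above, assuming the function is in scope and that `zip_longest` (its only dependency) is imported in its module. The final chunk is padded with `fillvalue`.

```python
# Sketch only: assumes _grouper as defined in the entry above is in scope.
chunks = list(_grouper([1, 2, 3, 4, 5], 2))
print(chunks)  # [(1, 2), (3, 4), (5, 0)] -- last chunk padded with the default fillvalue=0
```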
def nsarg_completions(
completion_text: str,
entity_types: list,
bel_spec: BELSpec,
namespace: str,
species_id: str,
bel_fmt: str,
size: int,
):
minimal_nsarg_completion_len = 1
species = [species_id]
namespaces = [namespace]
replace_list = []
if len(completion_text) >= minimal_nsarg_completion_len:
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/completions/{url_path_param_quoting(completion_text)}'
params = {
"size": size,
"entity_types": entity_types,
"namespaces": namespaces,
"species": species,
}
r = get_url(url, params=params)
if r.status_code == 200:
ns_completions = r.json()
else:
log.error(f"Status code of {r.status_code} for {url}")
ns_completions = {}
for complete in ns_completions.get("completions", []):
replace_list.append(
{
"replacement": complete["id"],
"label": f"{complete['id']} ({complete['label']})",
"highlight": complete["highlight"][-1],
"type": "NSArg",
}
)
for entity_type in entity_types:
default_namespace = bel_spec["namespaces"].get(entity_type, [])
if default_namespace:
for obj in default_namespace["info"]:
replacement = None
if bel_fmt == "long" and re.match(
completion_text, obj["name"], re.IGNORECASE
):
replacement = obj["name"]
elif bel_fmt in ["short", "medium"] and re.match(
completion_text, obj["abbreviation"], re.IGNORECASE
):
replacement = obj["abbreviation"]
if replacement:
highlight = replacement.replace(
completion_text, f"<em>{completion_text}</em>"
)
replace_list.insert(
0,
{
"replacement": replacement,
"label": replacement,
"highlight": highlight,
"type": "NSArg",
},
)
return replace_list[:size]
|
Namespace completions
Args:
completion_text
entity_types: used to filter namespace search results
bel_spec: used to search default namespaces
namespace: used to filter namespace search results
species_id: used to filter namespace search results
bel_fmt: used to select full name or abbrev for default namespaces
size: how many completions to return
Returns:
list of replacement text objects
|
juraj-google-style
|
def setHolidayDates(self, cmd_dict=None, password="00000000"):
result = False
self.setContext("setHolidayDates")
if not cmd_dict:
cmd_dict = self.m_holiday_date_params
try:
if not self.request(False):
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_table = ""
req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_10_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_10_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Day"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Month"]).zfill(2))
req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Day"]).zfill(2))
req_str = "015731023030423028" + req_table + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setHolidayDates: 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result
|
Serial call to set holiday list.
If a buffer dictionary is not supplied, the method will use
the class object buffer populated with assignHolidayDate.
Args:
cmd_dict (dict): Optional dictionary of holidays.
password (str): Optional password.
Returns:
bool: True on completion.
|
juraj-google-style
|
def compare(expr, value, regex_expr=False):
if expr == value:
return True
negate = False
if isinstance(expr, str):
negate = expr.startswith(NEGATE)
expr = strip_negate(expr) if negate else expr
try:
test(expr, value, regex_expr=regex_expr)
except Exception as err:
if negate:
return True
else:
raise err
return True
|
Compares a string or regular expression against a given value.
Arguments:
expr (str|regex): string or regular expression value to compare.
value (str): value to compare against to.
regex_expr (bool, optional): enables string based regex matching.
Raises:
AssertionError: in case of assertion error.
Returns:
bool
|
juraj-google-style
|
def get_db_row(db, start, size):
type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]
data = client.db_read(db, start, type_, size)
return data
|
Here you see an example of reading out part of a DB
Args:
db (int): The db to use
start (int): The index of where to start in db data
size (int): The size of the db data to read
|
codesearchnet
|
def _InternalUnpackAny(msg):
type_url = msg.type_url
db = symbol_database.Default()
if not type_url:
return None
type_name = type_url.split("/")[-1]
descriptor = db.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = db.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message
|
Unpacks Any message and returns the unpacked message.
This internal method is different from the public Any Unpack method, which
takes the target message as an argument. _InternalUnpackAny does not have the
target message type and needs to find the message type in the descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
|
juraj-google-style
|
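A usage sketch for `_InternalUnpackAny` above, assuming the `protobuf` package is installed and the function is in scope. Importing a generated module (here the well-known `Timestamp`) registers its descriptor in the default pool, which is what lets the helper resolve the type from the `type_url`.

```python
from google.protobuf import any_pb2
from google.protobuf import timestamp_pb2

# Pack a Timestamp into an Any; Pack() sets type_url to
# "type.googleapis.com/google.protobuf.Timestamp".
any_msg = any_pb2.Any()
any_msg.Pack(timestamp_pb2.Timestamp(seconds=123))

# Sketch only: assumes _InternalUnpackAny as defined in the entry above is in scope.
unpacked = _InternalUnpackAny(any_msg)
print(type(unpacked).__name__)  # Timestamp
print(unpacked.seconds)         # 123
```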
def load_morfessor_model(lang="en", version="2"):
src_dir = "morph{}".format(version)
p = locate_resource(src_dir, lang)
file_handler = _open(p)
tmp_file_ = NamedTemporaryFile(delete=False)
tmp_file_.write(file_handler.read())
tmp_file_.close()
io = morfessor.MorfessorIO()
model = io.read_any_model(tmp_file_.name)
os.remove(tmp_file_.name)
return model
|
Return a morfessor model for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
|
juraj-google-style
|
def write_file(self, file_name, vasp4_compatible=False):
def _print_fortran_float(f):
"""Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = '{:.10E}'.format(f)
if (f >= 0):
return (((('0.' + s[0]) + s[2:12]) + 'E') + '{:+03}'.format((int(s[13:]) + 1)))
else:
return (((('-.' + s[1]) + s[3:13]) + 'E') + '{:+03}'.format((int(s[14:]) + 1)))
with zopen(file_name, 'wt') as f:
p = Poscar(self.structure)
comment = getattr(self, 'name', p.comment)
lines = (comment + '\n')
lines += ' 1.00000000000000\n'
latt = self.structure.lattice.matrix
lines += (' %12.6f%12.6f%12.6f\n' % tuple(latt[(0, :)]))
lines += (' %12.6f%12.6f%12.6f\n' % tuple(latt[(1, :)]))
lines += (' %12.6f%12.6f%12.6f\n' % tuple(latt[(2, :)]))
if (not vasp4_compatible):
lines += (''.join([('%5s' % s) for s in p.site_symbols]) + '\n')
lines += (''.join([('%6d' % x) for x in p.natoms]) + '\n')
lines += 'Direct\n'
for site in self.structure:
lines += ('%10.6f%10.6f%10.6f\n' % tuple(site.frac_coords))
lines += ' \n'
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(' {} {} {}\n'.format(a[0], a[1], a[2]))
for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))):
lines.append(_print_fortran_float(self.data[data_type][(i, j, k)]))
count += 1
if ((count % 5) == 0):
f.write(((' ' + ''.join(lines)) + '\n'))
lines = []
else:
lines.append(' ')
f.write(((' ' + ''.join(lines)) + ' \n'))
f.write(''.join(self.data_aug.get(data_type, [])))
write_spin('total')
if (self.is_spin_polarized and self.is_soc):
write_spin('diff_x')
write_spin('diff_y')
write_spin('diff_z')
elif self.is_spin_polarized:
write_spin('diff')
|
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
|
codesearchnet
|
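The CHGCAR writer above relies on the nested `_print_fortran_float` helper to reproduce Fortran-style scientific notation with a leading-zero mantissa. A standalone check of that transformation, re-implementing the helper with the same string manipulation so it can run in isolation:

```python
def print_fortran_float(f):
    # Same logic as the nested _print_fortran_float above: shift the mantissa so the
    # output starts with "0." (or "-.") and bump the exponent by one to compensate.
    s = '{:.10E}'.format(f)
    if f >= 0:
        return '0.' + s[0] + s[2:12] + 'E' + '{:+03}'.format(int(s[13:]) + 1)
    return '-.' + s[1] + s[3:13] + 'E' + '{:+03}'.format(int(s[14:]) + 1)

print(print_fortran_float(3.14159))   # 0.31415900000E+01
print(print_fortran_float(-0.002))    # -.20000000000E-02
```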
def substitute_symbol_table(table, version, max_id):
if (not table.table_type.is_shared):
raise ValueError('Symbol table to substitute from must be a shared table')
if (version <= 0):
raise ValueError(('Version must be greater than or equal to 1: %s' % version))
if (max_id < 0):
raise ValueError(('Max ID must be zero or positive: %s' % max_id))
if (max_id <= table.max_id):
symbols = (token.text for token in islice(table, max_id))
else:
symbols = chain((token.text for token in table), repeat(None, (max_id - table.max_id)))
return SymbolTable(table_type=SHARED_TABLE_TYPE, symbols=symbols, name=table.name, version=version, is_substitute=True)
|
Substitutes a given shared symbol table for another version.
* If the given table has **more** symbols than the requested substitute, then the generated
symbol table will be a subset of the given table.
* If the given table has **less** symbols than the requested substitute, then the generated
symbol table will have symbols with unknown text generated for the difference.
Args:
table (SymbolTable): The shared table to derive from.
version (int): The version to target.
max_id (int): The maximum ID allocated by the substitute, must be ``>= 0``.
Returns:
SymbolTable: The synthesized table.
|
codesearchnet
|
def add_entry(self, path_object):
if ((not is_root()) and (not (self.st_mode & PERM_WRITE)) and (not self.filesystem.is_windows_fs)):
exception = (IOError if IS_PY2 else OSError)
raise exception(errno.EACCES, 'Permission Denied', self.path)
if (path_object.name in self.contents):
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object.name] = path_object
path_object.parent_dir = self
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if (path_object.st_nlink == 1):
self.filesystem.change_disk_usage(path_object.size, path_object.name, self.st_dev)
|
Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
|
codesearchnet
|
def nested_update(d, u):
for (k, v) in list(u.items()):
if isinstance(v, collections.Mapping):
r = nested_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
|
Merge two nested dicts.
Nested dicts are sometimes used for representing various recursive structures. When
updating such a structure, it may be convenient to present the updated data as a
corresponding recursive structure. This function will then apply the update.
Args:
d: dict
dict that will be updated in-place. May or may not contain nested dicts.
u: dict
dict with contents that will be merged into ``d``. May or may not contain
nested dicts.
|
codesearchnet
|
def __init__(
self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):
if not key:
raise ValueError('Missing key.')
cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)
if cipher_mode is None:
raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))
if cipher_mode != AES.MODE_ECB and not initialization_vector:
raise ValueError('Missing initialization vector.')
super(AESDecrypter, self).__init__()
if cipher_mode == AES.MODE_ECB:
self._aes_cipher = AES.new(key, mode=cipher_mode)
else:
self._aes_cipher = AES.new(
key, IV=initialization_vector, mode=cipher_mode)
|
Initializes a decrypter.
Args:
cipher_mode (Optional[str]): cipher mode.
initialization_vector (Optional[bytes]): initialization vector.
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set, block cipher mode is not supported,
or initialization_vector is required and not set.
|
juraj-google-style
|
def _format_ase2clusgeo(obj, all_atomtypes=None):
totalAN = len(obj)
if (all_atomtypes is not None):
atomtype_set = set(all_atomtypes)
else:
atomtype_set = set(obj.get_atomic_numbers())
atomtype_lst = np.sort(list(atomtype_set))
n_atoms_per_type_lst = []
pos_lst = []
for atomtype in atomtype_lst:
condition = (obj.get_atomic_numbers() == atomtype)
pos_onetype = obj.get_positions()[condition]
n_onetype = pos_onetype.shape[0]
pos_lst.append(pos_onetype)
n_atoms_per_type_lst.append(n_onetype)
typeNs = n_atoms_per_type_lst
Ntypes = len(n_atoms_per_type_lst)
atomtype_lst
Apos = np.concatenate(pos_lst).ravel()
return (Apos, typeNs, Ntypes, atomtype_lst, totalAN)
|
Takes an ase Atoms object and returns numpy arrays and integers
which are read by the internal clusgeo. Apos is currently a flattened
out numpy array
Args:
obj (ase.Atoms): Atoms object to convert.
all_atomtypes (iterable of int, optional): Atomic numbers to include; defaults to the types present in obj.
|
codesearchnet
|
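A usage sketch for `_format_ase2clusgeo` above, assuming the `ase` package is installed and the function is in scope. Water has two element types, so the positions come back grouped by element, hydrogen first, since atomic numbers are sorted.

```python
from ase.build import molecule

# Sketch only: assumes _format_ase2clusgeo as defined in the entry above is in scope.
atoms = molecule("H2O")  # 2 hydrogens + 1 oxygen
Apos, typeNs, Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(atoms)

print(totalAN)        # 3
print(Ntypes)         # 2
print(atomtype_lst)   # [1 8] -- sorted atomic numbers (H, O)
print(typeNs)         # [2, 1] -- atom counts per type, same order
print(Apos.shape)     # (9,)   -- flattened (x, y, z) per atom, grouped by type
```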
def __init__(self, *others):
selectors = list()
heads = collections.defaultdict(set)
for other in others:
if isinstance(other, MultiFieldSelector):
for head, tail in other.heads.iteritems():
heads[head].add(tail)
elif isinstance(other, FieldSelector):
selectors.append(other)
else:
selectors.append(self.FieldSelector(other))
for selector in selectors:
chain = selector.selectors
if chain:
head = chain[0]
tail = self.FieldSelector(chain[1:]) if len(chain) > 1 else all
heads[head].add(tail)
else:
heads[None].add(all)
self.heads = dict(
(head, all if all in tail else MultiFieldSelector(*tail))
for head, tail in heads.iteritems()
) if None not in heads or heads[None] is not all else {None: all}
head_types = set(type(x) for x in self.heads)
self.has_int = int in head_types or long in head_types
self.has_string = any(issubclass(x, basestring) for x in head_types)
self.has_none = types.NoneType in head_types
self.complete = self.has_none and self.heads[None] is all
if self.has_none and (self.has_int or self.has_string):
raise ValueError(
"MultiFieldSelector cannot yet specify a list and a hash/"
"object at the same level: %r" % self.heads.keys()
)
|
Returns a MultiFieldSelector based on combining the passed-in
FieldSelector and MultiFieldSelector objects.
args:
``*others=``\ *FieldSelector*\ \|\ *iterable*
Each argument is interpreted as either a FieldSelector, or a
FieldSelector constructor.
|
juraj-google-style
|
def modify_module(channel, module_name, module_state):
gui = ui_embed.UI(channel, '{} updated'.format(module_name), '{} is now {}'.format(module_name, ('activated' if module_state else 'deactivated')), modulename=modulename)
return gui
|
Creates an embed UI containing the module modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
module_name (str): The name of the module that was updated
module_state (bool): The current state of the module
Returns:
embed: The created embed
|
codesearchnet
|
def to_sigproc_keyword(keyword, value=None):
keyword = bytes(keyword)
if value is None:
return np.int32(len(keyword)).tostring() + keyword
else:
dtype = header_keyword_types[keyword]
dtype_to_type = {b'<l' : np.int32,
b'str' : str,
b'<d' : np.float64,
b'angle' : to_sigproc_angle}
value_dtype = dtype_to_type[dtype]
if value_dtype is str:
return np.int32(len(keyword)).tostring() + keyword + np.int32(len(value)).tostring() + value
else:
return np.int32(len(keyword)).tostring() + keyword + value_dtype(value).tostring()
|
Generate a serialized string for a sigproc keyword:value pair
If value=None, just the keyword will be written with no payload.
Data type is inferred by keyword name (via a lookup table)
Args:
keyword (str): Keyword to write
value (None, float, str, double or angle): value to write to file
Returns:
value_str (str): serialized string to write to file.
|
juraj-google-style
|
def run(self, blocking: bool=True):
if not self._run_control_loop:
err = ("`run` called, but not using the internal control loop. Use"
" `start` instead")
raise RuntimeError(err)
self._setup()
self._heartbeat_reciever.start()
if blocking:
return self.loop.start()
else:
self._run_thread = _threading.Thread(target=self.loop.start,
daemon=True)
self._run_thread.start()
|
Run the internal control loop.
Args:
blocking (bool): Defaults to `True`. If set to `False`, will
initialize a thread to run the control loop.
Raises:
RuntimeError: If called when not using the internal control loop
via `self._run_control_loop`, set in the initializer of the
class
|
juraj-google-style
|
def has_cwd(state, dir, incorrect_msg='Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there.'):
expr = "[[ $PWD == '{}' ]]".format(dir)
_msg = state.build_message(incorrect_msg, fmt_kwargs={'dir': dir})
has_expr_exit_code(state, expr, output='0', incorrect_msg=_msg)
return state
|
Check whether the student is in the expected directory.
This check is typically used before using ``has_expr_output()``
to make sure the student didn't navigate somewhere else.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
dir: Directory that the student should be in. Always use the absolute path.
incorrect_msg: If specified, this overrides the automatically generated message in
case the student is not in the expected directory.
:Example:
If you want to be sure that the student is in ``/home/repl/my_dir``: ::
Ex().has_cwd('/home/repl/my_dir')
|
codesearchnet
|
def call_for_each_replica(self, fn, args=(), kwargs=None):
distribute_lib._require_cross_replica_or_default_context_extended(self)
if kwargs is None:
kwargs = {}
map_fn = functools.partial(dtensor_util.convert_inputs_to_dtensor, mesh=self._mesh)
d_args = nest.map_structure(map_fn, args)
d_kwargs = nest.map_structure(map_fn, kwargs)
with self._container_strategy().scope():
with dtensor_util.DTensorReplicaContext(self._container_strategy()):
dtensor_result = fn(*d_args, **d_kwargs)
return nest.map_structure(dtensor_util.DTensorDistributedValue, dtensor_result)
|
Run `fn` once per replica.
This is a method that expected by the strategy base class in its `run()`.
Args:
fn: function to run (will be run once per replica).
args: Tuple or list with positional arguments for `fn`.
kwargs: Dict with keyword arguments for `fn`.
Returns:
Merged return value of `fn` across all replicas.
|
github-repos
|
def torque_on(self):
data = []
data.append(10)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(1)
data.append(96)
send_data(data)
|
Enable the torques of Herkulex
In this mode, position control and velocity control
will work.
Args:
none
|
codesearchnet
|
def dump_session_params(path):
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
assert len(set(var)) == len(var), "TRAINABLE and MODEL variables have duplication!"
gvars = set([k.name for k in tf.global_variables()])
var = [v for v in var if v.name in gvars]
result = {}
for v in var:
result[v.name] = v.eval()
save_chkpt_vars(result, path)
|
Dump value of all TRAINABLE + MODEL variables to a dict, and save as
npz format (loadable by :func:`sessinit.get_model_loader`).
Args:
path(str): the file name to save the parameters. Must end with npz.
|
juraj-google-style
|
def create(self, resource):
return self.service.create(
resource, self.url_prefix, self.auth, self.session,
self.session_send_opts)
|
Create the given resource.
Args:
resource (intern.resource.boss.BossResource): Create a data model object with attributes matching those of the resource.
Returns:
(intern.resource.boss.BossResource): Returns resource of type requested on success.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def print_info(self, buf=None, format_=FileFormat.yaml,
skip_attributes=None, include_release=False):
data = self.validated_data().copy()
data.pop("config", None)
if self.config:
if isinstance(self, Package):
config_dict = self.data.get("config")
else:
config_dict = self.parent.data.get("config")
data["config"] = config_dict
if not include_release:
skip_attributes = list(skip_attributes or []) + list(package_release_keys)
buf = buf or sys.stdout
dump_package_data(data, buf=buf, format_=format_,
skip_attributes=skip_attributes)
|
Print the contents of the package.
Args:
buf (file-like object): Stream to write to.
format_ (`FileFormat`): Format to write in.
skip_attributes (list of str): List of attributes to not print.
include_release (bool): If True, include release-related attributes,
such as 'timestamp' and 'changelog'
|
juraj-google-style
|
def testBasic(self, count, batch_size, drop_remainder, num_parallel_calls):
components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return (math_ops.square(x), math_ops.square(y), math_ops.square(z))
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn).repeat(count).batch(batch_size, drop_remainder, num_parallel_calls)
get_next = self.getNext(dataset)
if drop_remainder:
dim0 = batch_size
else:
dim0 = None
self.assertEqual([ts.as_list() for ts in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))], [[dim0] + list(c.shape[1:]) for c in components])
num_full_batches = count * 7
for i in range(num_full_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(batch_size):
self.assertAllEqual(component[(i * batch_size + j) % 7] ** 2, result_component[j])
if not drop_remainder and count * 7 % batch_size > 0:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(count * 7 % batch_size):
self.assertAllEqual(component[(num_full_batches * batch_size + j) % 7] ** 2, result_component[j])
with self.assertRaises(errors.OutOfRangeError):
result = self.evaluate(get_next())
|
Tests the batch dataset logic for various input configurations.
Args:
count: the number of input elements
batch_size: the batch size
drop_remainder: whether a smaller batch size should be produced if batch
size does not divide number of inputs evenly
num_parallel_calls: the number batches to process asynchronously in
parallel
|
github-repos
|
def process_openxml_file(filename: str,
print_good: bool,
delete_if_bad: bool) -> None:
print_bad = not print_good
try:
file_good = is_openxml_good(filename)
file_bad = not file_good
if (print_good and file_good) or (print_bad and file_bad):
print(filename)
if delete_if_bad and file_bad:
log.warning("Deleting: {}", filename)
os.remove(filename)
except Exception as e:
log.critical("Uncaught error in subprocess: {!r}\n{}", e,
traceback.format_exc())
raise
|
Prints the filename of, or deletes, an OpenXML file depending on whether
it is corrupt or not.
Args:
filename: filename to check
print_good: if ``True``, then prints the filename if the file
appears good.
delete_if_bad: if ``True``, then deletes the file if the file
appears corrupt.
|
juraj-google-style
|
def ensure_tensor_on_device(self, **inputs):
return self._ensure_tensor_on_device(inputs, self.device)
|
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):
The tensors to place on `self.device`.
Recursive on lists **only**.
Return:
`Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.
|
github-repos
|
def insert(self, **fields):
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler([fields])
rows = compiler.execute_sql(return_id=True)
pk_field_name = self.model._meta.pk.name
return rows[0][pk_field_name]
return super().create(**fields).pk
|
Creates a new record in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
Returns:
The primary key of the record that was created.
|
juraj-google-style
|
def find_next(self, *strings, **kwargs):
start = kwargs.pop('start', None)
keys_only = kwargs.pop('keys_only', False)
staht = (start if (start is not None) else self.cursor)
for (start, stop) in [(staht, len(self)), (0, staht)]:
for i in range(start, stop):
for string in strings:
if (string in self[i]):
tup = (i, self[i])
self.cursor = (i + 1)
if keys_only:
return i
return tup
|
From the editor's current cursor position find the next instance of the
given string.
Args:
strings (iterable): String or strings to search for
start (int, optional): Index to start searching from; defaults to the current cursor position
keys_only (bool, optional): If True, return only the line index
Returns:
tup (tuple): Tuple of cursor position and line or None if not found
Note:
This function cycles the entire editor (i.e. cursor to length of
editor to zero and back to cursor position).
|
codesearchnet
|
def get_qubit_los(self, user_lo_config):
try:
_q_los = self.default_qubit_los.copy()
except KeyError:
raise PulseError('Qubit default frequencies not exist.')
for channel, lo_freq in user_lo_config.qubit_lo_dict().items():
_q_los[channel.index] = lo_freq
if _q_los == self.default_qubit_los:
return None
return _q_los
|
Embed default qubit LO frequencies from the backend and format them as a list.
If the configured LO frequencies are the same as the defaults, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of qubit LOs.
Raises:
PulseError: when LO frequencies are missing.
|
juraj-google-style
|
def GetSysFeeAmountByHeight(self, height):
hash = self.GetBlockHash(height)
return self.GetSysFeeAmount(hash)
|
Get the system fee for the specified block.
Args:
height (int): block height.
Returns:
int: the system fee amount for the block.
|
codesearchnet
|
def __init__(self, orig_image, dpi, save_image):
self._shreds = None
self.orig_img = orig_image
self.save_image = save_image
self._fg_mask = None
if dpi is None:
self.res_x, self.res_y = self._guess_dpi()
else:
self.res_x, self.res_y = dpi
|
Initializes a Sheet instance.
Args:
orig_image: cv.Mat instance with the original sheet image.
dpi: optional (x resolution, y resolution) tuple or None.
If set to None, will try to guess dpi.
save_image: A callback to save debug images with args (name, img)
|
juraj-google-style
|
def getindex(self, child, recursive=True, ignore=True):
for (i, c) in enumerate(self.data):
if (c is child):
return i
if recursive:
for (i, c) in enumerate(self.data):
if (ignore is True):
try:
if (not c.auth):
continue
except AttributeError:
pass
elif ignore:
doignore = False
for e in ignore:
if (e is True):
try:
if (not c.auth):
doignore = True
break
except AttributeError:
pass
elif ((e == c.__class__) or issubclass(c.__class__, e)):
doignore = True
break
if doignore:
continue
if isinstance(c, AbstractElement):
j = c.getindex(child, recursive)
if (j != (- 1)):
return i
return (- 1)
|
Get the index at which an element occurs, recursive by default!
Returns:
int
|
codesearchnet
|
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = TangoAndroidContactEventData()
first_name = self._GetRowValue(query_hash, row, 'first_name')
try:
decoded_text = base64_decode(first_name)
event_data.first_name = codecs.decode(decoded_text, 'utf-8')
except ValueError:
event_data.first_name = first_name
parser_mediator.ProduceExtractionWarning(
'unable to parse first name: {0:s}'.format(first_name))
last_name = self._GetRowValue(query_hash, row, 'last_name')
try:
decoded_text = base64_decode(last_name)
event_data.last_name = codecs.decode(decoded_text, 'utf-8')
except ValueError:
event_data.last_name = last_name
parser_mediator.ProduceExtractionWarning(
'unable to parse last name: {0:s}'.format(last_name))
event_data.birthday = self._GetRowValue(query_hash, row, 'birthday')
event_data.gender = self._GetRowValue(query_hash, row, 'gender')
status = self._GetRowValue(query_hash, row, 'status')
try:
decoded_text = base64_decode(status)
event_data.status = codecs.decode(decoded_text, 'utf-8')
except ValueError:
event_data.status = status
parser_mediator.ProduceExtractionWarning(
'unable to parse status: {0:s}'.format(status))
event_data.distance = self._GetRowValue(query_hash, row, 'distance')
is_friend = self._GetRowValue(query_hash, row, 'friend')
event_data.is_friend = False
if is_friend:
event_data.is_friend = True
event_data.friend_request_type = self._GetRowValue(
query_hash, row, 'friend_request_type')
friend_request_message = self._GetRowValue(
query_hash, row, 'friend_request_message')
try:
decoded_text = base64_decode(friend_request_message)
event_data.friend_request_message = codecs.decode(decoded_text, 'utf-8')
except ValueError:
event_data.friend_request_message = friend_request_message
parser_mediator.ProduceExtractionWarning(
'unable to parse friend request message: {0:s}'.format(friend_request_message))
timestamp = self._GetRowValue(query_hash, row, 'last_active_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACTIVE)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'last_access_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'friend_request_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_SENT)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a contact row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
juraj-google-style
|
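Every text column above goes through the same decode-or-fall-back pattern. A small self-contained version of that pattern, assuming the parser's base64_decode behaves like base64.b64decode (sample values are made up):

import base64
import codecs

def decode_field(value):
    """Return the base64-decoded UTF-8 text, or the raw value if decoding fails."""
    try:
        return codecs.decode(base64.b64decode(value), 'utf-8')
    except (ValueError, UnicodeDecodeError):
        return value

print(decode_field('QWxpY2U='))    # 'Alice'
print(decode_field('not base64'))  # falls back to the raw string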
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu, op_name):
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == 'NCHW':
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW(padding)
if op_name == 'Conv2D':
conv = nn_ops.conv2d(t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format)
elif op_name == 'Conv':
conv_format = 'CHANNELS_LAST' if data_format == 'NHWC' else 'CHANNELS_FIRST'
conv_padding, explicit_paddings = nn_ops.convert_padding(padding)
conv = gen_nn_ops.conv(t1, t2, strides=strides, padding=conv_padding, explicit_paddings=explicit_paddings, data_format=conv_format, dilations=dilations)
else:
raise ValueError('Invalid op name: %s' % op_name)
self.assertEqual(conv.dtype, dtype)
if data_format == 'NCHW':
conv = test_util.NCHWToNHWC(conv)
return conv
|
Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
dilations: Dilated rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
op_name: Name of the op to be tested
Returns:
Symbolic tensor value that can be used to execute the computation
|
github-repos
|
def run_inference(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:
while self.throttler.throttle_request(time.time() * _MILLISECOND_TO_SECOND):
self.logger.info('Delaying request for %d seconds due to previous failures', self.throttle_delay_secs)
time.sleep(self.throttle_delay_secs)
self.throttled_secs.inc(self.throttle_delay_secs)
try:
req_time = time.time()
predictions = self.request(batch, model, inference_args)
self.throttler.successful_request(req_time * _MILLISECOND_TO_SECOND)
return predictions
except Exception as e:
self.logger.error('exception raised as part of request, got %s', e)
raise
|
Runs inferences on a batch of examples. Calls a remote model for
predictions and will retry if a retryable exception is raised.
Args:
batch: A sequence of examples or features.
model: The model used to make inferences.
inference_args: Extra arguments for models whose inference call requires
extra parameters.
Returns:
An Iterable of Predictions.
|
github-repos
|
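The wrapper above sleeps while a throttler reports back-pressure and records whether each remote call succeeded. A stripped-down sketch of that control flow with a dummy throttler and request function; every name here is a placeholder rather than the Beam API:

import time

class SimpleThrottler:
    """Throttles for a cooldown window after any failed request."""

    def __init__(self, cooldown_secs=5):
        self.cooldown_secs = cooldown_secs
        self._blocked_until = 0.0

    def throttle_request(self, now):
        return now < self._blocked_until

    def successful_request(self, now):
        self._blocked_until = 0.0

    def failed_request(self, now):
        self._blocked_until = now + self.cooldown_secs

def call_remote(batch, throttler, request_fn, delay_secs=1):
    # Wait out any cooldown, make the call, then record the outcome.
    while throttler.throttle_request(time.time()):
        time.sleep(delay_secs)
    try:
        result = request_fn(batch)
        throttler.successful_request(time.time())
        return result
    except Exception:
        throttler.failed_request(time.time())
        raise

throttler = SimpleThrottler()
print(call_remote([1, 2, 3], throttler, lambda b: [x * 10 for x in b]))  # [10, 20, 30]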
def version(msg):
tc = typecode(msg)
if tc != 31:
raise RuntimeError("%s: Not a status operation message, expecting TC = 31" % msg)
msgbin = common.hex2bin(msg)
version = common.bin2int(msgbin[72:75])
return version
|
ADS-B Version
Args:
msg (string): 28-character hexadecimal message string, TC = 31
Returns:
int: version number
|
juraj-google-style
|
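The version number occupies three bits of the 112-bit frame; hex2bin and bin2int are simple representation converters. A dependency-free sketch of the same bit slicing — the sample message is fabricated to show the indexing only and is not a valid TC=31 frame:

def hex2bin(msg_hex):
    """Hexadecimal string -> zero-padded binary string."""
    return bin(int(msg_hex, 16))[2:].zfill(len(msg_hex) * 4)

def bin2int(bits):
    return int(bits, 2)

msg = 'F' * 28                  # fabricated 28-hex-character (112-bit) message
bits = hex2bin(msg)
version = bin2int(bits[72:75])  # the same three-bit slice used above
print(len(bits), version)       # 112 7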
def ConfigureLogging(
debug_output=False, filename=None, mode='w', quiet_mode=False):
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
logger = logging.getLogger()
if filename and filename.endswith('.gz'):
handler = CompressedFileHandler(filename, mode=mode)
elif filename:
handler = logging.FileHandler(filename, mode=mode)
else:
handler = logging.StreamHandler()
format_string = (
'%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
'<%(module)s> %(message)s')
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
if debug_output:
level = logging.DEBUG
elif quiet_mode:
level = logging.WARNING
else:
level = logging.INFO
logger.setLevel(level)
handler.setLevel(level)
logger.addHandler(handler)
|
Configures the logging root logger.
Args:
debug_output (Optional[bool]): True if the logging should include debug
output.
filename (Optional[str]): log filename.
mode (Optional[str]): log file access mode.
quiet_mode (Optional[bool]): True if the logging should not include
information output. Note that debug_output takes precedence over
quiet_mode.
|
juraj-google-style
|
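For context, a trimmed standard-library sketch of the same wiring — swap handlers, attach one formatter, and apply a single level to both logger and handler (the format string is copied from the entry above; the compressed-file handler is omitted):

import logging

def configure_logging(debug_output=False, filename=None, quiet_mode=False):
    logger = logging.getLogger()
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    handler = logging.FileHandler(filename) if filename else logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
        '<%(module)s> %(message)s'))
    level = logging.DEBUG if debug_output else (logging.WARNING if quiet_mode else logging.INFO)
    logger.setLevel(level)
    handler.setLevel(level)
    logger.addHandler(handler)

configure_logging(debug_output=True)
logging.getLogger(__name__).debug('debug output is now visible')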
def mask_from_embedding(emb):
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
|
Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
|
juraj-google-style
|
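A small NumPy rendering of the same idea: padding positions carry all-zero embeddings, so summing absolute values over the depth axis and testing for non-zero recovers the mask (shapes are tiny and illustrative):

import numpy as np

emb = np.zeros((1, 2, 3, 4))          # [batch, width, height, depth]
emb[0, 0, :2, :] = 1.0                # two non-padding positions
mask = (np.abs(emb).sum(axis=3, keepdims=True) != 0).astype(np.float32)
print(mask[0, :, :, 0])               # 1.0 where an embedding is present, else 0.0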
def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if f_start is None:
f_start = self.freqs[0]
if f_stop is None:
f_stop = self.freqs[-1]
i0 = np.argmin(np.abs(self.freqs - f_start))
i1 = np.argmin(np.abs(self.freqs - f_stop))
if i0 < i1:
plot_f = self.freqs[i0:i1 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])
else:
plot_f = self.freqs[i1:i0 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])
return plot_f, plot_data
|
Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
t_start (int): start time bin (optional)
t_stop (int): stop time bin (optional)
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
|
juraj-google-style
|
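The extraction above reduces to finding the nearest frequency-bin indices with argmin and tolerating either axis ordering. A self-contained NumPy sketch with made-up data:

import numpy as np

freqs = np.linspace(1400.0, 1420.0, 201)        # MHz axis, ascending
data = np.random.rand(16, len(freqs))           # [time, frequency]

f_start, f_stop = 1405.0, 1410.0
i0 = np.argmin(np.abs(freqs - f_start))
i1 = np.argmin(np.abs(freqs - f_stop))
lo, hi = (i0, i1) if i0 < i1 else (i1, i0)      # tolerate descending axes
plot_f = freqs[lo:hi + 1]
plot_data = data[:, lo:hi + 1]
print(plot_f[0], plot_f[-1], plot_data.shape)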
def get_svg_layers(svg_sources):
layers = []
width, height = None, None
def extract_length(attr):
'Extract length in pixels.'
match = CRE_MM_LENGTH.match(attr)
if match:
return INKSCAPE_PPmm.magnitude * float(match.group('length'))
else:
return float(attr)
for svg_source_i in svg_sources:
xml_root = etree.parse(svg_source_i)
svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
        width = max(extract_length(svg_root.attrib['width']), width or 0)
        height = max(extract_length(svg_root.attrib['height']), height or 0)
        # XPath string reconstructed: selects Inkscape layer groups.
        layers += svg_root.xpath('
                                 namespaces=INKSCAPE_NSMAP)
for i, layer_i in enumerate(layers):
layer_i.attrib['id'] = 'layer%d' % (i + 1)
return (width, height), layers
|
Collect layers from input svg sources.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
Returns:
    ((width, height), layers): The first item is the shape of the largest
        layer, and the second is a list of ``Element`` objects (from the
        :mod:`lxml.etree` module), one per SVG layer.
|
juraj-google-style
|
def generate_sb(date: datetime.datetime, project: str, programme_block: str) -> dict:
date = date.strftime('%Y%m%d')
instance_id = randint(0, 9999)
sb_id = 'SB-{}-{}-{:04d}'.format(date, project, instance_id)
return dict(id=sb_id, project=project, programme_block=programme_block)
|
Generate a Scheduling Block data object.
Args:
date (datetime.datetime): UTC date of the SBI
project (str): Project Name
programme_block (str): Programme
Returns:
dict: Scheduling Block data object containing the SBI ID, project, and programme block.
|
codesearchnet
|
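A quick stand-alone rendering of the ID scheme used above (the date, project name, and random instance number are arbitrary):

import datetime
from random import randint

date = datetime.datetime(2023, 5, 1).strftime('%Y%m%d')
sb_id = 'SB-{}-{}-{:04d}'.format(date, 'sip', randint(0, 9999))
print(sb_id)   # e.g. SB-20230501-sip-0042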
def eventFilter(self, object, event):
if (object is self.tree_scripts):
if (event.type() == QtCore.QEvent.ChildAdded):
item = self.tree_scripts.selectedItems()[0]
if not isinstance(item.value, Script):
print('ONLY SCRIPTS CAN BE DRAGGED')
return False
print(('XXX ChildAdded', self.tree_scripts.selectedItems()[0].name))
if (event.type() == QtCore.QEvent.ChildRemoved):
print(('XXX ChildRemoved', self.tree_scripts.selectedItems()[0].name))
if (event.type() == QtCore.QEvent.Drop):
print('XXX Drop')
return False
return False
|
TEMPORARY / UNDER DEVELOPMENT
THIS IS TO ALLOW COPYING OF PARAMETERS VIA DRAG AND DROP
Args:
object:
event:
Returns:
|
juraj-google-style
|
def _AddToTree(self, x, prevx):
self.s.add(x)
self.prev[x] = prevx
for y in self.right:
slack = self._CalcSlack(x, y)
if slack < self.slack[y]:
self.slack[y] = slack
self.slackx[y] = x
|
Adds |x| to the current augmenting tree.
x is a node which has already been matched to a node y in Right (which is
itself connected to prevx via a non-matching edge in the equality subgraph).
We indicate prevx comes before x in the tree so we can trace the path later.
Args:
x: Node which has already been matched to a node y in right
prevx: Previous node in Left along the path.
|
github-repos
|
def get_all_without_ethernet(self, start=0, count=(- 1), filter='', sort=''):
without_ethernet_client = ResourceClient(self._connection, '/rest/logical-downlinks/withoutEthernet')
return without_ethernet_client.get_all(start, count, filter=filter, sort=sort)
|
Gets a paginated collection of logical downlinks without ethernet. The collection is
based on optional sorting and filtering and is constrained by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
dict
|
codesearchnet
|
def plot_path(line, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
(ax, fig, plt) = get_ax3d_fig_plt(ax)
if ('color' not in kwargs):
kwargs['color'] = 'r'
if ('linewidth' not in kwargs):
kwargs['linewidth'] = 3
for k in range(1, len(line)):
vertex1 = line[(k - 1)]
vertex2 = line[k]
if (not coords_are_cartesian):
if (lattice is None):
raise ValueError('coords_are_cartesian False requires the lattice')
vertex1 = lattice.get_cartesian_coords(vertex1)
vertex2 = lattice.get_cartesian_coords(vertex2)
ax.plot(*zip(vertex1, vertex2), **kwargs)
return (fig, ax)
|
Adds a line passing through the coordinates listed in 'line' to a matplotlib Axes
Args:
line: list of coordinates.
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to red
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax
|
codesearchnet
|
def _maybe_init_run(self, experiment_name, run_name):
experiment_id = self._maybe_init_experiment(experiment_name)
cursor = self._db.cursor()
    cursor.execute(
        # SQL text reconstructed; the table and column names are assumed.
        'SELECT run_id FROM Runs WHERE experiment_id = ? AND run_name = ?',
        (experiment_id, run_name))
row = cursor.fetchone()
if row:
return row[0]
run_id = self._create_id()
started_time = 0
    cursor.execute(
        # SQL text reconstructed; the table and column names are assumed.
        'INSERT INTO Runs (experiment_id, run_id, run_name, inserted_time, started_time) '
        'VALUES (?, ?, ?, ?, ?)',
        (experiment_id, run_id, run_name, time.time(), started_time))
return run_id
|
Returns the ID for the given run, creating the row if needed.
Args:
experiment_name: name of experiment containing this run.
run_name: name of run.
|
juraj-google-style
|
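The method above is a get-or-create keyed on (experiment_id, run_name). The same pattern against an in-memory SQLite database; the schema here is deliberately simplified and assumed, not the real one:

import sqlite3
import time
import uuid

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE Runs (experiment_id TEXT, run_id TEXT, run_name TEXT, inserted_time REAL)')

def maybe_init_run(experiment_id, run_name):
    cursor = db.cursor()
    cursor.execute('SELECT run_id FROM Runs WHERE experiment_id = ? AND run_name = ?',
                   (experiment_id, run_name))
    row = cursor.fetchone()
    if row:
        return row[0]                       # reuse the existing row
    run_id = uuid.uuid4().hex
    cursor.execute('INSERT INTO Runs VALUES (?, ?, ?, ?)',
                   (experiment_id, run_id, run_name, time.time()))
    return run_id

first = maybe_init_run('exp1', 'run_a')
print(first == maybe_init_run('exp1', 'run_a'))   # True: second call returns the same ID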
def format(self, exclude_class=False):
if exclude_class:
msg = self.msg
else:
msg = ('%s: %s' % (self.__class__.__name__, self.msg))
if (len(self.params) != 0):
paramstring = '\n'.join([((str(key) + ': ') + str(val)) for (key, val) in self.params.items()])
msg += ('\nAdditional Information:\n' + paramstring)
return msg
|
Format this exception as a string including class name.
Args:
exclude_class (bool): Whether to exclude the exception class
name when formatting this exception
Returns:
string: a multiline string with the message, class name and
key value parameters passed to create the exception.
|
codesearchnet
|
def _process_image_files(name, filenames, texts, labels, num_shards):
assert (len(filenames) == len(texts))
assert (len(filenames) == len(labels))
spacing = np.linspace(0, len(filenames), (FLAGS.num_threads + 1)).astype(int)
ranges = []
for i in range((len(spacing) - 1)):
ranges.append([spacing[i], spacing[(i + 1)]])
print(('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)))
sys.stdout.flush()
coord = tf.train.Coordinator()
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
coord.join(threads)
print(('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames))))
sys.stdout.flush()
|
Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
|
codesearchnet
|
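The thread fan-out above splits the file list into contiguous, near-equal index ranges with np.linspace. The same partitioning in isolation:

import numpy as np

num_files, num_threads = 10, 4
spacing = np.linspace(0, num_files, num_threads + 1).astype(int)
ranges = [[spacing[i], spacing[i + 1]] for i in range(len(spacing) - 1)]
print(ranges)   # [[0, 2], [2, 5], [5, 7], [7, 10]]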
def delete_permissions(self, grp_name, resource):
self.project_service.set_auth(self._token_project)
self.project_service.delete_permissions(grp_name, resource)
|
Removes permissions from the group for the given resource.
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.Resource): Identifies which data
model object to operate on.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def get_range(self, name_prefix, vlan_id_range):
filter = '"\'name\' matches \'{}\\_%\'"'.format(name_prefix)
ethernet_networks = self.get_all(filter=filter, sort='vlanId:ascending')
vlan_ids = self.dissociate_values_or_ranges(vlan_id_range)
for net in ethernet_networks[:]:
if (int(net['vlanId']) not in vlan_ids):
ethernet_networks.remove(net)
return ethernet_networks
|
Gets a list of Ethernet Networks that match the 'given name_prefix' and the 'vlan_id_range'.
Examples:
>>> enet.get_range('Enet_name', '1-2,5')
# The result contains the ethernet network with names:
['Enet_name_1', 'Enet_name_2', 'Enet_name_5']
>>> enet.get_range('Enet_name', '2')
# The result contains the ethernet network with names:
['Enet_name_2']
Args:
name_prefix: The Ethernet Network prefix
vlan_id_range: A combination of values or ranges to be retrieved. For example, '1-10,50,51,500-700'.
Returns:
list: A list of Ethernet Networks.
|
codesearchnet
|
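The filtering above leans on dissociate_values_or_ranges, which expands a string such as '1-10,50,51' into individual VLAN IDs. A hedged guess at that expansion as a stand-alone helper (the real SDK method may differ in details):

def expand_ranges(spec):
    """Expand '1-3,5' into [1, 2, 3, 5]."""
    ids = set()
    for part in spec.split(','):
        if '-' in part:
            lo, hi = (int(p) for p in part.split('-'))
            ids.update(range(lo, hi + 1))
        else:
            ids.add(int(part))
    return sorted(ids)

print(expand_ranges('1-2,5'))   # [1, 2, 5]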
def _get_element_attr_or_none(document, selector, attribute):
element = document.cssselect(selector)
if element:
return element[0].get(attribute)
return None
|
Using a CSS selector, get the element and return the given attribute value, or None if no element.
Args:
document (HTMLElement) - HTMLElement document
selector (str) - CSS selector
attribute (str) - The attribute to get from the element
|
codesearchnet
|
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes):
result = urlfetch.fetch(
'%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token})))
if result.status_code != 200:
try:
error_description = json.loads(result.content)['error_description']
except (ValueError, KeyError):
error_description = ''
_logger.error('Token info endpoint returned status %s: %s',
result.status_code, error_description)
return
token_info = json.loads(result.content)
if 'email' not in token_info:
_logger.warning('Oauth token doesn\'t include an email address.')
return
if token_info.get('email_verified') != 'true':
_logger.warning('Oauth token email isn\'t verified.')
return
client_id = token_info.get('azp')
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
_logger.warning('Client ID is not allowed: %s', client_id)
return
_, sufficient_scopes = _process_scopes(scopes)
authorized_scopes = token_info.get('scope', '').split(' ')
if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes):
_logger.warning('Oauth token scopes don\'t match any acceptable scopes.')
return
os.environ[_ENV_AUTH_EMAIL] = token_info['email']
os.environ[_ENV_AUTH_DOMAIN] = ''
_logger.debug('Local dev returning user from token.')
|
Validate the oauth bearer token on the dev server.
Since the functions in the oauth module return only example results in local
development, this hits the tokeninfo endpoint and attempts to validate the
token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we
can get the user from the token.
Args:
token: String with the oauth token to validate.
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
|
juraj-google-style
|
def search(self, scope, search, **kwargs):
data = {'scope': scope, 'search': search}
return self.http_list('/search', query_data=data, **kwargs)
|
Search GitLab resources matching the provided string.
Args:
scope (str): Scope of the search
search (str): Search string
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSearchError: If the server failed to perform the request
Returns:
GitlabList: A list of dicts describing the resources found.
|
codesearchnet
|
def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True, use_time_reversal=True, comment=None):
return cls(kpts=[ngkpt], kpt_shifts=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak, comment=(comment if comment else 'Monkhorst-Pack scheme with user-specified shiftk'))
|
Convenient static constructor for a Monkhorst-Pack mesh.
Args:
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
shiftk: Shift to be applied to the kpoints.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
|
codesearchnet
|
def save_args(conditions, out_path):
if isinstance(conditions, argparse.Namespace):
args = vars(conditions)
else:
args = conditions
try:
os.makedirs(out_path)
except OSError:
pass
with tempdir(prefix='args', dir=out_path) as tempd:
path = os.path.join(tempd, 'args.json')
with open(path, 'w') as f:
json.dump(args, f, indent=4)
new_path = os.path.join(out_path, 'args')
shutil.move(path, new_path)
|
A util function to save experiment condition for job table.
Args:
conditions (:class:`argparse.Namespace` or dict): Experiment conditions
to show on a job table. Keys are show as table header and values
are show at a job row.
out_path (str): Output directory name to save conditions.
|
codesearchnet
|
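The write-into-a-tempdir-then-move dance above exists so that a partially written args file never appears at the final path. A standard-library sketch of the same atomic-ish save (paths and keys are placeholders):

import json
import os
import tempfile

def save_json_atomically(obj, out_dir, name='args'):
    os.makedirs(out_dir, exist_ok=True)
    # Write to a temporary file in the same directory, then rename into place.
    fd, tmp_path = tempfile.mkstemp(prefix='args', dir=out_dir)
    with os.fdopen(fd, 'w') as f:
        json.dump(obj, f, indent=4)
    os.replace(tmp_path, os.path.join(out_dir, name))

save_json_atomically({'lr': 0.1, 'epochs': 3}, tempfile.mkdtemp())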
def _optimize_tf_model(self, graph_def, input_tensors, output_tensors, quant_mode):
if self.saved_model_dir or quant_mode.is_quantization_aware_trained_model():
return graph_def
try:
graph = _convert_to_constants.disable_lower_using_switch_merge(graph_def)
optimized_graph = _run_graph_optimizations(graph, input_tensors, output_tensors, config=self._grappler_config(['function']))
return optimized_graph
except Exception:
return graph_def
|
Run a Grappler pass to optimize the TensorFlow graph.
Args:
graph_def: Frozen GraphDef to be optimized.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
quant_mode: the quantization mode.
Returns:
The optimized TensorFlow graph.
|
github-repos
|
def __init__(self, message, raises=False):
super(CustodianError, self).__init__(message)
self.raises = raises
self.message = message
|
Initializes the error with a message.
Args:
message (str): Message passed to Exception
raises (bool): Whether this should be raised outside custodian
|
juraj-google-style
|
def __init__(self, dfk, *args, threshold=20, interval=5):
self.dfk = dfk
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.strategy = Strategy(dfk)
self.callback = self.strategy.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
|
Initialize the flowcontrol object.
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
|
juraj-google-style
|
def send_to_prv_exchange(self, user_id, message=None):
exchange = ('prv_%s' % user_id.lower())
msg = json.dumps(message, cls=ZEngineJSONEncoder)
log.debug(('Sending following users "%s" exchange:\n%s ' % (exchange, msg)))
self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
|
Send messages through logged in users private exchange.
Args:
user_id string: User key
message dict: Message object
|
codesearchnet
|
def __init__(self, graph, name=None):
if not isinstance(graph, BipartiteGraph):
raise ValueError(
"Given graph is not instance of Bipartite:", graph)
self._graph = graph
if name:
self.name = name
else:
self.name = super(_Node, self).__str__()
self._hash = None
|
Construct a new node.
Args:
    graph: The BipartiteGraph instance this node belongs to.
    name: Specifying the name of this node.
        If not given, use strings returned from __str__ method.
|
juraj-google-style
|