code (stringlengths 20-4.93k) | docstring (stringlengths 33-1.27k) | source (stringclasses 3 values) |
---|---|---|
def patch(self, payload, append_to_arrays=True):
if (not isinstance(payload, dict)):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = self.__class__.set_id_in_fkeys(payload)
if append_to_arrays:
for key in payload:
val = payload[key]
if (type(val) == list):
val.extend(getattr(self, key))
payload[key] = list(set(val))
payload = self.check_boolean_fields(payload)
payload = self.__class__.add_model_name_to_payload(payload)
self.debug_logger.debug('PATCHING payload {}'.format(json.dumps(payload, indent=4)))
res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
self.write_response_html_to_file(res, 'bob.html')
res.raise_for_status()
json_res = res.json()
self.debug_logger.debug('Success')
self.attrs = json_res
return json_res
|
Patches current record and updates the current instance's 'attrs'
attribute to reflect the new changes.
Args:
payload - hash. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
|
codesearchnet
|
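A minimal sketch of the append_to_arrays merge in isolation, using made-up attribute and payload values rather than the real model class; incoming list values are extended with the current attribute value and de-duplicated, so ordering is not preserved.
current_documents = ['doc1']                 # stands in for getattr(self, 'documents')
payload = {'documents': ['doc2']}
val = payload['documents']
val.extend(current_documents)
payload['documents'] = list(set(val))        # de-duplicate; set() makes order arbitrary
print(sorted(payload['documents']))          # ['doc1', 'doc2']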
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.application_namespace.write(tstream, kmip_version=kmip_version)
self.application_data.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(ApplicationSpecificInformation, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
|
Write the data encoding the ApplicationSpecificInformation object to a
stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
juraj-google-style
|
def add_annotation(self, state_or_vector, text, **kwargs):
if (isinstance(state_or_vector, (list, np.ndarray, tuple)) and (len(state_or_vector) == 3)):
vec = state_or_vector
else:
raise Exception(('Position needs to be specified by a qubit ' + 'state or a 3D vector.'))
self.annotations.append({'position': vec, 'text': text, 'opts': kwargs})
|
Add a text or LaTeX annotation to Bloch sphere,
parametrized by a qubit state or a vector.
Args:
state_or_vector (array_like):
Position for the annotation.
Qobj of a qubit or a vector of 3 elements.
text (str):
Annotation text.
You can use LaTeX, but remember to use raw string
e.g. r"$\\langle x \\rangle$"
or escape backslashes
e.g. "$\\\\langle x \\\\rangle$".
**kwargs:
Options as for mplot3d.axes3d.text, including:
fontsize, color, horizontalalignment, verticalalignment.
Raises:
Exception: If input not array_like or tuple.
|
codesearchnet
|
def put(self, resource_json: Dict[str, Any], parent_bundle: Optional[Dict[str, Any]]=None) -> None:
if parent_bundle is None:
self.resources_by_uri[resource_json['url']] = resource_json
else:
self.resources_by_uri[resource_json['url']] = parent_bundle
|
Puts the given resource into this collection.
Adds the resource represented by `resource_json` found inside
`parent_bundle` into this collection for subsequent lookup via the Get
method. `parent_bundle` may be None if `resource_json` is not located inside
a bundle.
Args:
resource_json: The JSON object representing the resource.
parent_bundle: The bundle `resource_json` is located inside, if any.
|
github-repos
|
def extract_images(self, f):
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = self._read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = self._read32(bytestream)
rows = self._read32(bytestream)
cols = self._read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
|
Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
|
juraj-google-style
|
def get_stream_action_type(stream_arn):
stream_type_map = {'kinesis': awacs.kinesis.Action, 'dynamodb': awacs.dynamodb.Action}
stream_type = stream_arn.split(':')[2]
try:
return stream_type_map[stream_type]
except KeyError:
raise ValueError(("Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn)))
|
Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
|
codesearchnet
|
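The helper above only inspects the third colon-separated field of the ARN; a self-contained sketch of that parsing, with plain strings standing in for the awacs Action classes:
def stream_type_from_arn(stream_arn):
    # 'arn:aws:kinesis:us-east-1:123456789012:stream/my-stream' -> 'kinesis'
    return stream_arn.split(':')[2]

print(stream_type_from_arn('arn:aws:kinesis:us-east-1:123456789012:stream/my-stream'))
# -> kinesis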
def __fill_buffer(self, size=0):
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position, ((self.__position + read_size) - 1))
self.__buffer_position = 0
self.__eof = (len(self.__buffer) < read_size)
|
Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE].
|
codesearchnet
|
def _peek(self, chars=1):
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
|
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
|
juraj-google-style
|
def subgroup_tile(cls, tile_assignment, subgroup_modes):
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('SubgroupTile assignment must be of type np.ndarray')
if not isinstance(subgroup_modes, list):
raise TypeError('subgroup_modes in subgroup manual must be of type list')
if len(tile_assignment.shape) < len(subgroup_modes):
raise TypeError('SubgroupTile assignment must have rank larger than length of subgroup_modes')
for sharding_type in subgroup_modes:
if sharding_type not in [xla_data_pb2.OpSharding.REPLICATED, xla_data_pb2.OpSharding.MANUAL]:
raise TypeError('Each sharding_type in subgroup_modes in subgroup manual must be of type xla_data_pb2.OpSharding.REPLICATED or xla_data_pb2.OpSharding.MANUAL')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), last_tile_dims=list(subgroup_modes)))
|
Returns a subgroup manual sharding attribute.
This is similar to tile(), but tile_assignment has one or more dimensions
more than the tensor, and subgroup_modes define the sharding types in the last
dimensions of tile_assignment.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
subgroup_modes: sharding types for the dimensions beyond the tensor shape
rank.
Raises:
TypeError: tile_assignment was not of np.array type or subgroup_modes
has unsupported sharding type.
|
github-repos
|
def all_tokens(self, delimiter=' '):
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
|
Return a set of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens
(see :meth:`audiomate.annotations.Label.tokenized`).
Returns:
:class:`set`: A set of distinct tokens.
|
codesearchnet
|
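A self-contained sketch of the token-collection idea, with plain strings standing in for audiomate Label objects (hypothetical data, not the real API):
labels = ['hello world', 'world again']
tokens = set()
for label in labels:
    tokens = tokens.union(set(label.split(' ')))
print(sorted(tokens))  # ['again', 'hello', 'world']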
def initialize(self):
return self._initializer
|
Initialize underlying iterators.
Returns:
A list of any initializer ops that should be run.
|
github-repos
|
def residual_block_v1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
if conv_shortcut:
shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_3_bn')(x)
x = layers.Add(name=name + '_add')([shortcut, x])
x = layers.Activation('relu', name=name + '_out')(x)
return x
|
A residual block for ResNet*_v1.
Args:
x: Input tensor.
filters: Number of filters in the bottleneck layer.
kernel_size: Kernel size of the bottleneck layer. Defaults to `3`.
stride: Stride of the first layer. Defaults to `1`.
conv_shortcut: Use convolution shortcut if `True`, otherwise
use identity shortcut. Defaults to `True`.
name (optional): Name of the block.
Returns:
Output tensor for the residual block.
|
github-repos
|
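A hedged usage sketch, assuming residual_block_v1 above is in scope together with tf.keras's layers and backend modules (as in the Keras applications code it resembles); the input shape is illustrative only.
import tensorflow as tf
from tensorflow.keras import layers, backend  # used by residual_block_v1 above

inputs = tf.keras.Input(shape=(56, 56, 64))
outputs = residual_block_v1(inputs, filters=64, name='conv2_block1')
model = tf.keras.Model(inputs, outputs)
print(model.output_shape)  # (None, 56, 56, 256), i.e. 4 * filters channels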
def reorder_resources(self, resource_ids, hxl_update=True):
dataset_id = self.data.get('id')
if (not dataset_id):
raise HDXError('Dataset has no id! It must be read, created or updated first.')
data = {'id': dataset_id, 'order': resource_ids}
self._write_to_hdx('reorder', data, 'package_id')
if hxl_update:
self.hxl_update()
|
Reorder resources in dataset according to provided list.
If only some resource ids are supplied then these are
assumed to be first and the other resources will stay in
their original order.
Args:
resource_ids (List[str]): List of resource ids
hxl_update (bool): Whether to call package_hxl_update. Defaults to True.
Returns:
None
|
codesearchnet
|
def merge_and_fit(self, segment):
self.points = sort_segment_points(self.points, segment.points)
return self
|
Merges another segment with this one, ordering the points based on a
distance heuristic
Args:
segment (:obj:`Segment`): Segment to merge with
Returns:
:obj:`Segment`: self
|
juraj-google-style
|
def remove_item(self, item):
for idx, _item in enumerate(self.items):
if item == _item:
del self.items[idx]
return True
return False
|
Remove the specified item from the menu.
Args:
item (MenuItem): the item to be removed.
Returns:
bool: True if the item was removed; False otherwise.
|
juraj-google-style
|
def __expand_meta_datas(meta_datas, meta_datas_expanded):
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded)
|
expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
|
codesearchnet
|
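A self-contained rerun of the docstring example with plain dicts (the leading underscores are dropped so the helper can be called directly):
def expand_meta_datas(meta_datas, meta_datas_expanded):
    if isinstance(meta_datas, dict):
        meta_datas_expanded.append(meta_datas)
    elif isinstance(meta_datas, list):
        for meta_data in meta_datas:
            expand_meta_datas(meta_data, meta_datas_expanded)

dict1, dict2, dict3 = {'a': 1}, {'b': 2}, {'c': 3}
expanded = []
expand_meta_datas([[dict1, dict2], dict3], expanded)
print(expanded)  # [{'a': 1}, {'b': 2}, {'c': 3}]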
def UpdateChainAndProcess(self, parser_mediator, registry_key, **kwargs):
parser_mediator.AppendToParserChain(self)
try:
self.Process(parser_mediator, registry_key, **kwargs)
finally:
parser_mediator.PopFromParserChain()
|
Updates the parser chain and processes a Windows Registry key or value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Raises:
ValueError: If the Windows Registry key is not set.
|
codesearchnet
|
def draw(self, filename, color=True):
verify_dependencies(['pgv'])
if not hasattr(self, '_results'):
raise RuntimeError("Graph cannot be drawn before it is executed. "
"Try calling run() first.")
g = pgv.AGraph(directed=True)
g.node_attr['colorscheme'] = 'set312'
for elem in self._results:
if not hasattr(elem, 'history'):
continue
log = elem.history
while log:
source_from = log.parent[6] if log.parent else ''
s_node = hash((source_from, log[2]))
s_color = stim_list.index(log[2])
s_color = s_color % 12 + 1
t_node = hash((log[6], log[7]))
t_style = 'filled,' if color else ''
t_style += 'dotted' if log.implicit else ''
if log[6].endswith('Extractor'):
t_color = '
elif log[6].endswith('Filter'):
t_color = '
else:
t_color = '
r_node = hash((log[6], log[5]))
r_color = stim_list.index(log[5])
r_color = r_color % 12 + 1
if color:
g.add_node(s_node, label=log[2], shape='ellipse',
style='filled', fillcolor=s_color)
g.add_node(t_node, label=log[6], shape='box',
style=t_style, fillcolor=t_color)
g.add_node(r_node, label=log[5], shape='ellipse',
style='filled', fillcolor=r_color)
else:
g.add_node(s_node, label=log[2], shape='ellipse')
g.add_node(t_node, label=log[6], shape='box',
style=t_style)
g.add_node(r_node, label=log[5], shape='ellipse')
g.add_edge(s_node, t_node, style=t_style)
g.add_edge(t_node, r_node, style=t_style)
log = log.parent
g.draw(filename, prog='dot')
|
Render a plot of the graph via pygraphviz.
Args:
filename (str): Path to save the generated image to.
color (bool): If True, will color graph nodes based on their type,
otherwise will draw a black-and-white graph.
|
juraj-google-style
|
def update_script(self, script_body):
uri = "{}/script".format(self.data['uri'])
return self._helper.update(script_body, uri=uri)
|
Updates the configuration script of the enclosure-group with the specified URI.
Args:
script_body: Configuration script.
Returns:
dict: Updated enclosure group.
|
juraj-google-style
|
def map(self, map_fn: Callable[..., _Tout], *trees: Tree[_Tin], is_leaf: Optional[LeafFn]=None) -> Tree[_Tout]:
return self.backend.map(map_fn, *trees, is_leaf=is_leaf)
|
Same as `tree.map_structure`.
Args:
map_fn: Worker function
*trees: Nested input to pass to the `map_fn`
is_leaf: Don't recurse into leaf if `is_leaf(node)` is `True`
Returns:
The nested structure after `map_fn` has been applied.
|
github-repos
|
def fit_effective_mass(distances, energies, parabolic=True):
if parabolic:
fit = np.polyfit(distances, energies, 2)
c = (2 * fit[0])
else:
def f(x, alpha, d):
top = (np.sqrt(((((4 * alpha) * d) * (x ** 2)) + 1)) - 1)
bot = (2 * alpha)
return (top / bot)
bounds = ((1e-08, (- np.inf)), (np.inf, np.inf))
(popt, _) = curve_fit(f, distances, energies, p0=[1.0, 1.0], bounds=bounds)
c = (2 * popt[1])
eff_mass = (((angstrom_to_bohr ** 2) / eV_to_hartree) / c)
return eff_mass
|
Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`.
|
codesearchnet
|
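A worked check of the parabolic branch on synthetic data, with the unit conversion written out as hbar^2/m_e ~= 7.62 eV*Angstrom^2; the module-level constants angstrom_to_bohr and eV_to_hartree used in the source are assumed to encode the same factor.
import numpy as np

hbar2_over_m0 = 7.61996          # eV * Angstrom^2 (approximate)
target_mass = 0.3                # in units of the electron rest mass

k = np.linspace(0.0, 0.1, 20)                          # reciprocal Angstroms
energies = 0.5 * hbar2_over_m0 * k ** 2 / target_mass  # parabolic band, eV

curvature = 2 * np.polyfit(k, energies, 2)[0]          # d2E/dk2 in eV * Angstrom^2
print(hbar2_over_m0 / curvature)                       # ~0.3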
def _GetAttributeContainerByIndex(self, container_type, index):
sequence_number = (index + 1)
query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format(container_type, sequence_number)
try:
self._cursor.execute(query)
except sqlite3.OperationalError as exception:
raise IOError('Unable to query storage file with error: {0!s}'.format(exception))
row = self._cursor.fetchone()
if row:
identifier = identifiers.SQLTableIdentifier(container_type, sequence_number)
if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):
serialized_data = zlib.decompress(row[0])
else:
serialized_data = row[0]
if self._storage_profiler:
self._storage_profiler.Sample('read', container_type, len(serialized_data), len(row[0]))
attribute_container = self._DeserializeAttributeContainer(container_type, serialized_data)
attribute_container.SetIdentifier(identifier)
return attribute_container
count = self._CountStoredAttributeContainers(container_type)
index -= count
serialized_data = self._GetSerializedAttributeContainerByIndex(container_type, index)
attribute_container = self._DeserializeAttributeContainer(container_type, serialized_data)
if attribute_container:
identifier = identifiers.SQLTableIdentifier(container_type, sequence_number)
attribute_container.SetIdentifier(identifier)
return attribute_container
|
Retrieves a specific attribute container.
Args:
container_type (str): attribute container type.
index (int): attribute container index.
Returns:
AttributeContainer: attribute container or None if not available.
Raises:
IOError: when there is an error querying the storage file.
OSError: when there is an error querying the storage file.
|
codesearchnet
|
def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch, message_type, message_format, sanitize, force_disordered=False):
if force_disordered:
assert not sanitize
extra_fields = test_example_pb2.ExtraFields()
extra_fields.string_value = 'IGNORE ME'
extra_fields.bool_value = False
extra_msg = extra_fields.SerializeToString()
batch = [extra_msg + msg for msg in batch]
batch = np.array(batch, dtype=object)
batch = np.reshape(batch, batch_shape)
field_names = [f.name for f in fields]
output_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, vtensor = self._decode_module.decode_proto(batch, message_type=message_type, field_names=field_names, output_types=output_types, message_format=message_format, sanitize=sanitize)
vlist = sess.run([sizes] + vtensor)
sizes = vlist[0]
value_tensors = vlist[1:]
self.assertTrue(np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
self.assertEqual(len(sizes.flat), len(case_sizes))
self.assertTrue(np.all(sizes.flat == np.array(case_sizes, dtype=np.int32)))
field_dict = dict(zip(field_names, value_tensors))
self._compareProtos(batch_shape, sizes, fields, field_dict)
|
Run decode tests on a batch of messages.
Args:
fields: list of test_example_pb2.FieldSpec (types and expected values)
case_sizes: expected sizes array
batch_shape: the shape of the input tensor of serialized messages
batch: list of serialized messages
message_type: descriptor name for messages
message_format: format of messages, 'text' or 'binary'
sanitize: whether to sanitize binary protobuf inputs
force_disordered: whether to force fields encoded out of order.
|
github-repos
|
def ReadManyFromPath(filepath):
with io.open(filepath, mode="r", encoding="utf-8") as filedesc:
return ReadManyFromFile(filedesc)
|
Reads a Python object stored in a specified YAML file.
Args:
filepath: A filepath to the YAML file.
Returns:
A Python data structure corresponding to the YAML in the given file.
|
juraj-google-style
|
def console_print(con: tcod.console.Console, x: int, y: int, fmt: str) -> None:
lib.TCOD_console_printf(_console(con), x, y, _fmt(fmt))
|
Print a color formatted string on a console.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
fmt (AnyStr): A unicode or bytes string optionally using color codes.
.. deprecated:: 8.5
Use :any:`Console.print_` instead.
|
juraj-google-style
|
def set_clbit(self, clbit, element):
self.clbit_layer[self.cregs.index(clbit)] = element
|
Sets the clbit to the element
Args:
clbit (cbit): Element of self.cregs.
element (DrawElement): Element to set in the clbit
|
juraj-google-style
|
def auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):
if isinstance(dataset, distribute_types.DistributedDatasetInterface):
return dataset.auto_shard(num_shards, index)
if dataset.options().experimental_distribute.auto_shard_policy != AutoShardPolicy.OFF:
if num_replicas_in_sync is None:
num_replicas_in_sync = 1
if isinstance(dataset, data_types.DatasetV1):
return distribute._AutoShardDatasetV1(dataset, num_shards, index, num_replicas_in_sync)
else:
return distribute._AutoShardDataset(dataset, num_shards, index, num_replicas_in_sync)
else:
return dataset
|
Shard the input pipeline by sharding the underlying list of files.
Args:
dataset: A `tf.data.Dataset` instance, typically the result of a bunch of
dataset transformations.
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel. Same usage as in `tf.data.Dataset.shard`.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Same usage as in `tf.data.Dataset.shard`.
num_replicas_in_sync: An integer representing the total number of replicas
across all workers. This is used in the rewrite when sharding by data.
Returns:
A modified `Dataset` obtained by updating the pipeline sharded by the
files. The input dataset will be returned if we cannot automatically
determine a good way to shard the input dataset.
|
github-repos
|
def _maybe_download_corpora(tmp_dir, dataset_split):
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if dataset_split == problem.DatasetSplit.TRAIN:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
elif dataset_split == problem.DatasetSplit.EVAL:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
return all_files, urls_path
|
Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
|
juraj-google-style
|
def AddAnalysisReport(self, analysis_report):
self._RaiseIfNotWritable()
analysis_report = self._PrepareAttributeContainer(analysis_report)
self.analysis_reports.append(analysis_report)
|
Adds an analysis report.
Args:
analysis_report (AnalysisReport): analysis report.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|
juraj-google-style
|
def CompileFilter(self, filter_expression):
filter_parser = pfilter.BaseParser(filter_expression).Parse()
matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation)
self._filter_expression = filter_expression
self._matcher = matcher
|
Compiles the filter expression.
The filter expression contains an object filter expression.
Args:
filter_expression (str): filter expression.
Raises:
ParseError: if the filter expression cannot be parsed.
|
codesearchnet
|
def history(self, image):
res = self._get(self._url('/images/{0}/history', image))
return self._result(res, True)
|
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(str): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def run(self, data, max_epochs=1):
self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})
try:
self._logger.info("Engine run starting with max_epochs={}.".format(max_epochs))
start_time = time.time()
self._fire_event(Events.STARTED)
while self.state.epoch < max_epochs and not self.should_terminate:
self.state.epoch += 1
self._fire_event(Events.EPOCH_STARTED)
hours, mins, secs = self._run_once_on_dataset()
self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.state.epoch, hours, mins, secs)
if self.should_terminate:
break
self._fire_event(Events.EPOCH_COMPLETED)
self._fire_event(Events.COMPLETED)
time_taken = time.time() - start_time
hours, mins, secs = _to_hours_mins_secs(time_taken)
self._logger.info("Engine run complete. Time taken %02d:%02d:%02d" % (hours, mins, secs))
except BaseException as e:
self._logger.error("Engine run is terminating due to exception: %s.", str(e))
self._handle_exception(e)
return self.state
|
Runs the process_function over the passed data.
Args:
data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).
max_epochs (int, optional): max epochs to run for (default: 1).
Returns:
State: output state.
|
juraj-google-style
|
def union_of_bboxes(height, width, bboxes, erosion_rate=0.0, to_int=False):
x1, y1 = width, height
x2, y2 = 0, 0
for b in bboxes:
w, h = b[2] - b[0], b[3] - b[1]
lim_x1, lim_y1 = b[0] + erosion_rate * w, b[1] + erosion_rate * h
lim_x2, lim_y2 = b[2] - erosion_rate * w, b[3] - erosion_rate * h
x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])
x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])
return x1, y1, x2, y2
|
Calculate union of bounding boxes.
Args:
height (float): Height of image or space.
width (float): Width of image or space.
bboxes (list): List like bounding boxes. Format is `[x_min, y_min, x_max, y_max]`.
erosion_rate (float): How much each bounding box can be shrunk, useful for erosive cropping.
Set this in range [0, 1]. 0 will not be erosive at all, 1.0 can make any bbox to lose its volume.
|
juraj-google-style
|
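A quick usage sketch with made-up boxes, assuming union_of_bboxes above is in scope and numpy is imported as np; with erosion_rate=0 the result is simply the enclosing box of all inputs.
import numpy as np

boxes = [[10, 20, 50, 60], [40, 10, 80, 55]]
x1, y1, x2, y2 = union_of_bboxes(height=100, width=100, bboxes=boxes)
print(x1, y1, x2, y2)  # 10.0 10.0 80.0 60.0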
def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None) -> None:
if to_proto and (not callable(to_proto)):
raise TypeError('to_proto must be callable.')
if from_proto and (not callable(from_proto)):
raise TypeError('from_proto must be callable.')
_proto_function_registry.register((proto_type, to_proto, from_proto), collection_name)
|
Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
`variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
|
github-repos
|
def _check_root_tag(self, root):
supported = self.supported_tags()
if (root.tag in supported):
return
error = 'Document root element ({0}) not one of ({1})'
raise UnsupportedRootElementError(message=error.format(root.tag, supported), expected=supported, found=root.tag)
|
Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError
|
codesearchnet
|
def get_client(self, name):
mech = self.get(name)
return mech if isinstance(mech, ClientMechanism) else None
|
Like :meth:`.get`, but only mechanisms inheriting
:class:`ClientMechanism` will be returned.
Args:
name: The SASL mechanism name.
Returns:
The mechanism object or ``None``
|
juraj-google-style
|
def check_mailfy(self, query, kwargs={}):
data = self.launchQueryForMode(query=query, mode="mailfy")
if self._somethingFound(data, mode="mailfy"):
return data
return None
|
Verifies a mailfy query on this platform.
This might be redefined in any class inheriting from Platform. The only
condition is that any of this should return a dictionary as defined.
Args:
-----
query: The element to be searched.
kwargs: Dictionary with extra parameters. Just in case.
Return:
-------
Returns the collected data if exists or None if not.
|
juraj-google-style
|
def _run(broker, graph=None, root=None, context=None, inventory=None):
if (not root):
context = (context or HostContext)
broker[context] = context()
return dr.run(graph, broker=broker)
if os.path.isdir(root):
return process_dir(broker, root, graph, context, inventory=inventory)
else:
with extract(root) as ex:
return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)
|
run is a general interface that is meant for stand-alone scripts to use
when executing insights components.
Args:
root (str): None will causes a host collection in which command and
file specs are run. A directory or archive path will cause
collection from the directory or archive, and only file type specs
or those that depend on `insights.core.context.HostArchiveContext`
will execute.
component (function or class): The component to execute. Will only execute
the component and its dependency graph. If None, all components with
met dependencies will execute.
Returns:
broker: object containing the result of the evaluation.
|
codesearchnet
|
def get_params_and_defaults(param_list, db):
return [[p, d] for p, d in db.get_all_values_of_all_params().items()]
|
Deduce [parameter, default] pairs from simulations available in the db.
Args:
param_list (list): List of parameters to query for.
db (DatabaseManager): Database where to query for defaults.
|
juraj-google-style
|
def _dedup_strings(device_strs):
new_device_strs = []
for device_str, vals in itertools.groupby(device_strs):
num = len(list(vals))
if num == 1:
new_device_strs.append(device_str)
else:
new_device_strs.append('%s (x%d)' % (device_str, num))
return new_device_strs
|
Groups together consecutive identical strings.
For example, given:
['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']
This function returns:
['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
Args:
device_strs: A list of strings, each representing a device.
Returns:
A copy of the input, but identical consecutive strings are merged into a
single string.
|
github-repos
|
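Usage matching the docstring example, assuming _dedup_strings above is in scope; note that itertools.groupby only merges consecutive duplicates, so non-adjacent repeats stay separate.
print(_dedup_strings(['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']))
# -> ['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
print(_dedup_strings(['GPU 1', 'GPU 2', 'GPU 1']))
# -> ['GPU 1', 'GPU 2', 'GPU 1']  (non-consecutive repeats are kept)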
def add_skip_connection(self, u, v, connection_type):
if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]:
raise ValueError(
"connection_type should be NetworkDescriptor.CONCAT_CONNECT "
"or NetworkDescriptor.ADD_CONNECT."
)
self.skip_connections.append((u, v, connection_type))
|
Add a skip-connection to the descriptor.
Args:
u: Number of convolutional layers before the starting point.
v: Number of convolutional layers before the ending point.
connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.
|
juraj-google-style
|
def _send(self, line):
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
|
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
|
juraj-google-style
|
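The line-ending normalization from _send, extracted as a pure function for illustration (no socket or logger involved):
def ensure_crlf(line):
    if line.endswith('\r\n'):
        return line
    if line.endswith('\n'):
        return line[:-1] + '\r\n'
    return line + '\r\n'

for raw in ('EHLO example.com\r\n', 'EHLO example.com\n', 'EHLO example.com'):
    print(repr(ensure_crlf(raw)))  # every result ends with '\r\n'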
def get_paths(self, key):
final_paths = []
if key in self.__cli:
paths = self.__cli[key] or []
from_conf = False
else:
paths = self.__config.get(key) or []
from_conf = True
for path in flatten_list(paths):
final_path = self.__abspath(path, from_conf)
if final_path:
final_paths.append(final_path)
return final_paths
|
Same as `ConfigParser.get_path` for a list of paths.
Args:
key: str, the key to lookup the paths with
Returns:
list: The paths.
|
juraj-google-style
|
class PerceiverMultimodalPostprocessor(nn.Module):
def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool=False):
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.input_is_dict = input_is_dict
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> Mapping[str, torch.Tensor]:
if not self.input_is_dict:
if modality_sizes is None:
raise ValueError('Modality sizes should be specified if input is not a dictionary.')
inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
outputs = {modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None) for modality, postprocessor in self.modalities.items()}
return outputs
|
Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single
postprocessor.
Args:
modalities (`Mapping[str, PostprocessorType]`):
Dictionary mapping modality name to postprocessor class for that modality.
input_is_dict (`bool`, *optional*, defaults to `False`):
If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If
False, input is a tensor which is sliced up during postprocessing by *modality_sizes*.
|
github-repos
|
def __init__(self, filename, events=None):
self.filename = os.path.abspath(filename)
self.stat = os.stat(self.filename)
self.start_datetime, self.end_datetime = None, None
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if events is not None:
for ev in events:
self.append(ev)
|
List of ABINIT events.
Args:
filename: Name of the file
events: List of Event objects
|
juraj-google-style
|
def auto_forward(auto=True):
global __auto_forward_state
prev = __auto_forward_state
__auto_forward_state = auto
yield
__auto_forward_state = prev
|
Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool
|
juraj-google-style
|
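A self-contained sketch of the same pattern: a generator that flips a module-level flag and restores it on exit. The snippet above omits the contextmanager decorator, which the original library presumably applies; this demo uses contextlib explicitly and a stand-in flag.
import contextlib

_auto_forward_state = False

@contextlib.contextmanager
def auto_forward_demo(auto=True):
    global _auto_forward_state
    prev = _auto_forward_state
    _auto_forward_state = auto
    try:
        yield
    finally:
        _auto_forward_state = prev

with auto_forward_demo(True):
    print(_auto_forward_state)  # True inside the context
print(_auto_forward_state)      # False again afterwards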
def get_value(self, tau):
tau = np.asarray(tau)
(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients
k = get_kernel_value(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, tau.flatten())
return np.asarray(k).reshape(tau.shape)
|
Compute the value of the term for an array of lags
Args:
tau (array[...]): An array of lags where the term should be
evaluated.
Returns:
The value of the term for each ``tau``. This will have the same
shape as ``tau``.
|
codesearchnet
|
def _ParseHeader(self, format_type, value_data):
data_type_map_name = self._HEADER_DATA_TYPE_MAP_NAMES.get(format_type, None)
if (not data_type_map_name):
raise errors.ParseError('Unsupported format type: {0:d}'.format(format_type))
data_type_map = self._GetDataTypeMap(data_type_map_name)
try:
header = self._ReadStructureFromByteStream(value_data, 0, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse header value with error: {0!s}'.format(exception))
header_data_size = data_type_map.GetByteSize()
if (format_type == self._FORMAT_TYPE_10):
header_data_size = header.signature
cache_header = AppCompatCacheHeader()
cache_header.header_size = header_data_size
cache_header.number_of_cached_entries = getattr(header, 'number_of_cached_entries', 0)
return cache_header
|
Parses the header.
Args:
format_type (int): format type.
value_data (bytes): value data.
Returns:
AppCompatCacheHeader: header.
Raises:
ParseError: if the value data could not be parsed.
|
codesearchnet
|
def seek(self, offset: int, whence: Literal[0, 1, 2]=0) -> int:
|
Changes the current position of the file.
Args:
offset: Offset from the position to a reference point.
whence: The reference point, with 0 meaning the beginning of the file,
1 meaning the current position, or 2 meaning the end of the file.
Returns:
The position from the beginning of the file.
|
github-repos
|
def clear_signature_defs(tflite_model):
model = tflite_model
if not isinstance(tflite_model, bytearray):
model = bytearray(tflite_model)
return signature_def_util.ClearSignatureDefs(model)
|
Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer.
Args:
tflite_model: TFLite model buffer to remove signature_defs.
Returns:
buffer: A TFLite model binary identical to model buffer with
no SignatureDef metadata.
Raises:
ValueError:
tflite_model buffer does not contain a valid TFLite model.
|
github-repos
|
def restore(self, state):
self.storage.restore(state.get('storage'))
dump_walker = state.get('dump_walker')
if dump_walker is not None:
dump_walker = self.storage.restore_walker(dump_walker)
self.dump_walker = dump_walker
self.next_id = state.get('next_id', 1)
|
Restore the state of this subsystem from a prior call to dump().
Calling restore must be properly sequenced with calls to other
subsystems that include stream walkers so that their walkers are
properly restored.
Args:
state (dict): The results of a prior call to dump().
|
juraj-google-style
|
def delete_user_role(self, user, role):
self.project_service.set_auth(self._token_project)
self.project_service.delete_user_role(user, role)
|
Remove role from given user.
Args:
user (string): User name.
role (string): Role to remove.
Raises:
requests.HTTPError on failure.
|
codesearchnet
|
def _DefaultValueConstructorForField(field):
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if (field.label == _FieldDescriptor.LABEL_REPEATED):
if (field.has_default_value and (field.default_value != [])):
raise ValueError(('Repeated field default value not empty list: %s' % field.default_value))
if (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener((_OneofListener(message, field) if (field.containing_oneof is not None) else message._listener_for_children))
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
return field.default_value
return MakeScalarDefault
|
Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
|
codesearchnet
|
def _DocPackageFromTop(self, packages, showprivate=False, showinh=False):
appIndex = ''
if not isinstance(packages, list):
packages = [packages]
if os.path.exists('content'):
shutil.rmtree('content')
os.makedirs('content')
appIndex += r % ('API Index')
for i in range(len(packages)):
package = packages[i]
try:
name = package.__displayname__
except AttributeError:
name = package.__name__
path = 'content/%s' % package.__name__
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
meta = 'About %s\n%s\n' % (name, '='*len('About ' + name))
author = getattr(package, "__author__", None)
license = getattr(package, "__license__", None)
copyright = getattr(package, "__copyright__", None)
version = getattr(package, "__version__", None)
if author: meta += '\n* Author: %s' % author
if license: meta += '\n* License: %s' % license
if copyright: meta += '\n* Copyright: %s' % copyright
if version: meta += '\n* Version: %s' % version
about = '%s/%s' % (path, 'index.rst')
this_toc = r % (name)
this_toc += self._MakePackagePages(package, showprivate=showprivate, showinh=showinh)
this_toc = this_toc.replace('%s/' % path, '')
with open(about, 'w') as f:
f.write('%s\n\n' % meta)
if package.__doc__:
f.write(package.__doc__)
f.write(this_toc)
appIndex += '\n %s' % about
return appIndex
|
Generates all of the documentation for given packages and
appends new toctrees to the index. All documentation pages will be under the
set relative path.
Args:
packages (list(module)): A package or list of packages that contain submodules to document
showprivate (bool): A flag for whether or not to display private members
Returns:
str: The new content to append to the index
|
juraj-google-style
|
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))
msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]
else:
msg = '"%s"' % tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\n', output_tensor, '\n', summarize=num_elements, output_stream=output_stream)
def _show_part_tensor(tensor):
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE, tensor, tensor)
def _show_full_tensor(tensor):
return _print_tensor(tensor_name, -1, tensor, tensor)
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_PART_TENSOR:
return _show_part_tensor
if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_NAN_INF, tensor_tracer_flags.TRACE_MODE_NORM, tensor_tracer_flags.TRACE_MODE_FULL_TENSOR, tensor_tracer_flags.TRACE_MODE_MAX_ABS, tensor_tracer_flags.TRACE_MODE_SUMMARY, tensor_tracer_flags.TRACE_MODE_HISTORY):
return _show_full_tensor
raise RuntimeError('Full tensor support is not available with trace mode %s' % self._parameters.trace_mode)
|
Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
|
github-repos
|
def delete_metadata(self, resource, keys):
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.delete(resource, keys)
|
Deletes the given key-value pairs associated with the given resource.
Will attempt to delete all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys (list)
Raises:
HTTPErrorList on failure.
|
codesearchnet
|
def returns_collection(return_type: FhirPathDataType) -> bool:
return return_type and return_type.returns_collection()
|
Indicates if return_type will evaluate to a collection.
Args:
return_type: The data type to describe.
Returns:
True in the following circumstances
- `return_type` represents an element with cardinality greater than one.
- `return_type` represents an element with a cardinality less than or
equal to one, but that element is a child of a collection and will
evaluate to a collection. For example, the path Patient.name.use will
return a collection, despite 'use' being a scalar, because it is a child
of the collection, 'name.'
False if `return_type` represents a scalar element whose parents are all
also scalars.
|
github-repos
|
def create(cls, endpoint_name, json_body, original_response):
if endpoint_name == "property/value_report":
return ValueReportResponse(endpoint_name, json_body, original_response)
if endpoint_name == "property/rental_report":
return RentalReportResponse(endpoint_name, json_body, original_response)
prefix = endpoint_name.split("/")[0]
if prefix == "block":
return BlockResponse(endpoint_name, json_body, original_response)
if prefix == "zip":
return ZipCodeResponse(endpoint_name, json_body, original_response)
if prefix == "msa":
return MsaResponse(endpoint_name, json_body, original_response)
return PropertyResponse(endpoint_name, json_body, original_response)
|
Factory for creating the correct type of Response based on the data.
Args:
endpoint_name (str) - The endpoint of the request, such as "property/value"
json_body - The response body in json format.
original_response (response object) - server response returned from an http request.
|
juraj-google-style
|
def podcasts(self, *, device_id=None):
if (device_id is None):
device_id = self.device_id
podcast_list = []
for chunk in self.podcasts_iter(device_id=device_id, page_size=49995):
podcast_list.extend(chunk)
return podcast_list
|
Get a listing of subscribed podcast series.
Parameters:
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
Returns:
list: Podcast series dict.
|
codesearchnet
|
def index_file(self, f, overwrite=False):
if isinstance(f, six.string_types):
f = self.layout.get_file(f)
if ((f.path in self.file_index) and (not overwrite)):
return
if ('suffix' not in f.entities):
return
md = self._get_metadata(f.path)
for (md_key, md_val) in md.items():
if (md_key not in self.key_index):
self.key_index[md_key] = {}
self.key_index[md_key][f.path] = md_val
self.file_index[f.path][md_key] = md_val
|
Index metadata for the specified file.
Args:
f (BIDSFile, str): A BIDSFile or path to an indexed file.
overwrite (bool): If True, forces reindexing of the file even if
an entry already exists.
|
codesearchnet
|
def GetEventFormatter(self, event):
data_type = getattr(event, 'data_type', None)
if (not data_type):
return None
return formatters_manager.FormattersManager.GetFormatterObject(event.data_type)
|
Retrieves the event formatter for a specific event type.
Args:
event (EventObject): event.
Returns:
EventFormatter: event formatter or None.
|
codesearchnet
|
def getCard(self, name):
cards = self.projectCards
for card in cards:
if card.name.upper() == name.upper():
return card
return None
|
Retrieve card object for given card name.
Args:
name (str): Name of card to be retrieved.
Returns:
:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.
|
juraj-google-style
|
def add_payload(self, key, val, append=False):
if append:
self._payload.setdefault(key, []).append(val)
else:
self._payload[key] = val
|
Add a key value pair to payload for this request.
Args:
key (str): The payload key.
val (str): The payload value.
append (bool, default:False): Indicate whether the value should be appended or
overwritten.
|
juraj-google-style
|
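The append versus overwrite behaviour in isolation, using a plain dict standing in for self._payload:
payload = {}
payload.setdefault('tag', []).append('alpha')  # append=True path
payload.setdefault('tag', []).append('beta')
payload['owner'] = 'alice'                     # append=False path overwrites
payload['owner'] = 'bob'
print(payload)  # {'tag': ['alpha', 'beta'], 'owner': 'bob'}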
def assert_broadcastable(weights, values):
with ops.name_scope(None, 'assert_broadcastable', (weights, values)) as scope:
with ops.name_scope(None, 'weights', (weights,)) as weights_scope:
weights = ops.convert_to_tensor(weights, name=weights_scope)
weights_shape = array_ops.shape(weights, name='shape')
weights_rank = array_ops.rank(weights, name='rank')
weights_rank_static = tensor_util.constant_value(weights_rank)
with ops.name_scope(None, 'values', (values,)) as values_scope:
values = ops.convert_to_tensor(values, name=values_scope)
values_shape = array_ops.shape(values, name='shape')
values_rank = array_ops.rank(values, name='rank')
values_rank_static = tensor_util.constant_value(values_rank)
if weights_rank_static is not None and values_rank_static is not None:
if weights_rank_static == 0:
return control_flow_ops.no_op(name='static_scalar_check_success')
if weights_rank_static != values_rank_static:
raise ValueError(f'{_ASSERT_BROADCASTABLE_ERROR_PREFIX} values.rank={values_rank_static}. weights.rank={weights_rank_static}. values.shape={values.shape}. weights.shape={weights.shape}. Received weights={weights}, values={values}')
weights_shape_static = tensor_util.constant_value(weights_shape)
values_shape_static = tensor_util.constant_value(values_shape)
if weights_shape_static is not None and values_shape_static is not None:
ndims = len(values_shape_static)
assert ndims == len(weights_shape_static)
for i in range(ndims):
if weights_shape_static[i] not in (1, values_shape_static[i]):
raise ValueError(f'{_ASSERT_BROADCASTABLE_ERROR_PREFIX} Mismatch at dim {i}. values.shape={values_shape_static}, weights.shape={weights_shape_static}. Received weights={weights}, values={values}')
return control_flow_ops.no_op(name='static_dims_check_success')
is_scalar = math_ops.equal(0, weights_rank, name='is_scalar')
data = (_ASSERT_BROADCASTABLE_ERROR_PREFIX, 'weights.shape=', weights.name, weights_shape, 'values.shape=', values.name, values_shape, 'is_scalar=', is_scalar)
is_valid_shape = cond.cond(is_scalar, lambda: is_scalar, lambda: _has_valid_nonscalar_shape(weights_rank, weights_shape, values_rank, values_shape), name='is_valid_shape')
return control_flow_assert.Assert(is_valid_shape, data, name=scope)
|
Asserts `weights` can be broadcast to `values`.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We
let weights be either scalar, or the same rank as the target values, with each
dimension either 1, or the same as the corresponding values dimension.
Args:
weights: `Tensor` of weights.
values: `Tensor` of values to which weights are applied.
Returns:
`Operation` raising `InvalidArgumentError` if `weights` has incorrect shape.
`no_op` if static checks determine `weights` has correct shape.
Raises:
ValueError: If static checks determine `weights` has incorrect shape.
|
github-repos
|
def _get_parser_call_method(self, parser_to_method):
def inner_call(args=None, instance=None):
'Allows to call the method invoked from the command line or\n provided argument.\n\n Args:\n args: list of arguments to parse, defaults to command line\n arguments\n instance: an instance of the decorated class. If instance is\n None, the default, and __init__ is decorated the object will be\n instantiated on the fly from the command line arguments\n '
parser = self._cls.parser
namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
if (instance is None):
if ('__init__' not in parser_to_method):
raise ParseThisError("'__init__' method is not decorated. Please provide an instance to '{}.parser.call' or decorate the '__init___' method with 'create_parser'".format(self._cls.__name__))
instance = _call_method_from_namespace(self._cls, '__init__', namespace)
method_name = parser_to_method[namespace.method]
return _call_method_from_namespace(instance, method_name, namespace)
return inner_call
|
Return the parser special method 'call' that handles sub-command
calling.
Args:
parser_to_method: mapping of the parser registered name
to the method it is linked to
|
codesearchnet
|
def forward(self, x):
embeddings = self.embedding_convPxP(x).flatten(2)
embeddings = nn.functional.pad(embeddings, (1, 0))
embeddings = embeddings.permute(0, 2, 1)
batch_size, sequence_length, embedding_dim = embeddings.shape
embeddings = embeddings + self.positional_encoding_1d(batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype)
for i in range(4):
embeddings = self.transformer_encoder[i](embeddings)
return embeddings
|
Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)
|
github-repos
|
def _parent_info(self):
parent_doc = self.parent
if (parent_doc is None):
parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join((self._client._database_string, 'documents'))
else:
parent_path = parent_doc._document_path
expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))
return (parent_path, expected_prefix)
|
Get fully-qualified parent path and prefix for this collection.
Returns:
Tuple[str, str]: Pair of
* the fully-qualified (with database and project) path to the
parent of this collection (will either be the database path
or a document path).
* the prefix to a document in this collection.
|
codesearchnet
|
def build(X_df=None, y_df=None):
if (X_df is None):
(X_df, _) = load_data()
if (y_df is None):
(_, y_df) = load_data()
features = get_contrib_features()
mapper_X = ballet.feature.make_mapper(features)
X = mapper_X.fit_transform(X_df)
encoder_y = get_target_encoder()
y = encoder_y.fit_transform(y_df)
return {'X_df': X_df, 'features': features, 'mapper_X': mapper_X, 'X': X, 'y_df': y_df, 'encoder_y': encoder_y, 'y': y}
|
Build features and target
Args:
X_df (DataFrame): raw variables
y_df (DataFrame): raw target
Returns:
dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y
|
codesearchnet
|
def matmul_without_tf32(a, b, *args, **kwargs):
if config.tensor_float_32_execution_enabled() and a.dtype == 'float32':
a = math_ops.cast(a, 'float64')
b = math_ops.cast(b, 'float64')
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == 'complex64':
a = math_ops.cast(a, 'complex128')
b = math_ops.cast(b, 'complex128')
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
|
Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying some other op or functions works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
be used with complex64
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
|
github-repos
|
def output_sector_csv(self, csv_path, file_dict_key, out_path):
csv_file = (csv_path + '{0}_{1}_{2}_{3}.csv'.format(file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)))
if exists(csv_file):
csv_data = pd.read_csv(csv_file)
if (self.inds is None):
lon_obj = csv_data.loc[:, 'Centroid_Lon']
lat_obj = csv_data.loc[:, 'Centroid_Lat']
self.inds = np.where(((((self.ne_lat >= lat_obj) & (self.sw_lat <= lat_obj)) & (self.ne_lon >= lon_obj)) & (self.sw_lon <= lon_obj)))[0]
if (np.shape(self.inds)[0] > 0):
csv_data = csv_data.reindex(np.array(self.inds))
sector_csv_filename = (out_path + '{0}_{1}_{2}_{3}.csv'.format(file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)))
print(('Output sector csv file ' + sector_csv_filename))
csv_data.to_csv(sector_csv_filename, na_rep='nan', float_format='%0.5f', index=False)
os.chmod(sector_csv_filename, 438)
else:
print('No {0} {1} sector data found'.format(self.member, self.run_date.strftime('%Y%m%d')))
else:
print('No {0} {1} csv file found'.format(self.member, self.run_date.strftime('%Y%m%d')))
return
|
Segment forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_total'
out_path (str): Path to output new segmented csv files.
Returns:
Segmented forecast tracks in a csv file.
|
codesearchnet
|
def slice_element_urls(element_definition: ElementDefinition) -> List[str]:
result: List[str] = []
if proto_utils.field_is_set(element_definition, 'type'):
type_refs: List[StructureDefinition] = proto_utils.get_value_at_field(element_definition, 'type')
profile_lists = [cast(Any, t).profile for t in type_refs]
urls = [cast(Any, profile).value for profile in itertools.chain.from_iterable(profile_lists)]
result.extend(urls)
return result
|
Returns the list of profile urls for the given slice element.
Args:
element_definition: The `ElementDefinition` whose profile urls we are
retrieving.
Returns:
A list of strings representing the element's profile urls.
|
github-repos
|
def _validate_alias_command_level(alias, command):
alias_collision_table = AliasManager.build_collision_table([alias])
if (not alias_collision_table):
return
command_collision_table = AliasManager.build_collision_table([command])
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
if (set(alias_collision_levels) & set(command_collision_levels)):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
|
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
|
codesearchnet
|
def from_versions(cls, versions):
range = cls(None)
range.bounds = []
for version in dedup(sorted(versions)):
lower = _LowerBound(version, True)
upper = _UpperBound(version, True)
bound = _Bound(lower, upper)
range.bounds.append(bound)
return range
|
Create a range from a list of versions.
This method creates a range that contains only the given versions and
no other. Typically the range looks like (for eg) "==3|==4|==5.1".
Args:
versions: List of Version objects.
Returns:
`VersionRange` object.
|
juraj-google-style
|
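A rough standalone sketch of the same idea without the rez `Version`/`_Bound` classes: deduplicate and sort the inputs, then emit one exact-match bound per version, giving the "==3|==4|==5.1" shape mentioned above. The naive dotted-number sort key is an assumption for illustration only.

```python
def exact_range(versions):
    """Build an '==a|==b|...' style range string from version strings."""
    def key(v):
        # Naive numeric sort key, e.g. "5.1" -> (5, 1).
        return tuple(int(p) for p in v.split('.'))

    unique = sorted(set(versions), key=key)   # dedup + sort, like dedup(sorted(...))
    return '|'.join('==' + v for v in unique)

print(exact_range(['4', '3', '5.1', '4']))   # ==3|==4|==5.1
```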
def _check_sensor_platform_consistency(self, sensor):
ref_sensor = SENSORS.get(self.platform, None)
if ref_sensor and not sensor == ref_sensor:
logger.error('Sensor-Platform mismatch: {} is not a payload '
'of {}. Did you choose the correct reader?'
.format(sensor, self.platform))
|
Make sure sensor and platform are consistent
Args:
sensor (str) : Sensor name from YAML dataset definition
    Logs an error if the sensor does not match the reference sensor for the platform.
|
juraj-google-style
|
def compute(self, batch_values, accumulator=None):
pass
|
Compute a step in this computation, returning a new accumulator.
This method computes a step of the computation described by this Combiner.
If an accumulator is passed, the data in that accumulator is also used; so
compute(batch_values) results in f(batch_values), while
compute(batch_values, accumulator) results in
merge(f(batch_values), accumulator).
Args:
batch_values: A list of ndarrays representing the values of the inputs for
this step of the computation.
accumulator: the current accumulator. Can be None.
Returns:
An accumulator that includes the passed batch of inputs.
|
github-repos
|
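The `compute`/`merge` contract is easiest to see with a concrete accumulator. Below is a toy mean combiner (not the real implementation) whose accumulator is a `(sum, count)` pair, showing that `compute(batch, acc)` behaves like `merge(f(batch), acc)`:

```python
import numpy as np

class MeanCombiner:
    """Toy combiner: the accumulator is (running_sum, running_count)."""

    def compute(self, batch_values, accumulator=None):
        batch = np.concatenate([np.ravel(v) for v in batch_values])
        step = (batch.sum(), batch.size)
        return step if accumulator is None else self.merge([step, accumulator])

    def merge(self, accumulators):
        totals, counts = zip(*accumulators)
        return (sum(totals), sum(counts))

    def extract(self, accumulator):
        total, count = accumulator
        return total / count

c = MeanCombiner()
acc = c.compute([np.array([1.0, 2.0])])        # f(batch)
acc = c.compute([np.array([3.0, 4.0])], acc)   # merge(f(batch), acc)
print(c.extract(acc))                          # 2.5
```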
def pretty_print_config_to_json(self, configs):
descriptor = self.get_directory_list_doc(configs)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': '))
|
JSON string description of a protorpc.remote.Service in a discovery doc.
Args:
configs: Either a single dict or a list of dicts containing the service
configurations to list.
Returns:
string, The directory list document as a JSON string.
|
juraj-google-style
|
def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
if any_symbolic_tensors((x1, x2)):
return Isclose(equal_nan=equal_nan).symbolic_call(x1, x2, rtol, atol)
return backend.numpy.isclose(x1, x2, rtol, atol, equal_nan)
|
Return whether two tensors are element-wise almost equal.
Args:
x1: First input tensor.
x2: Second input tensor.
rtol: Relative tolerance.
atol: Absolute tolerance.
equal_nan: If `True`, element-wise NaNs are considered equal.
Returns:
Output boolean tensor.
|
github-repos
|
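For intuition, NumPy's rule for the same operation is `|x1 - x2| <= atol + rtol * |x2|`, with NaNs compared unequal unless `equal_nan=True`; the backends above are assumed to follow the same convention. A quick NumPy-only illustration:

```python
import numpy as np

x1 = np.array([1.0, 1e-9, np.nan])
x2 = np.array([1.0 + 5e-6, 0.0, np.nan])

print(np.isclose(x1, x2))                        # [ True  True False]
print(np.isclose(x1, x2, rtol=1e-7, atol=0.0))   # [False False False]
print(np.isclose(x1, x2, equal_nan=True))        # [ True  True  True]
```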
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding):
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ['TF_USE_DEEP_CONV2D'] = '0'
values_expect = self.evaluate([conv])
os.environ['TF_USE_DEEP_CONV2D'] = '1'
values_test = self.evaluate([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-05, atol=1e-05)
|
Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution.
padding: Padding type.
|
github-repos
|
def trigger(self, event_name, *args, **kwargs):
ev = Event(event_name, self)
ev.trigger(*args, **kwargs)
return ev
|
Trigger an event on this context.
Params:
event_name (string):
Event name to trigger
Args and kwargs are passed to each handler - see the bubbler.Event class
for more information.
Returns:
bubbler.Event:
Event instance after execution of all handlers
|
juraj-google-style
|
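A stripped-down sketch of the trigger pattern described above, with stand-in `Event` and context classes (handler storage and lookup here are invented; the real `bubbler.Event` does more):

```python
class Event:
    """Minimal stand-in: looks up handlers on the context and calls them."""

    def __init__(self, name, context):
        self.name = name
        self.context = context
        self.results = []

    def trigger(self, *args, **kwargs):
        for handler in self.context.handlers.get(self.name, []):
            self.results.append(handler(*args, **kwargs))


class Context:
    def __init__(self):
        self.handlers = {}

    def on(self, event_name, handler):
        self.handlers.setdefault(event_name, []).append(handler)

    def trigger(self, event_name, *args, **kwargs):
        ev = Event(event_name, self)
        ev.trigger(*args, **kwargs)
        return ev


ctx = Context()
ctx.on('greet', lambda name: 'hello ' + name)
print(ctx.trigger('greet', 'world').results)   # ['hello world']
```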
def jsonRender(self, def_buf):
try:
ret_dict = SerialBlock()
ret_dict[Field.Meter_Address] = self.getMeterAddress()
for fld in def_buf:
compare_fld = fld.upper()
if ((not ('RESERVED' in compare_fld)) and (not ('CRC' in compare_fld))):
ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
    except Exception:
        ekm_log(traceback.format_exc())
return ''
return json.dumps(ret_dict, indent=4)
|
Translate the passed serial block into string only JSON.
Args:
def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.
Returns:
str: JSON rendering of meter record.
|
codesearchnet
|
def get_ethernet_networks(self):
network_uris = self.data.get('networkUris')
networks = []
if network_uris:
for uri in network_uris:
networks.append(self._ethernet_networks.get_by_uri(uri))
return networks
|
Gets a list of the ethernet networks associated with the current uplink set.
Returns:
list: Associated ethernet networks.
|
juraj-google-style
|
def ndtr(x, name='ndtr'):
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name='x')
if (dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]):
raise TypeError(('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype))
return _ndtr(x)
|
Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
ndtr(x) = (1 / sqrt(2 pi)) * integral from -inf to x of exp(-0.5 t**2) dt
        = 0.5 * (1 + erf(x / sqrt(2)))
        = 0.5 * erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
|
codesearchnet
|
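The closed form in the docstring can be sanity-checked with the standard library alone, since `ndtr(x) = 0.5 * erfc(-x / sqrt(2))`:

```python
import math

def ndtr_ref(x):
    # Area under the standard normal pdf from -inf to x.
    return 0.5 * math.erfc(-x / math.sqrt(2.0))

for x in (-1.0, 0.0, 1.0, 2.0):
    print(x, round(ndtr_ref(x), 6))
# -1.0 0.158655
#  0.0 0.5
#  1.0 0.841345
#  2.0 0.97725
```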
def convert_item_to_command_line_arg(self, action, key, value):
args = []
if action is None:
command_line_key = \
self.get_command_line_key_for_unknown_config_file_setting(key)
else:
command_line_key = action.option_strings[-1]
if action is not None and isinstance(action, ACTION_TYPES_THAT_DONT_NEED_A_VALUE):
if value.lower() in ("true", "yes", "1"):
args.append( command_line_key )
elif value.lower() in ("false", "no", "0"):
pass
else:
self.error("Unexpected value for %s: '%s'. Expecting 'true', "
"'false', 'yes', 'no', '1' or '0'" % (key, value))
elif isinstance(value, list):
if action is None or isinstance(action, argparse._AppendAction):
for list_elem in value:
args.append( command_line_key )
args.append( str(list_elem) )
elif (isinstance(action, argparse._StoreAction) and action.nargs in ('+', '*')) or (
isinstance(action.nargs, int) and action.nargs > 1):
args.append( command_line_key )
for list_elem in value:
args.append( str(list_elem) )
else:
self.error(("%s can't be set to a list '%s' unless its action type is changed "
"to 'append' or nargs is set to '*', '+', or > 1") % (key, value))
elif isinstance(value, str):
args.append( command_line_key )
args.append( value )
else:
raise ValueError("Unexpected value type %s for value: %s" % (
type(value), value))
return args
|
Converts a config file or env var key + value to a list of
commandline args to append to the commandline.
Args:
action: The argparse Action object for this setting, or None if this
config file setting doesn't correspond to any defined
configargparse arg.
key: string (config file key or env var name)
value: parsed value of type string or list
|
juraj-google-style
|
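A simplified, standalone sketch of the conversion above for the case where no argparse action is known: truthy booleans become bare flags, falsy booleans emit nothing, lists repeat the flag per element, and everything else becomes a flag/value pair. The `'--' + key` flag derivation is an assumption; the real method asks configargparse for the command-line key.

```python
def to_cli_args(key, value):
    """Approximate the boolean/list/string conversion for an unknown setting."""
    flag = '--' + key
    args = []
    if isinstance(value, str) and value.lower() in ('true', 'yes', '1'):
        args.append(flag)                      # bare flag for truthy booleans
    elif isinstance(value, str) and value.lower() in ('false', 'no', '0'):
        pass                                   # falsy booleans emit nothing
    elif isinstance(value, list):
        for item in value:                     # repeat the flag per element (append-style)
            args.extend([flag, str(item)])
    else:
        args.extend([flag, str(value)])
    return args

print(to_cli_args('verbose', 'true'))          # ['--verbose']
print(to_cli_args('exclude', ['a', 'b']))      # ['--exclude', 'a', '--exclude', 'b']
print(to_cli_args('level', '3'))               # ['--level', '3']
```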
def run(self, text):
for pp in self.pre_processors:
text = pp.run(text)
return text
|
Run each substitution on ``text``.
Args:
text (string): the input text.
Returns:
string: text after all substitutions have been sequentially
applied.
|
juraj-google-style
|
def right_margin(self, margin):
    if 1 <= margin <= 255:
        self.send(chr(27) + 'Q' + chr(margin))
    else:
        raise RuntimeError('Invalid margin parameter in function right_margin')
|
Specify the right margin.
Args:
margin: The right margin, in character width, must be less than the media's width.
Returns:
None
Raises:
RuntimeError: Invalid margin parameter
|
juraj-google-style
|
def updateAccount(self, subject, person, vendorSpecific=None):
response = self.updateAccountResponse(subject, person, vendorSpecific)
return self._read_boolean_response(response)
|
See Also: updateAccountResponse()
Args:
subject:
person:
vendorSpecific:
Returns:
|
juraj-google-style
|
def get_box_field(self, box_key, field_key = None):
self._raise_unimplemented_error()
uri = '/'.join([self.api_uri,
self.boxes_suffix,
box_key,
self.fields_suffix
])
if field_key:
uri = '/'.join([uri, field_key])
return self._req('get', uri)
|
Gets one or all fields in a box.
Args:
    box_key: key for the box
    field_key: key for the field (default: None, i.e. ALL)
Returns:
    status code, field dict or list thereof
|
juraj-google-style
|
def _compute_enlarge_labels(self, locator, base_index):
base_index_type = type(base_index)
locator_as_index = base_index_type(locator)
nan_labels = locator_as_index.difference(base_index)
common_labels = locator_as_index.intersection(base_index)
if (len(common_labels) == 0):
raise KeyError('None of [{labels}] are in the [{base_index_name}]'.format(labels=list(locator_as_index), base_index_name=base_index))
return nan_labels
|
Helper for _enlarge_axis, compute common labels and extra labels.
Returns:
nan_labels: The labels needs to be added
|
codesearchnet
|
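The helper above is just two pandas `Index` set operations; the same split between labels to add and labels already present can be reproduced directly:

```python
import pandas as pd

base_index = pd.Index(['a', 'b', 'c'])
locator = ['b', 'c', 'd', 'e']

locator_as_index = pd.Index(locator)
nan_labels = locator_as_index.difference(base_index)       # labels that need to be added
common_labels = locator_as_index.intersection(base_index)  # labels already present

print(list(nan_labels))     # ['d', 'e']
print(list(common_labels))  # ['b', 'c']
```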
def get(self, url):
self._driver.get(url)
if self.bot_diary:
self.bot_diary.add_auto_entry(
"I went on",
target=url,
take_screenshot=True
)
if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:
self.init_javascript_error_interception()
return True
|
Navigate to a specific url
This specific implementation inject a javascript
script to intercept the javascript error
Configurable with the "proxy_driver:intercept_javascript_error" config
Args:
url (str): the url to navigate to
Returns:
bool
|
juraj-google-style
|
def delete(self, id, **kwargs):
if id is None:
path = self.path
else:
if not isinstance(id, int):
id = id.replace('/', '%2F')
path = '%s/%s' % (self.path, id)
self.gitlab.http_delete(path, **kwargs)
|
Delete an object on the server.
Args:
id: ID of the object to delete
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
|
juraj-google-style
|
def add_property_orders(query_proto, *orders):
for order in orders:
proto = query_proto.order.add()
if order[0] == '-':
order = order[1:]
proto.direction = query_pb2.PropertyOrder.DESCENDING
else:
proto.direction = query_pb2.PropertyOrder.ASCENDING
proto.property.name = order
|
Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
orders: list of property name strings; defaults to ascending
    order, or descending if prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc
|
juraj-google-style
|
def _send_notification(self, handle, value):
value_len = len(value)
value = bytes(value)
payload = struct.pack("<BHB%ds" % value_len, 0xFF, handle, value_len, value)
response = self._send_command(2, 5, payload)
result, = unpack("<H", response.payload)
if result != 0:
return False, {'reason': 'Error code from BLED112 notifying a value', 'code': result, 'handle': handle, 'value': value}
return True, None
|
Send a notification to all connected clients on a characteristic
Args:
handle (int): The handle we wish to notify on
value (bytearray): The value we wish to send
|
juraj-google-style
|
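The `struct.pack` format above lays the payload out as a leading 0xFF byte, a 2-byte little-endian handle, a 1-byte length, and then the raw value (the meaning of the leading byte is device-specific and not asserted here). A standalone look at the resulting bytes:

```python
import struct

handle = 0x0021
value = bytearray(b'\x01\x02\x03')

payload = struct.pack("<BHB%ds" % len(value), 0xFF, handle, len(value), bytes(value))
print(payload.hex())   # ff210003010203

# Unpacking the same layout recovers the fields.
lead, hnd, length = struct.unpack_from("<BHB", payload)
data = payload[4:4 + length]
print(hex(hnd), length, data)   # 0x21 3 b'\x01\x02\x03'
```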
def egress(self, envelope, http_headers, operation, binding_options):
if self._logger.isEnabledFor(logging.INFO):
service_name = operation.binding.wsdl.services.keys()[0]
self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name,
binding_options['address'])
if self._logger.isEnabledFor(logging.DEBUG):
http_headers_safe = http_headers.copy()
if self._AUTHORIZATION_HEADER in http_headers_safe:
http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED
request_string = etree.tostring(envelope, pretty_print=True)
safe_request = self._DEVELOPER_TOKEN_SUB.sub(
self._REDACTED, request_string.decode('utf-8'))
self._logger.debug(
_REQUEST_XML_LOG_LINE, http_headers_safe, safe_request)
return envelope, http_headers
|
Overrides the egress function for request logging.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
|
juraj-google-style
|
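The redaction step above (masking the Authorization header and the developer token before logging) can be sketched independently of the SOAP plumbing. The header name and regex below are assumptions for illustration:

```python
import re

AUTH_HEADER = 'Authorization'
DEV_TOKEN_RE = re.compile(r'(<developerToken>).*?(</developerToken>)')
REDACTED = 'REDACTED'

def redact_for_logging(http_headers, request_xml):
    # Copy the headers so the real request is left untouched.
    headers_safe = dict(http_headers)
    if AUTH_HEADER in headers_safe:
        headers_safe[AUTH_HEADER] = REDACTED
    safe_xml = DEV_TOKEN_RE.sub(r'\1%s\2' % REDACTED, request_xml)
    return headers_safe, safe_xml

headers, xml = redact_for_logging(
    {'Authorization': 'Bearer abc123', 'Content-Type': 'text/xml'},
    '<soap><developerToken>s3cr3t</developerToken></soap>')
print(headers)  # {'Authorization': 'REDACTED', 'Content-Type': 'text/xml'}
print(xml)      # <soap><developerToken>REDACTED</developerToken></soap>
```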
def produce(self, **kwargs):
produce_args = self._produce_params.copy()
produce_args.update(kwargs)
if self._class:
return getattr(self.instance, self.produce_method)(**produce_args)
produce_args.update(self._hyperparameters)
return self.primitive(**produce_args)
|
Call the primitive function, or the predict method of the primitive.
The given keyword arguments will be passed directly to the primitive,
if it is a simple function, or to the `produce` method of the
primitive instance specified in the JSON annotation, if it is a class.
If any of the arguments expected by the produce method had been given
during the MLBlock initialization, they will be passed as well.
Returns:
The output of the call to the primitive function or primitive
produce method.
|
codesearchnet
|
def sort_dependencies(self, image, dependencies=None):
if dependencies is None:
dependencies = OrderedDict()
if image in dependencies:
return
requires = self.ymldefs[image].get('requires', [])
for dep in requires:
self.sort_dependencies(dep, dependencies)
dependencies[image] = None
return dependencies.keys()
|
Topologically sort the docker commands by their requirements
Note:
Circular "requires" dependencies are assumed to have already been checked in
get_external_base_image, they are not checked here
Args:
image (str): process this docker image's dependencies
dependencies (OrderedDict): running cache of sorted dependencies (ordered dict)
Returns:
List[str]: list of dependencies in topologically-sorted build order
|
juraj-google-style
|
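A self-contained version of the same depth-first topological sort over a `requires` mapping, assuming, as the note above says, that circular dependencies were already rejected elsewhere:

```python
from collections import OrderedDict

# Hypothetical image definitions standing in for self.ymldefs.
ymldefs = {
    'app':     {'requires': ['runtime', 'assets']},
    'runtime': {'requires': ['base']},
    'assets':  {'requires': ['base']},
    'base':    {},
}

def sort_dependencies(image, dependencies=None):
    if dependencies is None:
        dependencies = OrderedDict()
    if image in dependencies:
        return dependencies.keys()
    for dep in ymldefs[image].get('requires', []):
        sort_dependencies(dep, dependencies)
    dependencies[image] = None        # insertion order = build order
    return dependencies.keys()

print(list(sort_dependencies('app')))   # ['base', 'runtime', 'assets', 'app']
```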
def Feed(self, size=512):
data = self.file_object.read(size)
Lexer.Feed(self, data)
return len(data)
|
Feed data into the buffer.
Args:
size: optional data size to read from the file-like object.
|
codesearchnet
|
def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:
VALID_STR.validate(group_name, 'get_group_by_name')
for group in self.groups:
if (group.group_name == group_name):
return group
return None
|
Gets a group from its name
Args:
group_name:
Returns: Group
|
codesearchnet
|
def euler_angles_1q(unitary_matrix):
if (unitary_matrix.shape != (2, 2)):
raise QiskitError('euler_angles_1q: expected 2x2 matrix')
phase = (la.det(unitary_matrix) ** ((- 1.0) / 2.0))
U = (phase * unitary_matrix)
if (abs(U[(0, 0)]) > _CUTOFF_PRECISION):
theta = (2 * math.acos(abs(U[(0, 0)])))
else:
theta = (2 * math.asin(abs(U[(1, 0)])))
phase11 = 0.0
phase10 = 0.0
if (abs(math.cos((theta / 2.0))) > _CUTOFF_PRECISION):
phase11 = (U[(1, 1)] / math.cos((theta / 2.0)))
if (abs(math.sin((theta / 2.0))) > _CUTOFF_PRECISION):
phase10 = (U[(1, 0)] / math.sin((theta / 2.0)))
phiplambda = (2 * math.atan2(np.imag(phase11), np.real(phase11)))
phimlambda = (2 * math.atan2(np.imag(phase10), np.real(phase10)))
phi = 0.0
if ((abs(U[(0, 0)]) > _CUTOFF_PRECISION) and (abs(U[(1, 0)]) > _CUTOFF_PRECISION)):
phi = ((phiplambda + phimlambda) / 2.0)
lamb = ((phiplambda - phimlambda) / 2.0)
elif (abs(U[(0, 0)]) < _CUTOFF_PRECISION):
lamb = (- phimlambda)
else:
lamb = phiplambda
Rzphi = np.array([[np.exp((((- 1j) * phi) / 2.0)), 0], [0, np.exp(((1j * phi) / 2.0))]], dtype=complex)
Rytheta = np.array([[np.cos((theta / 2.0)), (- np.sin((theta / 2.0)))], [np.sin((theta / 2.0)), np.cos((theta / 2.0))]], dtype=complex)
Rzlambda = np.array([[np.exp((((- 1j) * lamb) / 2.0)), 0], [0, np.exp(((1j * lamb) / 2.0))]], dtype=complex)
V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))
if (la.norm((V - U)) > _CUTOFF_PRECISION):
raise QiskitError('euler_angles_1q: incorrect result')
return (theta, phi, lamb)
|
Compute Euler angles for a single-qubit gate.
Find angles (theta, phi, lambda) such that
unitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)
Args:
unitary_matrix (ndarray): 2x2 unitary matrix
Returns:
tuple: (theta, phi, lambda) Euler angles of SU(2)
Raises:
QiskitError: if unitary_matrix not 2x2, or failure
|
codesearchnet
|
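The decomposition can be spot-checked numerically: with the known angles (theta, phi, lambda) = (pi/2, 0, pi) for the Hadamard gate, rebuilding Rz(phi) * Ry(theta) * Rz(lambda) should reproduce H up to a global phase. A NumPy-only check (reimplementing only the reconstruction, not the routine above):

```python
import numpy as np

def rz(a):
    return np.array([[np.exp(-0.5j * a), 0], [0, np.exp(0.5j * a)]])

def ry(a):
    return np.array([[np.cos(a / 2), -np.sin(a / 2)],
                     [np.sin(a / 2),  np.cos(a / 2)]], dtype=complex)

theta, phi, lam = np.pi / 2, 0.0, np.pi
V = rz(phi) @ ry(theta) @ rz(lam)

H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)

# H and V should differ only by a global phase: H @ V^dagger ~ phase * I.
M = H @ V.conj().T
phase = M[0, 0]
print(np.allclose(M, phase * np.eye(2)))   # True
print(np.isclose(abs(phase), 1.0))         # True
```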
def getPaddingNum(chars):
match = PRINTF_SYNTAX_PADDING_RE.match(chars)
if match:
return int(match.group(1))
try:
return sum([PAD_MAP[char] for char in chars])
    except KeyError as err:
        msg = "Detected an unsupported padding character: \"{}\"."
        msg += " Supported padding characters: {} or printf syntax padding"
        msg += " %<int>d"
        raise ValueError(msg.format(err.args[0], str(PAD_MAP.keys())))
|
Given a supported group of padding characters, return the amount of padding.
Args:
chars (str): a supported group of padding characters
Returns:
int:
Raises:
ValueError: if unsupported padding character is detected
|
juraj-google-style
|
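A hedged sketch of the two padding notations the function accepts. The `PAD_MAP` values and the printf regex below are assumptions (they follow the common '#' = 4, '@' = 1 convention); the real module defines its own:

```python
import re

# Assumed padding map; the real module defines its own PAD_MAP.
PAD_MAP = {'#': 4, '@': 1}
PRINTF_SYNTAX_PADDING_RE = re.compile(r'^%(\d+)d$')

def get_padding_num(chars):
    match = PRINTF_SYNTAX_PADDING_RE.match(chars)
    if match:
        return int(match.group(1))             # e.g. '%04d' -> 4
    try:
        return sum(PAD_MAP[char] for char in chars)
    except KeyError as err:
        raise ValueError('Unsupported padding character: %r' % err.args[0])

print(get_padding_num('%04d'))   # 4
print(get_padding_num('#'))      # 4
print(get_padding_num('#@@'))    # 6
```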
def __init__(self, table, info):
self._table = table
self._info = info
|
Initializes a TableMetadata instance.
Args:
table: the Table object this belongs to.
info: The BigQuery information about this table as a Python dictionary.
|
juraj-google-style
|