code | docstring | source
---|---|---|
def prefer_static_value(x):
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x
|
Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`.
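Illustrative usage sketch (not part of the original docstring; assumes TensorFlow is imported as `tf` and the value can be constant-folded):
    >>> prefer_static_value(tf.constant([1, 2, 3]))  # doctest: +SKIP
    array([1, 2, 3], dtype=int32)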
|
github-repos
|
def prod(self, vars_list: List[str]) -> 'TensorFluent':
operand = self
if (operand.dtype == tf.bool):
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list)
|
Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
|
codesearchnet
|
def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None:
if type(buffer) is Buffer:
buffer = buffer.mglo
return self.mglo.read_into(buffer, face, alignment, write_offset)
|
Read a face from the cubemap texture.
Args:
buffer (bytearray): The buffer that will receive the pixels.
face (int): The face to read.
Keyword Args:
alignment (int): The byte alignment of the pixels.
write_offset (int): The write offset.
|
juraj-google-style
|
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
            if chn == 1:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
            patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches
|
Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no scaling.
pad_fill (number or list): Value to be filled for padding, None for
no padding.
Returns:
list or ndarray: The cropped image patches.
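Illustrative usage sketch (not part of the original docstring; assumes `numpy` is imported as `np` and the companion `bbox_scaling`/`bbox_clip` helpers are importable):
    >>> img = np.zeros((200, 200, 3), dtype=np.uint8)
    >>> imcrop(img, np.array([10, 10, 99, 99])).shape
    (90, 90, 3)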
|
juraj-google-style
|
def dict_strip(d):
_d = deepcopy(d)
for k, v in iteritems(d):
if isinstance(v, str):
_d[k] = v.strip()
elif isinstance(v, dict):
_d[k] = dict_strip(v)
return _d
|
Strips whitespace from the string values of the given dictionary (recursively).
Args:
d: A dictionary object.
Returns:
A new dictionary object, whose string values' whitespace has been stripped out.
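Illustrative usage sketch (not part of the original docstring):
    >>> dict_strip({'a': '  x ', 'b': {'c': ' y'}})
    {'a': 'x', 'b': {'c': 'y'}}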
|
juraj-google-style
|
def lock(self, key, client):
self.key = key
self.client = client
|
Set the key that will be used to ensure messages come from one party
Args:
key (string): The key used to validate future messages
client (string): A string that will be returned to indicate who
locked this device.
|
juraj-google-style
|
def instantiate_interface(virtual_iface, config, loop):
if (virtual_iface == 'null'):
return StandardDeviceServer(None, {}, loop=loop)
conf = {}
if ('interface' in config):
conf = config['interface']
try:
reg = ComponentRegistry()
if virtual_iface.endswith('.py'):
(_name, iface) = reg.load_extension(virtual_iface, class_filter=AbstractDeviceServer, unique=True)
else:
(_name, iface) = reg.load_extensions('iotile.device_server', name_filter=virtual_iface, class_filter=AbstractDeviceServer, unique=True)
return iface(None, conf, loop=loop)
except ArgumentError as err:
print(('ERROR: Could not load device_server (%s): %s' % (virtual_iface, err.msg)))
sys.exit(1)
|
Find a device server by name and instantiate it
Args:
    virtual_iface (string): The name of the pkg_resources entry point corresponding to
        the device server, registered in group iotile.device_server, or the path to a
        .py file containing the device server class. Pass 'null' for a standard no-op server.
    config (dict): A dictionary with an 'interface' key holding the config info for
        configuring this device server. This is optional.
Returns:
    AbstractDeviceServer: The instantiated subclass of AbstractDeviceServer
|
codesearchnet
|
def exportData(self, datfile):
def ampl_set(name, values):
def format_entry(e):
return repr(e).replace(' ', '')
return 'set {0} := {1};'.format(name, ','.join((format_entry(e) for e in values)))
def ampl_param(name, values):
def format_entry(k, v):
k = repr(k).strip('()').replace(' ', '')
if (v == inf):
v = 'Infinity'
elif (v == (- inf)):
v = '-Infinity'
else:
v = repr(v).strip('()').replace(' ', '')
return '[{0}]{1}'.format(k, v)
return 'param {0} := {1};'.format(name, ''.join((format_entry(k, v) for (k, v) in values.items())))
with open(datfile, 'w') as f:
for (name, entity) in self.getSets():
values = entity.getValues().toList()
print(ampl_set(name, values), file=f)
for (name, entity) in self.getParameters():
if entity.isScalar():
print('param {} := {};'.format(name, entity.value()), file=f)
else:
values = entity.getValues().toDict()
print(ampl_param(name, values), file=f)
|
Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute).
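Illustrative usage sketch (hypothetical `exporter` instance with sets and parameters already loaded):
    >>> exporter.exportData('problem.dat')  # doctest: +SKIP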
|
codesearchnet
|
def match_term(self, value, required=True, new_group=False):
if self.initialized:
if required:
self._and_join(new_group)
else:
self._or_join(new_group)
self._term(value)
return self
|
Add a fulltext search term to the query.
Warning:
Do not use this method with any other query-building helpers. This method
is only for building fulltext queries (in non-advanced mode). Using other
helpers, such as ``match_field()``, will cause the query to run in advanced mode.
If a fulltext term query is run in advanced mode, it will have unexpected
results.
Arguments:
value (str): The term to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
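Illustrative usage sketch (hypothetical `SearchHelper` instance `sh`; terms are joined with AND by default):
    >>> sh.match_term("graphene").match_term("oxide", required=False)  # doctest: +SKIP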
|
codesearchnet
|
def validate(
self, nanopub: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
(is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)
    if not is_valid:
        return (is_valid, messages)
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
else:
is_valid = False
        return (
            is_valid,
            [("ERROR", f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}")],
        )
all_messages = []
bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)
for edge in nanopub["nanopub"]["edges"]:
bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}"
parse_obj = bel_obj.parse(bel_statement)
if not parse_obj.valid:
            all_messages.append(
                (
                    "ERROR",
                    f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}",
                )
            )
for context in nanopub["nanopub"]["context"]:
(is_valid, messages) = self.validate_context(context)
all_messages.extend(messages)
is_valid = True
for _type, msg in all_messages:
if _type == "ERROR":
is_valid = False
return (is_valid, all_messages)
|
Validates using the nanopub schema
Args:
nanopub (Mapping[str, Any]): nanopub dict
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")]
|
juraj-google-style
|
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])
self._AddsAnalysisProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
if self._stdout_output_writer:
sys.stdout.flush()
|
Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
|
juraj-google-style
|
def learn(self, grad_arr, fix_opt_flag=False):
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr)
if fix_opt_flag is False:
self.__lstm_model.optimize(
grads_list,
self.__learning_rate,
1
)
return delta_arr
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `True`, optimization of this model's parameters is skipped; if `False`, the model is optimized.
Returns:
`np.ndarray` of delta or gradients.
|
juraj-google-style
|
def available_writers(as_dict=False):
writers = []
for writer_configs in configs_for_writer():
try:
writer_info = read_writer_config(writer_configs)
except (KeyError, IOError, yaml.YAMLError):
LOG.warning('Could not import writer config from: %s', writer_configs)
LOG.debug('Error loading YAML', exc_info=True)
continue
writers.append((writer_info if as_dict else writer_info['name']))
return writers
|
Available writers based on current configuration.
Args:
as_dict (bool): Optionally return writer information as a dictionary.
Default: False
Returns: List of available writer names. If `as_dict` is `True` then
a list of dictionaries including additional writer information
is returned.
|
codesearchnet
|
def poke(exposes):
def _poke(store, objname, obj, container, visited=None, _stack=None):
try:
sub_container = store.newContainer(objname, obj, container)
except (SystemExit, KeyboardInterrupt):
raise
        except Exception:
raise ValueError('generic poke not supported by store')
for iobjname in exposes:
try:
iobj = getattr(obj, iobjname)
except AttributeError:
pass
else:
store.poke(iobjname, iobj, sub_container, visited=visited, _stack=_stack)
return _poke
|
Default serializer factory.
Arguments:
exposes (iterable): attributes to be serialized.
Returns:
callable: serializer (`poke` routine).
|
codesearchnet
|
def createDomains(tlds, nicks=None, nicksFile=None):
domain_candidates = []
if (nicks != None):
for n in nicks:
for t in tlds:
tmp = {'domain': (n + t['tld']), 'type': t['type'], 'tld': t['tld']}
domain_candidates.append(tmp)
elif (nicksFile != None):
with open(nicksFile, 'r') as iF:
nicks = iF.read().splitlines()
for n in nicks:
for t in tlds:
tmp = {'domain': (n + t['tld']), 'type': t['type'], 'tld': t['tld']}
domain_candidates.append(tmp)
return domain_candidates
|
Method that generates the domain candidates to be checked.
Args:
-----
tlds: List of tlds.
nicks: List of aliases.
nicksFile: The filepath to the aliases file.
Returns:
--------
list: list of domains to be checked.
|
codesearchnet
|
def RemoveTransaction(self, tx):
if (BC.Default() is None):
return False
if (not BC.Default().ContainsTransaction(tx.Hash)):
return False
if (tx.Hash.ToBytes() in self.MemPool):
del self.MemPool[tx.Hash.ToBytes()]
return True
return False
|
Remove a transaction from the memory pool if it is found on the blockchain.
Args:
tx (neo.Core.TX.Transaction): instance.
Returns:
bool: True if successfully removed. False otherwise.
|
codesearchnet
|
def GetDateRange(self):
start = self.start_date
end = self.end_date
for (date, (exception_type, _)) in self.date_exceptions.items():
if (exception_type == self._EXCEPTION_TYPE_REMOVE):
continue
if ((not start) or (date < start)):
start = date
if ((not end) or (date > end)):
end = date
if (start is None):
start = end
elif (end is None):
end = start
return (start, end)
|
Return the range over which this ServicePeriod is valid.
The range includes exception dates that add service outside of
(start_date, end_date), but doesn't shrink the range if exception
dates take away service at the edges of the range.
Returns:
A tuple of "YYYYMMDD" strings, (start date, end date) or (None, None) if
no dates have been given.
|
codesearchnet
|
def group(self, group_type=None, owner=None, **kwargs):
group = None
if (not group_type):
return Group(self.tcex, None, None, owner=owner, **kwargs)
name = kwargs.pop('name', None)
group_type = group_type.upper()
if (group_type == 'ADVERSARY'):
group = Adversary(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'CAMPAIGN'):
group = Campaign(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'DOCUMENT'):
group = Document(self.tcex, name, kwargs.pop('file_name', None), owner=owner, **kwargs)
if (group_type == 'EVENT'):
group = Event(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'EMAIL'):
group = Email(self.tcex, name, kwargs.pop('to', None), kwargs.pop('from_addr', None), kwargs.pop('subject', None), kwargs.pop('body', None), kwargs.pop('header', None), owner=owner, **kwargs)
if (group_type == 'INCIDENT'):
group = Incident(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'INTRUSION SET'):
group = IntrusionSet(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'REPORT'):
group = Report(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'SIGNATURE'):
group = Signature(self.tcex, name, kwargs.pop('file_name', None), kwargs.pop('file_type', None), kwargs.pop('file_text', None), owner=owner, **kwargs)
if (group_type == 'THREAT'):
group = Threat(self.tcex, name, owner=owner, **kwargs)
if (group_type == 'TASK'):
group = Task(self.tcex, name, kwargs.pop('status', 'Not Started'), kwargs.pop('due_date', None), kwargs.pop('reminder_date', None), kwargs.pop('escalation_date', None), owner=owner, **kwargs)
return group
|
Create the Group TI object.
Args:
    owner: The name of the ThreatConnect owner for the group.
    group_type: The type of group to create, e.g. 'Adversary', 'Campaign', 'Incident'.
    **kwargs: Additional keyword arguments passed to the specific group class (e.g. name).
Return:
    The instantiated Group TI object, or a generic Group object if no group_type is given.
|
codesearchnet
|
def init_feed_dict(self):
return self._init_feed_dict
|
Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
|
github-repos
|
def rmse(y, p):
assert y.shape == p.shape
return np.sqrt(mse(y, p))
|
Root Mean Squared Error (RMSE).
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): RMSE
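Illustrative usage sketch (not part of the original docstring; assumes `numpy` is imported as `np` and the companion `mse` helper is available):
    >>> float(rmse(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 5.0])))
    1.1547005383792515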
|
juraj-google-style
|
def get_tag_html(tag_id):
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data['tag']
args = tag_data['args']
kwargs = tag_data['kwargs']
(lib, tag_name) = get_lib_and_tag_name(tag)
args_str = ''
if args:
for arg in args:
if isinstance(arg, six.string_types):
args_str += "'{0}' ".format(arg)
else:
args_str += '{0} '.format(arg)
kwargs_str = ''
if kwargs:
for (name, value) in kwargs.items():
if isinstance(value, six.string_types):
kwargs_str += "{0}='{1}' ".format(name, value)
else:
kwargs_str += '{0}={1} '.format(name, value)
html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)
return html
|
Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id to return the HTML for.
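Illustrative sketch of the returned template string (hypothetical tag registered for `myapp_tags.my_tag` with args `('foo',)`):
    >>> get_tag_html(tag_id)  # doctest: +SKIP
    "{% load myapp_tags %}{% my_tag 'foo' %}"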
|
codesearchnet
|
def __init__(self, pqc: tf.Tensor, qubits: List[cirq.GridQubit], symbol_names: tf.Tensor, value_layers_inputs: List[Union[tf.Variable, List[tf.Variable]]], value_layers: List[List[tf.keras.layers.Layer]], name: Union[None, str]=None):
super().__init__(name=name)
self._pqc = pqc
self._qubits = sorted(qubits)
self._symbol_names = symbol_names
self._value_layers = value_layers
self._value_layers_inputs = value_layers_inputs
raw_bit_circuit = circuit_utils.bit_circuit(self.qubits)
bit_symbol_names = list(sorted(tfq.util.get_circuit_symbols(raw_bit_circuit)))
self._bit_symbol_names = tf.constant([str(x) for x in bit_symbol_names])
self._bit_circuit = tfq.convert_to_tensor([raw_bit_circuit])
|
Initializes a QuantumCircuit.
Args:
pqc: TFQ string representation of a parameterized quantum circuit.
qubits: The qubits on which `pqc` acts.
symbol_names: Strings which are used to specify the order in which the
values in `self.symbol_values` should be placed inside of the circuit.
value_layers_inputs: Inputs to the `value_layers` argument.
value_layers: The concatenation of the layers in entry `i` yields a
trainable map from `value_layers_inputs[i]` to the `i` entry in the list
of intermediate values. The list of intermediate values is concatenated
to yield the values to substitute into the circuit.
name: Optional name for the model.
|
github-repos
|
def VerifyStructure(self, parser_mediator, lines):
if self._VERIFICATION_REGEX.match(lines):
return True
return False
|
Verifies whether content corresponds to a Zsh extended_history file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if the line was successfully parsed.
|
juraj-google-style
|
def __init__(self, _args):
super(TcExInit, self).__init__(_args)
self.base_url = (
'https:
).format(self.args.branch)
|
Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace.
|
juraj-google-style
|
def _CheckMacOSPaths(self, filename, artifact_definition, source, paths):
result = True
paths_with_private = []
paths_with_symbolic_link_to_private = []
for path in paths:
path_lower = path.lower()
path_segments = path_lower.split(source.separator)
if not path_segments:
logging.warning((
'Empty path defined by artifact definition: {0:s} in file: '
'{1:s}').format(artifact_definition.name, filename))
result = False
elif len(path_segments) == 1:
continue
elif path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS:
paths_with_symbolic_link_to_private.append(path)
    elif path_segments[1] == 'private' and len(path_segments) >= 3:
if path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS:
paths_with_private.append(path)
else:
logging.warning((
'Unsupported private path: {0:s} defined by artifact definition: '
'{1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
for private_path in paths_with_private:
if private_path[8:] not in paths_with_symbolic_link_to_private:
logging.warning((
'Missing symbolic link: {0:s} for path: {1:s} defined by artifact '
'definition: {2:s} in file: {3:s}').format(
private_path[8:], private_path, artifact_definition.name,
filename))
result = False
for path in paths_with_symbolic_link_to_private:
private_path = '/private{0:s}'.format(path)
if private_path not in paths_with_private:
logging.warning((
'Missing path: {0:s} for symbolic link: {1:s} defined by artifact '
'definition: {2:s} in file: {3:s}').format(
private_path, path, artifact_definition.name, filename))
result = False
return result
|
Checks if the paths are valid MacOS paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
paths (list[str]): paths to validate.
Returns:
bool: True if the MacOS paths are valid.
|
juraj-google-style
|
def num_samples(self, sr=None):
native_sr = self.sampling_rate
num_samples = units.seconds_to_sample(self.duration, native_sr)
if sr is not None:
ratio = float(sr) / native_sr
num_samples = int(np.ceil(num_samples * ratio))
return num_samples
|
Return the number of samples.
Args:
sr (int): Calculate the number of samples with the given
sampling-rate. If None use the native sampling-rate.
Returns:
int: Number of samples
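Illustrative usage sketch (hypothetical `track` of 2.0 seconds with a native sampling rate of 16000 Hz):
    >>> track.num_samples()  # doctest: +SKIP
    32000
    >>> track.num_samples(sr=8000)  # doctest: +SKIP
    16000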
|
juraj-google-style
|
def np_doc_only(np_fun_name, np_fun=None):
np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
def decorator(f):
f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)
return f
return decorator
|
Attaches numpy docstring to a function.
This differs from np_doc in that it doesn't check for a match in signature.
Args:
np_fun_name: name for the np_fun symbol. At least one of np_fun or
np_fun_name should be set.
np_fun: (optional) the numpy function whose docstring will be used.
Returns:
A function decorator that attaches the docstring from `np_fun` to the
decorated function.
|
github-repos
|
def expand(self, url):
expand_url = f'{self.api_url}v3/expand'
params = {
'shortUrl': url,
'access_token': self.api_key,
'format': 'txt',
}
response = self._get(expand_url, params=params)
if response.ok:
return response.text.strip()
raise ExpandingErrorException(response.content)
|
Expand implementation for Bit.ly
Args:
url: the short URL you want to expand
Returns:
A string containing the expanded URL
Raises:
ExpandingErrorException: If the API Returns an error as response
|
juraj-google-style
|
def simple_stack(self, opcode=None):
if opcode is not None:
return (frame_state.SimpleFrame(opcode),)
elif self.frame:
return (frame_state.SimpleFrame(self.frame.current_opcode),)
else:
return ()
|
Get a stack of simple frames.
Args:
opcode: Optionally, an opcode to create a stack for.
Returns:
If an opcode is provided, a stack with a single frame at that opcode.
Otherwise, the VM's current stack converted to simple frames.
|
github-repos
|
def json(cls, message):
if (type(message) is OrderedDict):
pprint(dict(message))
else:
pprint(message)
|
Print a nice JSON output
Args:
message: the message to print
|
codesearchnet
|
def sketch_fasta(fasta_path, outdir):
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path
|
Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path
|
juraj-google-style
|
def _Aff4Read(aff4_obj, offset, length):
length = (length or (_Aff4Size(aff4_obj) - offset))
aff4_obj.Seek(offset)
return aff4_obj.Read(length)
|
Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
|
codesearchnet
|
def build_relative_position(query_layer, key_layer, bucket_size: int=-1, max_position: int=-1):
query_size = query_layer.size(-2)
key_size = key_layer.size(-2)
q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)
k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device)
rel_pos_ids = q_ids[:, None] - k_ids[None, :]
if bucket_size > 0 and max_position > 0:
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
rel_pos_ids = rel_pos_ids.to(torch.long)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = rel_pos_ids.unsqueeze(0)
return rel_pos_ids
|
Build relative position according to the query and key
We assume the absolute position of query \(P_q\) is range from (0, query_size) and the absolute position of key
\(P_k\) is range from (0, key_size), The relative positions from query to key is \(R_{q \rightarrow k} = P_q -
P_k\)
Args:
    query_layer (`torch.Tensor`): tensor whose second-to-last dimension is the query length
    key_layer (`torch.Tensor`): tensor whose second-to-last dimension is the key length
    bucket_size (int): the size of position bucket
    max_position (int): the maximum allowed absolute position
Return:
`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
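Illustrative shape check (not part of the original docstring; assumes `torch` is imported):
    >>> q = torch.zeros(1, 4, 8)  # hypothetical query hidden states
    >>> k = torch.zeros(1, 6, 8)  # hypothetical key hidden states
    >>> build_relative_position(q, k).shape
    torch.Size([1, 4, 6])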
|
github-repos
|
def open(self, mode='r', encoding=None):
access_type = self._get_access_type(mode)
if access_type == 't' and encoding is not None and encoding != self.encoded_with:
warnings.warn('Attempting to decode %s as "%s", but encoding is declared as "%s"'
% (self, encoding, self.encoded_with))
if encoding is None:
encoding = self.encoded_with
buffer = io.BytesIO(self._contents)
if access_type == 'b':
return buffer
else:
return io.TextIOWrapper(buffer, encoding=encoding)
|
Return file-like object
Args:
mode (str): access mode (only reading modes are supported)
encoding (str): text decoding method for text access (default: system default)
Returns:
io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters
|
juraj-google-style
|
def __init__(self, platform, device):
self._platform = platform
self._device = device
if (self._platform, self._device) not in _context_cache:
context = cl.Context([device])
_context_cache[(self._platform, self._device)] = context
self._context = _context_cache[(self._platform, self._device)]
self._queue = cl.CommandQueue(self._context, device=device)
|
Storage unit for an OpenCL environment.
Args:
platform (pyopencl platform): An PyOpenCL platform.
device (pyopencl device): An PyOpenCL device
|
juraj-google-style
|
def rt_is_equiv_dense(rt):
return math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()])
|
Returns true if this RaggedTensor has the same row_lengths across
all ragged dimensions and thus can be converted to a dense tensor
without loss of information.
Args:
rt: RaggedTensor.
|
github-repos
|
def execute_before(self, sensor_graph, scope_stack):
parent = scope_stack[-1]
alloc = parent.allocator
connect_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
disconnect_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
latch_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)
latch_on_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)
latch_off_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)
sensor_graph.add_node(u"({} always) => {} using copy_latest_a".format(user_connected, connect_stream))
sensor_graph.add_node(u"({} always) => {} using copy_latest_a".format(user_disconnected, disconnect_stream))
sensor_graph.add_node(u"({} always && {} when value=={}) => {} using copy_latest_a".format(latch_on_stream, connect_stream, self.slot_id.address, latch_stream))
sensor_graph.add_node(u"({} always && {} when value=={}) => {} using copy_latest_a".format(latch_off_stream, disconnect_stream, self.slot_id.address, latch_stream))
sensor_graph.add_constant(latch_on_stream, 1)
sensor_graph.add_constant(latch_off_stream, 0)
sensor_graph.add_constant(latch_stream, 0)
new_scope = GatedClockScope(sensor_graph, scope_stack, (latch_stream, InputTrigger(u'value', u'==', 1)))
new_scope.add_identifier('connect', connect_stream)
new_scope.add_identifier('disconnect', disconnect_stream)
scope_stack.append(new_scope)
|
Execute statement before children are executed.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
|
juraj-google-style
|
def simulate(self, action):
with tf.name_scope("environment/simulate"):
if action.dtype in (tf.float16, tf.float32, tf.float64):
action = tf.check_numerics(action, "action")
def step(action):
step_response = self._batch_env.step(action)
if len(step_response) == 3:
(observ, reward, done) = step_response
else:
(observ, reward, done, _) = step_response
return (observ, reward.astype(np.float32), done)
observ, reward, done = tf.py_func(
step, [action],
[self.observ_dtype, tf.float32, tf.bool], name="step")
reward = tf.check_numerics(reward, "reward")
reward.set_shape((len(self),))
done.set_shape((len(self),))
with tf.control_dependencies([self._observ.assign(observ)]):
return tf.identity(reward), tf.identity(done)
|
Step the batch of environments.
The results of the step can be accessed from the variables defined below.
Args:
action: Tensor holding the batch of actions to apply.
Returns:
A tuple of the reward and done tensors resulting from the step.
|
juraj-google-style
|
def rewrite_filters_in_optional_blocks(ir_blocks):
new_ir_blocks = []
optional_context_depth = 0
for block in ir_blocks:
new_block = block
if isinstance(block, CoerceType):
raise AssertionError(u'Found a CoerceType block after all such blocks should have been lowered to Filter blocks: {}'.format(ir_blocks))
elif (isinstance(block, Traverse) and block.optional):
optional_context_depth += 1
elif (isinstance(block, Backtrack) and block.optional):
optional_context_depth -= 1
if (optional_context_depth < 0):
raise AssertionError(u'Reached negative optional context depth for blocks: {}'.format(ir_blocks))
elif (isinstance(block, Filter) and (optional_context_depth > 0)):
null_check = BinaryComposition(u'=', LocalField('@this'), NullLiteral)
new_block = Filter(BinaryComposition(u'||', null_check, block.predicate))
else:
pass
new_ir_blocks.append(new_block)
return new_ir_blocks
|
In optional contexts, add a check for null that allows non-existent optional data through.
Optional traversals in Gremlin represent missing optional data by setting the current vertex
to null until the exit from the optional scope. Therefore, filtering and type coercions
(which should have been lowered into filters by this point) must check for null before
applying their filtering predicates. Since missing optional data isn't filtered,
the new filtering predicate should be "(it == null) || existing_predicate".
Args:
ir_blocks: list of IR blocks to lower into Gremlin-compatible form
Returns:
new list of IR blocks with this lowering step applied
|
codesearchnet
|
def find_slot(self, wanted, slots=None):
for slot in self.find_slots(wanted, slots):
return slot
return None
|
Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
    wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    slots (Optional[list]): The slots to search; defaults to the locations listed above.
Returns:
Optional[Slot]: The first slot containing the item
or None if not found.
|
codesearchnet
|
def find_executable(executable):
logger = logging.getLogger(__name__)
logger.debug("Checking executable '%s'...", executable)
executable_path = _find_executable(executable)
found = (executable_path is not None)
if found:
logger.debug("Executable '%s' found: '%s'", executable, executable_path)
else:
logger.debug("Executable '%s' not found", executable)
return executable_path
|
Finds executable in PATH
Args:
    executable (str): Name of the executable to look for.
Returns:
    string or None
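Illustrative usage sketch (the resolved path depends on the local environment):
    >>> find_executable('python')  # doctest: +SKIP
    '/usr/bin/python'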
|
codesearchnet
|
def build_hpo_term(hpo_info):
try:
hpo_id = hpo_info['hpo_id']
except KeyError:
raise KeyError("Hpo terms has to have a hpo_id")
LOG.debug("Building hpo term %s", hpo_id)
try:
description = hpo_info['description']
except KeyError:
raise KeyError("Hpo terms has to have a description")
hpo_obj = HpoTerm(
hpo_id = hpo_id,
description = description
)
hgnc_ids = hpo_info.get('genes', set())
if hgnc_ids:
hpo_obj['genes'] = list(hgnc_ids)
return hpo_obj
|
Build a hpo_term object
Check that the information is correct and add the correct hgnc ids to the
array of genes.
Args:
hpo_info(dict)
Returns:
hpo_obj(scout.models.HpoTerm): A dictionary with hpo information
|
juraj-google-style
|
def __init__(self, usage=None, data=None):
super(TransactionAttribute, self).__init__()
self.Usage = usage
self.Data = data
|
Create an instance.
Args:
usage (neo.Core.TX.TransactionAttribute.TransactionAttributeUsage):
data (bytes):
|
juraj-google-style
|
def ParseArguments(args):
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be comma seperated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
|
Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments.
Returns:
The list of filenames to lint.
|
juraj-google-style
|
def make_timebar(progress=0, duration=0):
duration_string = api_music.duration_to_string(duration)
if duration <= 0:
return "---"
time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))
if time_counts > TIMEBAR_LENGTH:
time_counts = TIMEBAR_LENGTH
if duration > 0:
bar = "│" + (TIMEBAR_PCHAR * time_counts) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts)) + "│"
time_bar = "{} {}".format(bar, duration_string)
else:
time_bar = duration_string
return time_bar
|
Makes a new time bar string
Args:
progress: How far through the current song we are (in seconds)
duration: The duration of the current song (in seconds)
Returns:
timebar (str): The time bar string
|
juraj-google-style
|
def ensure_mingw_drive(win32_path):
(win32_drive, _path) = splitdrive(win32_path)
mingw_drive = ('/' + win32_drive[:(- 1)].lower())
mingw_path = (mingw_drive + _path)
return mingw_path
|
r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar
|
codesearchnet
|
def _refresh_grpc(operations_stub, operation_name):
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
return operations_stub.GetOperation(request_pb)
|
Refresh an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
|
juraj-google-style
|
def list(self):
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if ('StackSummaries' in response):
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if (stack_status != 'DELETE_COMPLETE'):
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
|
List the existing stacks in the indicated region
Args:
None
Returns:
True when the stack listing completes.
Todo:
Figure out what could go wrong and take steps
to handle problems.
|
codesearchnet
|
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
if not is_tf_available():
raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')
print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
try:
import tensorflow as tf
import tf2onnx
from tf2onnx import __version__ as t2ov
print(f'Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}')
input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, 'tf')
nlp.model.predict(tokens.data)
input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]
model_proto, _ = tf2onnx.convert.from_keras(nlp.model, input_signature, opset=opset, output_path=output.as_posix())
except ImportError as e:
raise Exception(f'Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}')
|
Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
Args:
nlp: The pipeline to be exported
opset: The actual version of the ONNX operator set to use
output: Path where will be stored the generated ONNX model
Notes: TensorFlow cannot export models bigger than 2GB due to an internal TensorFlow constraint
|
github-repos
|
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
content_type = headers.get("content-type")
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
|
Find content-type and encoding of the response
Args:
headers: Response headers
Returns:
:py:class:`tuple` (content-type, encoding)
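Illustrative usage sketch (not part of the original docstring):
    >>> parse_content_type({"content-type": "text/html; charset=ISO-8859-1"})
    ('text/html', 'ISO-8859-1')
    >>> parse_content_type({})
    (None, 'utf-8')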
|
juraj-google-style
|
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, position_ids, **kwargs)
return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
|
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
```
|
github-repos
|
def decode(pieces, sequence_length, model_file=None, model_proto=None, reverse=False, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_decode(pieces, sequence_length, model_file=model_file, model_proto=model_proto, reverse=reverse, name=name)
|
Decode pieces into postprocessed text.
Args:
pieces: A 2D int32 or string tensor [batch_size x max_length] of
encoded sequences.
sequence_length: A 1D int32 tensor [batch_size] representing the
length of pieces.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
reverse: Reverses the tokenized sequence (Default = false)
name: The name argument that is passed to the op function.
Returns:
text: A 1D string tensor of decoded string.
|
codesearchnet
|
def send_message(channel_id, message):
channel = client.get_channel(channel_id)
if channel is None:
logger.info("{} is not a channel".format(channel_id))
return
data = datatools.get_data()
if not data["discord"]["servers"][channel.server.id][modulename]["activated"]:
logger.info("This module has been disabled in {} ({})".format(channel.server.name, channel.server.id))
try:
runcoro(client.send_message(channel, message))
except Exception as e:
logger.exception(e)
|
Send a message to a channel
Args:
channel_id (str): The id of the channel to send the message to
message (str): The message to send to the channel
|
juraj-google-style
|
def count_moves_in_game_range(self, game_begin, game_end):
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
|
Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
|
juraj-google-style
|
def format(self, compact: bool=False, verbose: bool=True, root_indent: int=0, **kwargs) -> str:
|
Formats this object into a string representation.
Args:
compact: If True, this object will be formatted into a single line.
verbose: If True, this object will be formatted with verbosity.
Subclasses should define `verbosity` on their own.
root_indent: The start indent level for this object if the output is a
multi-line string.
**kwargs: Subclass specific keyword arguments.
Returns:
A string of formatted object.
|
github-repos
|
def scale(self, width: int, height: int) -> None:
lib.TCOD_image_scale(self.image_c, width, height)
(self.width, self.height) = (width, height)
|
Scale this Image to the new width and height.
Args:
width (int): The new width of the Image after scaling.
height (int): The new height of the Image after scaling.
|
codesearchnet
|
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
if (len(self.segments) > 0):
return (self.segments[0].points[0].time.strftime(name_format) + '.gpx')
else:
return 'EmptyTrack'
|
Generates a name for the track
The name is generated based on the date of the first point of the
track, or in case it doesn't exist, "EmptyTrack"
Args:
name_format (str, optional): Name format to give to the track, based on
its start time. Defaults to DEFAULT_FILE_NAME_FORMAT
Returns:
str
|
codesearchnet
|
def _remove_curly_braces(text):
current_pos = 0
depth = 0
ret = ""
for match in re.finditer("[{}]", text):
if depth == 0:
ret += text[current_pos:match.start()]
depth += 1 if text[match.start()] == "{" else -1
current_pos = match.end()
if depth != 0:
pass
else:
ret += text[current_pos:]
return ret
|
Remove everything in curly braces.
Curly braces may be nested, so we keep track of depth.
Args:
text: a string
Returns:
a string
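Illustrative usage sketch (not part of the original docstring):
    >>> _remove_curly_braces("foo {{lang|en|bar}} baz")
    'foo  baz'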
|
juraj-google-style
|
def dump_migration_session_state(raw):
class BlockStyle(str):
pass
class SessionDumper(yaml.SafeDumper):
pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback'])
return yaml.dump(raw, Dumper=SessionDumper)
|
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
|
codesearchnet
|
def find_all(self, kw: YangIdentifier,
pref: YangIdentifier = None) -> List["Statement"]:
return [c for c in self.substatements
if c.keyword == kw and c.prefix == pref]
|
Return the list of all substatements with the given keyword and prefix.
Args:
kw: Statement keyword (local part for extensions).
pref: Keyword prefix (``None`` for built-in statements).
|
juraj-google-style
|
def _set_root(self, request):
if request.state_root:
root = request.state_root
else:
head = self._get_chain_head()
root = head.state_root_hash
try:
self._tree.set_merkle_root(root)
except KeyError as e:
LOGGER.debug('Unable to find root "%s" in database', e)
raise _ResponseFailed(self._status.NO_ROOT)
return root
|
Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
ResponseFailed: Failed to set the root if it cannot be found in the merkle tree
|
codesearchnet
|
def assert_proper_iterable(values):
unintentional_iterables = (tensor_lib.Tensor, sparse_tensor.SparseTensor, np.ndarray) + compat.bytes_or_text_types
if isinstance(values, unintentional_iterables):
raise TypeError('Expected argument "values" to be a "proper" iterable. Found: %s' % type(values))
if not hasattr(values, '__iter__'):
raise TypeError('Expected argument "values" to be iterable. Found: %s' % type(values))
|
Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
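Illustrative usage sketch (assumes TensorFlow is imported as `tf`):
    >>> assert_proper_iterable([tf.constant(1), tf.constant(2)])  # a list passes silently
    >>> assert_proper_iterable(tf.constant([1, 2]))  # doctest: +SKIP
    TypeError: Expected argument "values" to be a "proper" iterable. ...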
|
github-repos
|
def create_from_json(cls, json_data):
block = Block()
block_info = json_data["block_info"]
block.block_id = block_info["block_id"]
block.num_bins = block_info["num_bins"] if "num_bins" in block_info else None
block.property_type = block_info["property_type"] if "property_type" in block_info else None
block.meta = json_data["meta"] if "meta" in json_data else None
block.component_results = _create_component_results(json_data, "block_info")
return block
|
Deserialize block json data into a Block object
Args:
json_data (dict): The json data for this block
Returns:
Block object
|
juraj-google-style
|
def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid], acquaintance_size: int=0) -> circuits.Circuit:
if (acquaintance_size < 0):
raise ValueError('acquaintance_size must be non-negative.')
elif (acquaintance_size == 0):
return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)
if (acquaintance_size > len(qubit_order)):
return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)
if (acquaintance_size == len(qubit_order)):
return circuits.Circuit.from_ops(acquaint(*qubit_order), device=UnconstrainedAcquaintanceDevice)
strategy = circuits.Circuit.from_ops((acquaint(q) for q in qubit_order), device=UnconstrainedAcquaintanceDevice)
for size_to_acquaint in range(2, (acquaintance_size + 1)):
expose_acquaintance_gates(strategy)
replace_acquaintance_with_swap_network(strategy, qubit_order, size_to_acquaint)
return strategy
|
Returns an acquaintance strategy capable of executing a gate corresponding
to any set of at most acquaintance_size qubits.
Args:
qubit_order: The qubits on which the strategy should be defined.
acquaintance_size: The maximum number of qubits to be acted on by
an operation.
Returns:
A circuit capable of implementing any set of k-local
operations.
|
codesearchnet
|
def workflow_stages(self) -> List[WorkflowStage]:
workflow_stages = []
stages = DB.get_hash_value(self.key, 'workflow_stages')
for index in range(len(ast.literal_eval(stages))):
workflow_stages.append(WorkflowStage(self.id, index))
return workflow_stages
|
Return list of workflow stages.
Returns:
dict, resources of a specified pb
|
codesearchnet
|
async def create_artifact(context, path, target_path, content_type, content_encoding, storage_type='s3', expires=None):
payload = {'storageType': storage_type, 'expires': (expires or get_expiration_arrow(context).isoformat()), 'contentType': content_type}
args = [get_task_id(context.claim_task), get_run_id(context.claim_task), target_path, payload]
tc_response = (await context.temp_queue.createArtifact(*args))
skip_auto_headers = [aiohttp.hdrs.CONTENT_TYPE]
loggable_url = get_loggable_url(tc_response['putUrl'])
log.info('uploading {path} to {url}...'.format(path=path, url=loggable_url))
with open(path, 'rb') as fh:
async with async_timeout.timeout(context.config['artifact_upload_timeout']):
async with context.session.put(tc_response['putUrl'], data=fh, headers=_craft_artifact_put_headers(content_type, content_encoding), skip_auto_headers=skip_auto_headers, compress=False) as resp:
log.info('create_artifact {}: {}'.format(path, resp.status))
response_text = (await resp.text())
log.info(response_text)
if (resp.status not in (200, 204)):
raise ScriptWorkerRetryException('Bad status {}'.format(resp.status))
|
Create an artifact and upload it.
This should support s3 and azure out of the box; we'll need some tweaking
if we want to support redirect/error artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
path (str): the path of the file to upload.
target_path (str):
content_type (str): Content type (MIME type) of the artifact. Values can be found via
scriptworker.artifacts.guess_content_type_and_encoding()
content_encoding (str): Encoding (per mimetypes' library) of the artifact. None is for no encoding. Values can
be found via scriptworker.artifacts.guess_content_type_and_encoding()
storage_type (str, optional): the taskcluster storage type to use.
Defaults to 's3'
expires (str, optional): datestring of when the artifact expires.
Defaults to None.
Raises:
ScriptWorkerRetryException: on failure.
|
codesearchnet
|
def _get_batches_of_transformed_samples(self, index_array):
raise NotImplementedError
|
Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
|
github-repos
|
def send(msg_type, send_async=False, *args, **kwargs):
message = message_factory(msg_type, *args, **kwargs)
try:
if send_async:
message.send_async()
else:
message.send()
except MessageSendError as e:
err_exit('Unable to send message: ', e)
|
Constructs a message class and sends the message.
Defaults to sending synchronously. Set send_async=True to send
asynchronously.
Args:
:msg_type: (str) the type of message to send, i.e. 'Email'
:send_async: (bool) default is False, set True to send asynchronously.
:kwargs: (dict) keywords arguments that are required for the
various message types. See docstrings for each type.
i.e. help(messages.Email), help(messages.Twilio), etc.
Example:
>>> kwargs = {
from_: 'me@here.com',
to: 'you@there.com',
auth: 'yourPassword',
subject: 'Email Subject',
body: 'Your message to send',
attachments: ['filepath1', 'filepath2'],
}
>>> messages.send('email', **kwargs)
Message sent...
|
codesearchnet
|
def create_model(text_in, timesteps, phase):
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05):
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
lstm = embedded.cleave_sequence(timesteps).sequence_lstm(LOWER).sequence_lstm(UPPER)
return lstm.squash_sequence().dropout(keep_prob=0.8, phase=phase).fully_connected(CHARS, activation_fn=None)
|
Creates a 2 layer LSTM model with dropout.
Args:
text_in: The input text as ASCII ordinals in a Tensor.
timesteps: The number of timesteps in the sequence.
phase: Phase controls whether or not dropout is active. In training mode
we want to perform dropout, but in test we want to disable it.
Returns:
The logits.
|
codesearchnet
|
def update(self, resource, id_or_uri):
return self._client.update(resource=resource, uri=id_or_uri)
|
Updates a registered Device Manager.
Args:
resource (dict): Object to update.
id_or_uri: Can be either the Device manager ID or URI.
Returns:
dict: The device manager resource.
|
codesearchnet
|
def get_hostname(url):
if (url not in URLHelper.__cache):
URLHelper.__cache[url] = urlparse(url)
parts = URLHelper.__cache[url].netloc.split('.')
if (len(parts) == 1):
return parts[0]
else:
return '.'.join(parts[(- 2):(- 1)])
|
Get the hostname of the given URL.
Args:
url (str): The URL to get the hostname from.
Returns:
str: The hostname
|
codesearchnet
|
def __batch_update(self, train_events, test_events, n_epoch):
for epoch in range(n_epoch):
if n_epoch != 1:
np.random.shuffle(train_events)
for e in train_events:
self.rec.update(e, batch_train=True)
MPR = self.__batch_evaluate(test_events)
if self.debug:
logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
|
Batch update called by the fitting method.
Args:
train_events (list of Event): Positive training events.
test_events (list of Event): Test events.
n_epoch (int): Number of epochs for the batch training.
|
juraj-google-style
|
def similar(self, **kwargs):
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def check_valid_cpc_status(method, uri, cpc):
status = cpc.properties.get('status', None)
if (status is None):
return
valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']
if (status not in valid_statuses):
if uri.startswith(cpc.uri):
raise ConflictError(method, uri, reason=1, message='The operation cannot be performed because the targeted CPC {} has a status that is not valid for the operation: {}'.format(cpc.name, status))
else:
raise ConflictError(method, uri, reason=6, message='The operation cannot be performed because CPC {} hosting the targeted resource has a status that is not valid for the operation: {}'.format(cpc.name, status))
|
Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation.
|
codesearchnet
|
def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):
_LOG.debug("Creating ArgumentParser for '%s'", func.__name__)
(description, arg_help) = _prepare_doc(
func, [x for (x, _) in args_and_defaults], delimiter_chars)
parser = argparse.ArgumentParser(description=description)
for ((arg, default), arg_type) in zip_longest(args_and_defaults, types):
help_msg = arg_help[arg]
if default is NoDefault:
arg_type = arg_type or identity_type
if arg_type == bool:
_LOG.debug("Adding optional flag %s.%s", func.__name__, arg)
parser.add_argument("--%s" % arg, default=True, required=False,
action="store_false",
help="%s. Defaults to True if not specified"
% help_msg)
else:
_LOG.debug("Adding positional argument %s.%s", func.__name__,
arg)
parser.add_argument(arg, help=help_msg, type=arg_type)
else:
if default is None and arg_type is None:
raise ParseThisError("To use default value of 'None' you need "
"to specify the type of the argument '{}' "
"for the method '{}'"
.format(arg, func.__name__))
arg_type = arg_type or type(default)
if arg_type == bool:
action = "store_false" if default else "store_true"
_LOG.debug("Adding optional flag %s.%s", func.__name__, arg)
parser.add_argument("--%s" % arg, help=help_msg,
default=default, action=action)
else:
_LOG.debug(
"Adding optional argument %s.%s", func.__name__, arg)
parser.add_argument("--%s" % arg, help=help_msg,
default=default, type=arg_type)
return parser
|
Return an ArgumentParser for the given function. Arguments are defined
from the function arguments and their associated defaults.
Args:
func: function for which we want an ArgumentParser
types: types to which the command line arguments should be converted to
args_and_defaults: list of 2-tuples (arg_name, arg_default)
delimiter_chars: characters used to separate the parameters from their
help message in the docstring
|
juraj-google-style
|
def coarse_grain(G, ncg):
if ncg <= 1:
return G
G = numpy.asarray(G)
nbin, remainder = divmod(G.shape[-1], ncg)
if remainder != 0:
nbin += 1
return numpy.transpose([
numpy.sum(G[..., i:i+ncg], axis=-1) / G[..., i:i+ncg].shape[-1]
for i in numpy.arange(0, ncg * nbin, ncg)
])
|
Coarse-grain last index of array ``G``.
Bin the last index of array ``G`` in bins of width ``ncg``, and
replace each bin by its average. Return the binned results.
Args:
G: Array to be coarse-grained.
ncg: Bin width for coarse-graining.
|
juraj-google-style
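A minimal usage sketch, assuming the coarse_grain function above is in scope; the data values are illustrative.
import numpy

G = numpy.arange(12.0).reshape(2, 6)   # two correlators, six time slices each
print(coarse_grain(G, ncg=2))
# each pair of adjacent slices is replaced by its average:
# [[ 0.5  2.5  4.5]
#  [ 6.5  8.5 10.5]]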
|
def parse_flux_bounds(entry):
lower_bound = None
upper_bound = None
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':
upper_bound = value
elif pid == 'LOWER_BOUND' or name == 'LOWER_BOUND':
lower_bound = value
return lower_bound, upper_bound
|
Return flux bounds for reaction entry.
Detect flux bounds that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
flux bounds are returned as a pair of lower, upper bounds. The returned
bound is None if undefined.
Args:
entry: :class:`SBMLReactionEntry`.
|
juraj-google-style
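A hedged usage sketch: the stand-in entry below only mimics the single attribute the function reads, and the parameter tuples (pid, name, value, units) are illustrative.
from collections import namedtuple

# Hypothetical stand-in for an SBMLReactionEntry
FakeEntry = namedtuple('FakeEntry', 'kinetic_law_reaction_parameters')
entry = FakeEntry(kinetic_law_reaction_parameters=[
    ('LOWER_BOUND', 'lower bound', -1000.0, 'mmol_per_gDW_hr'),
    ('UPPER_BOUND', 'upper bound', 1000.0, 'mmol_per_gDW_hr'),
])
print(parse_flux_bounds(entry))  # (-1000.0, 1000.0)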
|
def _probe_characteristics_finished(self, result):
handle = result['context']['handle']
conn_id = result['context']['connection_id']
conndata = self._get_connection(handle, 'preparing')
if (conndata is None):
self._logger.info('Connection disconnected before probe_char... finished, conn_id=%d', conn_id)
return
callback = conndata['callback']
if (result['result'] is False):
conndata['failed'] = True
conndata['failure_reason'] = 'Could not probe GATT characteristics'
self.disconnect_async(conn_id, self._on_connection_failed)
return
services = result['return_value']['services']
if (TileBusService not in services):
conndata['failed'] = True
conndata['failure_reason'] = 'TileBus service not present in GATT services'
self.disconnect_async(conn_id, self._on_connection_failed)
return
conndata['chars_done_time'] = time.time()
service_time = (conndata['services_done_time'] - conndata['connect_time'])
char_time = (conndata['chars_done_time'] - conndata['services_done_time'])
total_time = (service_time + char_time)
conndata['state'] = 'connected'
conndata['services'] = services
conndata['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)
conndata['parser'].context = conn_id
del conndata['disconnect_handler']
with self.count_lock:
self.connecting_count -= 1
self._logger.info('Total time to connect to device: %.3f (%.3f enumerating services, %.3f enumerating chars)', total_time, service_time, char_time)
callback(conndata['connection_id'], self.id, True, None)
|
Callback when BLE adapter has finished probing services and characteristics for a device
Args:
result (dict): Result from the probe_characteristics command
|
codesearchnet
|
def process_response(self, req, resp, resource):
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
        except (TypeError, ValueError):
resp.status = falcon.HTTP_500
|
Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
|
juraj-google-style
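A hedged sketch of how a middleware carrying this hook is registered; falcon.API is the constructor in the Falcon versions this snippet appears to target (newer releases use falcon.App), and the class name is invented.
import falcon

class DictToJSONMiddleware:
    # the process_response hook defined above would live on this class
    def process_response(self, req, resp, resource):
        ...

app = falcon.API(middleware=[DictToJSONMiddleware()])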
|
def parse_individual(sample):
ind_info = {}
if ('sample_id' not in sample):
raise PedigreeError("One sample is missing 'sample_id'")
sample_id = sample['sample_id']
if ('sex' not in sample):
raise PedigreeError(("Sample %s is missing 'sex'" % sample_id))
sex = sample['sex']
if (sex not in REV_SEX_MAP):
log.warning("'sex' is only allowed to have values from {}".format(', '.join(list(REV_SEX_MAP.keys()))))
        raise PedigreeError(('Individual %s has wrongly formatted sex' % sample_id))
if ('phenotype' not in sample):
raise PedigreeError(("Sample %s is missing 'phenotype'" % sample_id))
phenotype = sample['phenotype']
if (phenotype not in REV_PHENOTYPE_MAP):
log.warning("'phenotype' is only allowed to have values from {}".format(', '.join(list(REV_PHENOTYPE_MAP.keys()))))
        raise PedigreeError(('Individual %s has wrongly formatted phenotype' % sample_id))
ind_info['individual_id'] = sample_id
ind_info['display_name'] = sample.get('sample_name', sample['sample_id'])
ind_info['sex'] = sex
ind_info['phenotype'] = phenotype
ind_info['father'] = sample.get('father')
ind_info['mother'] = sample.get('mother')
ind_info['confirmed_parent'] = sample.get('confirmed_parent')
ind_info['confirmed_sex'] = sample.get('confirmed_sex')
ind_info['predicted_ancestry'] = sample.get('predicted_ancestry')
bam_file = sample.get('bam_path')
if bam_file:
ind_info['bam_file'] = bam_file
mt_bam = sample.get('mt_bam')
if mt_bam:
ind_info['mt_bam'] = mt_bam
analysis_type = sample.get('analysis_type')
if analysis_type:
ind_info['analysis_type'] = analysis_type
ind_info['capture_kits'] = ([sample.get('capture_kit')] if ('capture_kit' in sample) else [])
vcf2cytosure = sample.get('vcf2cytosure')
if vcf2cytosure:
ind_info['vcf2cytosure'] = vcf2cytosure
tumor_type = sample.get('tumor_type')
if tumor_type:
ind_info['tumor_type'] = tumor_type
tumor_mutational_burden = sample.get('tmb')
if tumor_mutational_burden:
ind_info['tmb'] = tumor_mutational_burden
msi = sample.get('msi')
if msi:
ind_info['msi'] = msi
tumor_purity = sample.get('tumor_purity')
if tumor_purity:
ind_info['tumor_purity'] = tumor_purity
return ind_info
|
Parse individual information
Args:
sample (dict)
Returns:
{
'individual_id': str,
'father': str,
'mother': str,
'display_name': str,
'sex': str,
'phenotype': str,
'bam_file': str,
'vcf2cytosure': str,
'analysis_type': str,
'capture_kits': list(str),
}
|
codesearchnet
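A hedged usage sketch; the accepted 'sex' and 'phenotype' strings are whatever keys REV_SEX_MAP and REV_PHENOTYPE_MAP define (assumed here to include 'male' and 'affected'), and the sample values are invented.
sample = {
    'sample_id': 'ADM1059A2',
    'sample_name': 'proband',
    'sex': 'male',            # assumed to be a key of REV_SEX_MAP
    'phenotype': 'affected',  # assumed to be a key of REV_PHENOTYPE_MAP
    'bam_path': '/data/proband.bam',
}
ind_info = parse_individual(sample)
print(ind_info['display_name'], ind_info['bam_file'])  # proband /data/proband.bam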
|
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True):
    num_pages_for_vocab_generation = approx_vocab_size
vocab_file = vocab_filename(approx_vocab_size, strip)
def my_generator(data_prefix):
'Line generator for vocab.'
count = 0
for page in corpus_page_generator(all_corpus_files(data_prefix)[::(- 1)], tmp_dir, max_page_size_exp):
revisions = page['revisions']
if revisions:
text = get_text(revisions[(- 1)], strip=strip)
(yield text)
count += 1
if ((count % 100) == 0):
tf.logging.info(('reading pages for vocab %d' % count))
if (count > num_pages_for_vocab_generation):
break
return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))
|
Get or generate the vocabulary.
Args:
data_dir: a string
tmp_dir: a string
data_prefix: a string
max_page_size_exp: an integer
approx_vocab_size: an integer
strip: a boolean
Returns:
a TextEncoder
|
codesearchnet
|
def _format_src_url(self, path, caller_system):
path = ('%s/%s' % (self._endpoint, self.relpath(path)))
if (caller_system is not self):
try:
path = ('%s?%s' % (path, self._storage_parameters['sas_token']))
except KeyError:
pass
return path
|
Ensure path is absolute and use the correct URL format for use with
cross Azure storage account copy function.
Args:
path (str): Path or URL.
caller_system (pycosio.storage.azure._AzureBaseSystem subclass):
System calling this method (Can be another Azure system).
Returns:
str: URL.
|
codesearchnet
|
def HandleSimpleResponses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms)
|
Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message.
|
juraj-google-style
|
def check_array_lengths(inputs, targets, weights=None):
def is_tensor_or_composite_tensor(x):
return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)
def set_of_lengths(x):
if x is None:
return {}
else:
return set([y.shape[0] for y in x if y is not None and (not is_tensor_or_composite_tensor(y))])
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have the same number of samples. Got array shapes: ' + str([x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have the same number of samples. Got array shapes: ' + str([y.shape for y in targets]))
if set_x and set_y and (list(set_x)[0] != list(set_y)[0]):
raise ValueError('Input arrays should have the same number of samples as target arrays. Found ' + str(list(set_x)[0]) + ' input samples and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have the same number of samples. Got array shapes: ' + str([w.shape for w in weights]))
if set_y and set_w and (list(set_y)[0] != list(set_w)[0]):
raise ValueError('Sample_weight arrays should have the same number of samples as target arrays. Got ' + str(list(set_y)[0]) + ' input samples and ' + str(list(set_w)[0]) + ' target samples.')
|
Does user input validation for numpy arrays.
Args:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
|
github-repos
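A minimal sketch of the check, assuming the function above is importable; shapes are illustrative.
import numpy as np

x = [np.zeros((32, 10)), np.zeros((32, 4))]   # two inputs, 32 samples each
y = [np.zeros((32, 1))]
check_array_lengths(x, y)                      # passes silently

y_bad = [np.zeros((16, 1))]
check_array_lengths(x, y_bad)                  # raises ValueError: sample counts differ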
|
def users(self, proc):
ret = {}
if (self.first_column in ['USER', 'UID']):
for row in self.data:
if (proc == row[self.command_name]):
if (row[self.first_column] not in ret):
ret[row[self.first_column]] = []
ret[row[self.first_column]].append(row['PID'])
return ret
|
Searches for all users running a given command.
Returns:
dict: each username as a key to a list of PIDs (as strings) that
are running the given process.
``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found.
.. note::
'proc' must match the entire command and arguments.
|
codesearchnet
|
def check_channel(fcn):
def wrapper(*args, **kwargs):
if (not isinstance(args[1], ChannelResource)):
raise RuntimeError('resource must be an instance of intern.resource.boss.ChannelResource.')
if (not args[1].cutout_ready):
raise PartialChannelResourceError('ChannelResource not fully initialized. Use intern.remote.BossRemote.get_channel({}, {}, {})'.format(args[1].name, args[1].coll_name, args[1].exp_name))
return fcn(*args, **kwargs)
return wrapper
|
Decorator that ensures a valid channel passed in.
Args:
fcn (function): Function that has a ChannelResource as its second argument.
Returns:
(function): Wraps given function with one that checks for a valid channel.
|
codesearchnet
|
def bessel_i0(x, name=None):
with ops.name_scope(name, 'bessel_i0', [x]):
return gen_special_math_ops.bessel_i0(x)
|
Computes the Bessel i0 function of `x` element-wise.
Modified Bessel function of order 0.
It is preferable to use the numerically stabler function `i0e(x)` instead.
>>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy()
array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32)
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.i0
@end_compatibility
|
github-repos
|
def differential(P, Q):
P, Q = Poly(P), Poly(Q)
if not chaospy.poly.is_decomposed(Q):
        return differential(P, chaospy.poly.decompose(Q)).sum(0)
if Q.shape:
return Poly([differential(P, q) for q in Q])
if Q.dim>P.dim:
P = chaospy.poly.setdim(P, Q.dim)
else:
Q = chaospy.poly.setdim(Q, P.dim)
qkey = Q.keys[0]
A = {}
for key in P.keys:
newkey = numpy.array(key) - numpy.array(qkey)
if numpy.any(newkey<0):
continue
A[tuple(newkey)] = P.A[key]*numpy.prod([fac(key[i], \
exact=True)/fac(newkey[i], exact=True) \
for i in range(P.dim)])
    return Poly(A, P.dim, P.shape, P.dtype)
|
Polynomial differential operator.
Args:
P (Poly):
Polynomial to be differentiated.
Q (Poly):
Polynomial to differentiate by. Must be decomposed. If polynomial
array, the output is the Jacobian matrix.
|
juraj-google-style
|
def _extract_namespace_ast_node(self, desc):
if ((len(desc) == 0) or (not isinstance(desc[0], AstNamespace))):
if self._debug:
self._logger.info('Description: %r', desc)
raise InvalidSpec('First declaration in a stone must be a namespace. Possibly caused by preceding errors.', desc[0].lineno, desc[0].path)
for item in desc[1:]:
if isinstance(item, AstNamespace):
raise InvalidSpec('Only one namespace declaration per file.', item[0].lineno, item[0].path)
return desc.pop(0)
|
Checks that the namespace is declared first in the spec, and that only
one namespace is declared.
Args:
desc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec
file in the order they were defined.
Return:
stone.frontend.ast.AstNamespace: The namespace AST node.
|
codesearchnet
|
def modify_lattice(self, new_lattice):
self._lattice = new_lattice
for site in self._sites:
site.lattice = new_lattice
|
Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice (Lattice): New lattice
|
codesearchnet
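A hedged usage sketch; the import path differs between pymatgen versions, and newer releases replace modify_lattice with a settable lattice property, so this assumes a version where the method still exists.
from pymatgen.core import Lattice, Structure

structure = Structure(Lattice.cubic(3.4), ['Po'], [[0, 0, 0]])  # illustrative cell
structure.modify_lattice(Lattice.cubic(4.2))  # fractional coords kept, basis swapped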
|
def FindEnumTypeByName(self, full_name):
full_name = _NormalizeFullyQualifiedName(full_name)
if (full_name not in self._enum_descriptors):
self.FindFileContainingSymbol(full_name)
return self._enum_descriptors[full_name]
|
Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type.
|
codesearchnet
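A hedged usage sketch against the default descriptor pool; 'my.package.Color' is a made-up name that would have to be registered first (e.g. by importing its generated _pb2 module).
from google.protobuf import descriptor_pool

pool = descriptor_pool.Default()
enum_desc = pool.FindEnumTypeByName('my.package.Color')  # hypothetical enum
print([v.name for v in enum_desc.values])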
|
def run_board(args):
init_config(args)
from backend.collector import CollectorService
service = CollectorService(args.logdir, args.reload_interval, standalone=False, log_level=args.log_level)
service.run()
logger.info(('Try to start automlboard on port %s\n' % args.port))
command = [os.path.join(root_path, 'manage.py'), 'runserver', ('0.0.0.0:%s' % args.port), '--noreload']
execute_from_command_line(command)
|
Run main entry for AutoMLBoard.
Args:
args: args parsed from command line
|
codesearchnet
|
async def game(self, short_name, *, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None):
result = types.InputBotInlineResultGame(id=(id or ''), short_name=short_name, send_message=(await self._message(text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons)))
if (id is None):
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result
|
Creates a new inline result of game type.
Args:
short_name (`str`):
The short name of the game to use.
|
codesearchnet
|
def save_array_types(self, fname):
type_defs = {'arrays': sorted(list(self.array_types))}
with open(fname, 'wt') as fh:
pprint(type_defs, stream=fh)
|
Save array type registry to a file
Args:
fname (str): Name of file to save array database to
|
juraj-google-style
|
def encode_bqm_as_qp(solver, linear, quadratic):
active = active_qubits(linear, quadratic)
nan = float('nan')
lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)
for qubit in solver._encoding_qubits]
lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))
quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0)
for (q1,q2) in solver._encoding_couplers
if q1 in active and q2 in active]
quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))
return {
'format': 'qp',
'lin': lin.decode('utf-8'),
'quad': quad.decode('utf-8')
}
|
Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
encoded submission dictionary
|
juraj-google-style
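The core packing step used above, shown in isolation: little-endian doubles, base64-encoded (the 'qp' wire format); the bias values are illustrative.
import base64
import struct

biases = [0.5, -1.0, 0.25]
packed = base64.b64encode(struct.pack('<' + 'd' * len(biases), *biases))
print(packed.decode('utf-8'))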
|
def unbroadcast_tfe_to(tensor, shape):
axis = utils.create_unbroadcast_axis(shape, shape_as_list(tensor))
return tf.reshape(tf.reduce_sum(tensor, axis=axis), shape)
|
Reverse the broadcasting operation.
See utils.py.
Args:
tensor: A Tensor.
shape: A shape that could have been broadcasted to the shape of tensor.
Returns:
Tensor with dimensions summed to match `shape`.
|
juraj-google-style
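A minimal sketch, assuming unbroadcast_tfe_to and its helpers are in scope: a (1, 4) tensor was broadcast to (3, 4) in the forward pass, so the incoming gradient is summed back to (1, 4).
import tensorflow as tf

grad = tf.ones((3, 4))                    # incoming gradient after broadcasting
small = unbroadcast_tfe_to(grad, (1, 4))  # summed back over the broadcast axis
print(small.shape, float(small[0, 0]))    # (1, 4) 3.0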
|
def id_to_piece(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece(input, model_file=model_file, model_proto=model_proto, name=name)
|
Converts vocabulary id into piece.
Args:
input: An arbitrary tensor of int32.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of string with the same shape as input.
|
codesearchnet
|
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RevokeResponsePayload, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
self.unique_identifier = attributes.UniqueIdentifier()
self.unique_identifier.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
|
Read the data encoding the RevokeResponsePayload object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
juraj-google-style
|
def variant_export_lines(store, case_obj, variants_query):
export_variants = []
for variant in variants_query:
variant_line = []
position = variant['position']
change = variant['reference']+'>'+variant['alternative']
variant_line.append(variant['rank_score'])
variant_line.append(variant['chromosome'])
variant_line.append(position)
variant_line.append(change)
variant_line.append('_'.join([str(position), change]))
gene_list = variant.get('genes')
gene_ids = []
gene_names = []
hgvs_c = []
if len(gene_list) > 0:
for gene_obj in gene_list:
hgnc_id = gene_obj['hgnc_id']
gene_name = gene(store, hgnc_id)['symbol']
gene_ids.append(hgnc_id)
gene_names.append(gene_name)
hgvs_nucleotide = '-'
transcripts_list = gene_obj.get('transcripts')
for transcript_obj in transcripts_list:
                    if transcript_obj.get('is_canonical') is True:
hgvs_nucleotide = str(transcript_obj.get('coding_sequence_name'))
hgvs_c.append(hgvs_nucleotide)
variant_line.append(';'.join( str(x) for x in gene_ids))
variant_line.append(';'.join( str(x) for x in gene_names))
variant_line.append(';'.join( str(x) for x in hgvs_c))
        else:
            # no gene annotations: pad the gene-related columns with placeholders
            for _ in range(4):
                variant_line.append('-')
variant_gts = variant['samples']
for individual in case_obj['individuals']:
for variant_gt in variant_gts:
if individual['individual_id'] == variant_gt['sample_id']:
variant_line.append(variant_gt['allele_depths'][0])
variant_line.append(variant_gt['allele_depths'][1])
variant_line.append(variant_gt['genotype_quality'])
variant_line = [str(i) for i in variant_line]
export_variants.append(",".join(variant_line))
return export_variants
|
Get variants info to be exported to file, one list (line) per variant.
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variants_query: a list of variant objects, each one is a dictionary
Returns:
export_variants: a list of strings. Each string in the list corresponds to the fields
of a variant to be exported to file, separated by commas
|
juraj-google-style
|
def save(f, arr, vocab):
itr = iter(vocab)
(word, idx) = next(itr)
_write_line(f, arr[idx], word)
for (word, idx) in itr:
f.write(b'\n')
_write_line(f, arr[idx], word)
|
Save word embedding file.
Args:
f (File): File to write the vectors. File should be open for writing
ascii.
arr (numpy.array): Numpy array with ``float`` dtype.
vocab (iterable): Each element is a pair of a word (``bytes``) and its ``arr``
index (``int``). Words should be encoded to ``bytes`` beforehand.
|
codesearchnet
|