code | docstring | source
---|---|---|
def add_to_query(self, query):
self.handle = win32pdh.AddCounter(query, self.path)
|
Add the current path to the query
Args:
query (obj):
The handle to the query to add the counter
|
juraj-google-style
|
def search(self, filters):
records = self.__model__.search(self.__five9__, filters)
return self.__class__(
self.__five9__, self.__model__, records,
)
|
Search Five9 given a filter.
Args:
filters (dict): A dictionary of search strings, keyed by the name
of the field to search.
Returns:
Environment: An environment representing the recordset.
|
juraj-google-style
|
def start_apppool(name):
ps_cmd = ['Start-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
|
Start an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to start.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.start_apppool name='MyTestPool'
|
juraj-google-style
|
def get_pull_request_number(task, source_env_prefix):
pull_request = _extract_from_env_in_payload(task, (source_env_prefix + '_PULL_REQUEST_NUMBER'))
if (pull_request is not None):
pull_request = int(pull_request)
return pull_request
|
Get what Github pull request created the graph.
Args:
task (ChainOfTrust or LinkOfTrust): the trust object to inspect
source_env_prefix (str): The environment variable prefix that is used
to get repository information.
Returns:
int: the pull request number.
None: if not defined for this task.
|
codesearchnet
|
def info(self, **kwargs):
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the basic movie information for a specific movie id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0, mode='train', **kwargs):
del params
rng = kwargs.get('rng', None)
((q, k, v), mask) = x
feature_depth = q.shape[(- 1)]
assert ((feature_depth % num_heads) == 0)
head_depth = feature_depth // num_heads
nbatch = np.shape(q)[0]
def SplitHeads(x):
return np.transpose(np.reshape(x, (nbatch, (- 1), num_heads, head_depth)), (0, 2, 1, 3))
def JoinHeads(x):
return np.reshape(np.transpose(x, (0, 2, 1, 3)), (nbatch, (- 1), (num_heads * head_depth)))
return JoinHeads(DotProductAttention(SplitHeads(q), SplitHeads(k), SplitHeads(v), mask, dropout=dropout, mode=mode, rng=rng))
|
Pure transformer-style multi-headed attention.
Args:
x: inputs ((q, k, v), mask)
params: parameters (none)
num_heads: int: number of attention heads
dropout: float: dropout rate
mode: str: 'train' or 'eval'
**kwargs: other arguments including the rng
Returns:
Pure Multi-headed attention layer (no Dense transforms on input).
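As an illustration of the head split/join reshaping used above, here is a small standalone NumPy sketch (the shapes are made-up examples, independent of the layer itself):
```python
import numpy as np

nbatch, seq_len, num_heads, head_depth = 2, 5, 8, 4
feature_depth = num_heads * head_depth  # 32
q = np.random.rand(nbatch, seq_len, feature_depth)

# Split the feature axis into heads: (batch, heads, seq, head_depth)
split = np.transpose(np.reshape(q, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))
# Join the heads back: (batch, seq, feature_depth)
joined = np.reshape(np.transpose(split, (0, 2, 1, 3)), (nbatch, -1, num_heads * head_depth))

assert joined.shape == q.shape and np.allclose(joined, q)
```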
|
codesearchnet
|
def controlled_by(self, *control_qubits: Qid) -> 'Gate':
from cirq.ops import ControlledGate
return ControlledGate(self, control_qubits, (len(control_qubits) if (control_qubits is not None) else 1))
|
Returns a controlled version of this gate.
Args:
control_qubits: Optional qubits to control the gate by.
|
codesearchnet
|
def to_diff_dict(self) -> Dict[str, Any]:
config_dict = self.to_dict()
default_config_dict = GenerationConfig().to_dict()
serializable_config_dict = {}
for key, value in config_dict.items():
if key not in default_config_dict or key == 'transformers_version' or value != default_config_dict[key]:
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
|
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
github-repos
|
def fetcher(date=datetime.today(), url_pattern=URL_PATTERN):
api_url = url_pattern % date.strftime('%Y-%m-%d')
headers = {'Referer': 'http:'}  # Referer URL truncated in the source
raw_result = requests.get(api_url, headers=headers).json()
return raw_result
|
Fetch json data from n.pl
Args:
date (date) - default today
url_pattern (string) - default URL_PATTERN
Returns:
dict - data from api
|
juraj-google-style
|
def query(self,
sparql,
mode="get",
namespace=None,
rtn_format="json",
**kwargs):
namespace = pick(namespace, self.namespace)
if kwargs.get("log_level"):
log.setLevel(kwargs['log_level'])
if kwargs.get("debug"):
log.setLevel(logging.DEBUG)
if rtn_format not in self.qry_formats:
raise KeyError("rtn_format was '%s'. Allowed values are %s" % \
               (rtn_format, list(self.qry_formats)))
url = self._make_url(namespace)
if 'prefix' not in sparql.lower():
sparql = "%s\n%s" % (NSM.prefix(), sparql)
if mode == "get":
data = {"query": sparql}
elif mode == "update":
data = {"update": sparql}
else:
raise NotImplementedError("mode must be one of ['get', 'update']")
headers = {'Accept': self.qry_formats[rtn_format]}
start = datetime.datetime.now()
try:
result = requests.post(url, data=data, headers=headers)
except requests.exceptions.ConnectionError:
result = requests.post(self._make_url(namespace, self.local_url),
data=data,
headers=headers)
log.debug(format_multiline(["",
"url='{url}'",
"mode='{mode}' namespace='{namespace}' rtn_format='{rtn_format}'",
"**** SPARQL QUERY ****",
"",
"{sparql}",
"Query Time: {q_time}"],
url=url,
mode=mode,
namespace=namespace,
rtn_format=rtn_format,
sparql=sparql,
q_time=(datetime.datetime.now()-start),
**kwargs))
if result.status_code == 200:
try:
if rtn_format == "json":
bindings = result.json().get('results',
{}).get('bindings', [])
elif rtn_format == 'xml':
xml_doc = etree.XML(result.text)
bindings = xml_doc.findall("results/bindings")
else:
bindings = result.text
try:
log.debug("result count: %s", len(bindings))
except TypeError:
pass
return bindings
except json.decoder.JSONDecodeError:
if mode == 'update':
return BeautifulSoup(result.text, 'lxml').get_text()
return result.text
else:
raise SyntaxError("%s\n\n%s\n\n%s" % (sparql,
add_sparql_line_nums(sparql),
result.text[result.text.find("java."):]))
|
Runs a SPARQL query and returns the results
Args:
-----
sparql: the sparql query to run
namespace: the namespace to run the sparql query against
mode: ['get'(default), 'update'] the type of sparql query
rtn_format: ['json'(default), 'xml'] format of query results
Kwargs:
-------
debug(bool): If True sets logging level to debug
|
juraj-google-style
|
def _sample(self, initial_states: tf.Tensor, counts: tf.Tensor):
circuits = self.circuit(initial_states)
num_circuits = tf.shape(circuits)[0]
tiled_values = tf.tile(tf.expand_dims(self.circuit.symbol_values, 0), [num_circuits, 1])
num_samples_mask = tf.cast((tf.ragged.range(counts) + 1).to_tensor(), tf.bool)
num_samples_mask = tf.map_fn(tf.random.shuffle, num_samples_mask)
samples = self._sample_layer(circuits, symbol_names=self.circuit.symbol_names, symbol_values=tiled_values, repetitions=tf.expand_dims(tf.math.reduce_max(counts), 0))
return tf.ragged.boolean_mask(samples, num_samples_mask)
|
Returns bitstring samples from the QNN.
Args:
initial_states: Shape [batch_size, num_qubits] of dtype `tf.int8`.
These are the initial states of each qubit in the circuit.
counts: Shape [batch_size] of dtype `tf.int32` such that `counts[i]` is
the number of samples to draw from `(qnn)|initial_states[i]>`.
Returns:
ragged_samples: `tf.RaggedTensor` of DType `tf.int8` structured such
that `ragged_samples[i]` contains `counts[i]` bitstrings drawn from
`(qnn)|initial_states[i]>`.
|
github-repos
|
def __init__(self, identifier, value):
super(VolumeAttribute, self).__init__()
self.identifier = identifier
self.value = value
|
Initializes the volume attribute object.
Args:
identifier (str): identifier of the attribute within the volume.
value (object): value of the attribute.
|
juraj-google-style
|
def concatenate(samplesets, defaults=None):
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError('samplesets must contain at least one SampleSet')
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
record = recfunctions.stack_arrays(records, defaults=defaults, asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype)
|
Combine SampleSets.
Args:
samplesets (iterable[:obj:`.SampleSet`]):
An iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8)
|
codesearchnet
|
def _create_topk_unique(inputs, k):
height = inputs.shape[0]
width = inputs.shape[1]
neg_inf_r0 = tf.constant((- np.inf), dtype=tf.float32)
ones = tf.ones([height, width], dtype=tf.float32)
neg_inf_r2 = (ones * neg_inf_r0)
inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)
tmp = inputs
topk_r2 = tf.zeros([height, k], dtype=tf.float32)
for i in range(k):
kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0), [height, 1])
topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
tmp = tf.where(ge_r2, neg_inf_r2, inputs)
log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
next_power_of_two = (1 << log2_ceiling)
count_mask = (next_power_of_two - 1)
mask_r0 = tf.constant(count_mask)
mask_r2 = tf.fill([height, k], mask_r0)
topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
return (topk_r2, topk_indices_r2)
|
Creates the top k values in sorted order with indices.
Args:
inputs: A tensor with rank of 2. [batch_size, original_size].
k: An integer, number of top elements to select.
Returns:
topk_r2: A tensor, the k largest elements. [batch_size, k].
topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
|
codesearchnet
|
def place_line(self,
device: 'cirq.google.XmonDevice',
length: int) -> GridQubitLineTuple:
seqs = AnnealSequenceSearch(device, self.seed).search(self.trace_func)
return GridQubitLineTuple.best_of(seqs, length)
|
Runs line sequence search.
Args:
device: Chip description.
length: Required line length.
Returns:
List of linear sequences on the chip found by simulated annealing
method.
|
juraj-google-style
|
def relative_entropy(rho, sigma):
log_rho = tf.linalg.logm(tf.cast(rho, tf.complex128))
log_sigma = tf.linalg.logm(tf.cast(sigma, tf.complex128))
return optimized_trace_matmul(rho, tf.subtract(log_rho, log_sigma))
|
Calculate the relative entropy between the two given density matrices.
D(rho||sigma) = Tr[rho(log(rho) - log(sigma))]
= tf.linalg.trace(
tf.matmul(rho,
tf.linalg.logm(rho) - tf.linalg.logm(sigma)))
Args:
rho: 2-D `tf.Tensor` of dtype `complex64` representing the left density
matrix in the relative entropy calculation.
sigma: 2-D `tf.Tensor` of dtype `complex64` representing the right density
matrix in the relative entropy calculation.
Returns:
A tf.Tensor float64 relative entropy scalar between the two given density
matrices.
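For intuition, a NumPy/SciPy sketch (not using the TF implementation above) evaluating the same trace formula on two diagonal density matrices, where it reduces to the classical KL divergence:
```python
import numpy as np
from scipy.linalg import logm

rho = np.diag([0.75, 0.25]).astype(complex)    # diagonal density matrix
sigma = np.diag([0.5, 0.5]).astype(complex)

d = np.trace(rho @ (logm(rho) - logm(sigma)))
print(d.real)  # ~0.1308 nats, i.e. KL([0.75, 0.25] || [0.5, 0.5])
```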
|
github-repos
|
def _unbatch(self) -> TypeSpec:
raise NotImplementedError(f'{type(self).__name__}._unbatch')
|
Returns a TypeSpec representing a single element of this TypeSpec.
Returns:
A `TypeSpec` representing a single element of objects with this TypeSpec.
|
github-repos
|
def max_variance_genes(data, nbins=5, frac=0.2):
indices = []
if sparse.issparse(data):
(means, var) = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int((data.shape[0] / nbins))
frac_elements = int((n_elements * frac))
for i in range(nbins):
bin_i = mean_indices[(i * n_elements):((i + 1) * n_elements)]
if (i == (nbins - 1)):
bin_i = mean_indices[(i * n_elements):]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[(len(bin_i) - frac_elements):]
ind = bin_i[top_var_indices]
ind = [index for index in ind if (var[index] > 0)]
indices.extend(ind)
return indices
|
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 5.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.2.
Returns:
list of gene indices (list of ints)
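A hedged usage sketch, assuming the function above and its NumPy/SciPy imports are available:
```python
import numpy as np

# Toy dense matrix: 100 "genes" x 20 "cells".
data = np.random.poisson(2.0, size=(100, 20)).astype(float)
idx = max_variance_genes(data, nbins=5, frac=0.2)
# 5 bins of 20 genes each, keeping the top 20% by variance per bin:
print(len(idx))  # ~20 (zero-variance genes, if any, are dropped)
```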
|
codesearchnet
|
def eval_algorithm(closing, low, high):
if ((high - low) == 0):
return (100 * (closing - low))
else:
return ((100 * (closing - low)) / (high - low))
|
Evaluates the SO algorithm
Args:
closing: Float of current closing price.
low: Float of lowest low closing price throughout some duration.
high: Float of highest high closing price throughout some duration.
Returns:
Float SO between 0 and 100.
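A quick worked example, assuming the function above is in scope:
```python
# %K = 100 * (closing - low) / (high - low) = 100 * 5 / 10
print(eval_algorithm(closing=105.0, low=100.0, high=110.0))  # 50.0
```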
|
codesearchnet
|
def assert_image_exists(self, pattern, timeout=20.0, **kwargs):
pattern = self.d.pattern_open(pattern)
match_kwargs = kwargs.copy()
match_kwargs.pop('safe', None)
match_kwargs.update({
'timeout': timeout,
'safe': True,
})
res = self.d.wait(pattern, **match_kwargs)
is_success = res is not None
message = 'assert image exists'
if res:
x, y = res.pos
kwargs['position'] = {'x': x, 'y': y}
message = 'image exists\npos %s\nconfidence=%.2f\nmethod=%s' % (res.pos, res.confidence, res.method)
else:
res = self.d.match(pattern)
if res is None:
message = 'Image not found'
else:
th = kwargs.get('threshold') or pattern.threshold or self.image_match_threshold
message = 'Matched: %s\nPosition: %s\nConfidence: %.2f\nThreshold: %.2f' % (
res.matched, res.pos, res.confidence, th)
kwargs['target'] = self._save_screenshot(pattern, name_prefix='target')
kwargs['screenshot'] = self.last_screenshot
kwargs.update({
'action': 'assert_image_exists',
'message': message,
'success': is_success,
})
self._add_assert(**kwargs)
|
Assert if image exists
Args:
- pattern: image filename # not support pattern for now
- timeout (float): seconds
- safe (bool): do not raise an assertion error even though the check failed.
|
juraj-google-style
|
def transform_feature(self, transformation_cache, state_manager):
pass
|
Returns intermediate representation (usually a `Tensor`).
Uses `transformation_cache` to create an intermediate representation
(usually a `Tensor`) that other feature columns can use.
Example usage of `transformation_cache`:
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). To access corresponding `Tensor`s,
transformation_cache will be used as follows:
```python
raw_tensor = transformation_cache.get('raw', state_manager)
fc_tensor = transformation_cache.get(input_fc, state_manager)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
|
github-repos
|
def _parse_alt_url(html_chunk):
url_list = html_chunk.find("a", fn=has_param("href"))
url_list = [x.params["href"] for x in url_list]
url_list = [x for x in url_list if not x.startswith("autori/")]
if not url_list:
return None
return normalize_url(BASE_URL, url_list[0])
|
Parse URL from alternative location if not found where it should be.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's URL.
|
juraj-google-style
|
def plotallanvar(data, dt, tmax=10, ax=None, **kwargs):
if (ax is None):
ax = plt.gca()
(tk, allanvar) = allan_variance(data, dt, tmax)
ax.loglog(tk, allanvar, **kwargs)
ax.set_xlabel('Time [s]')
ax.set_ylabel('Allan Variance')
ax.legend()
|
Plot Allan variance.
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
tmax (float): Maximum time.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
|
codesearchnet
|
def hwvtep_attach_vlan_vid(self, **kwargs):
name = kwargs.pop('name')
mac = kwargs.pop('mac')
vlan = kwargs.pop('vlan')
name_args = dict(name=name, vid=vlan, mac=mac)
method_name = 'overlay_gateway_attach_vlan_mac'
method_class = self._brocade_tunnels
gw_attr = getattr(method_class, method_name)
config = gw_attr(**name_args)
output = self._callback(config)
return output
|
Attach a VLAN and MAC address to a VXLAN overlay gateway.
Args:
name (str): overlay_gateway name
vlan (str): vlan_id range
mac (str): MAC address to attach to the gateway
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
|
codesearchnet
|
@dataclasses.dataclass
class InputFeatures:
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
def to_json_string(self):
return json.dumps(dataclasses.asdict(self)) + '\n'
|
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
|
github-repos
|
def close(self):
self._dll.JLINKARM_Close()
if self._lock is not None:
del self._lock
self._lock = None
return None
|
Closes the open J-Link.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
Raises:
JLinkException: if there is no connected JLink.
|
juraj-google-style
|
def normalize_words(self, ord=2, inplace=False):
if (ord == 2):
ord = None
vectors = (self.vectors.T / np.linalg.norm(self.vectors, ord, axis=1))
if inplace:
self.vectors = vectors.T
return self
return Embedding(vectors=vectors.T, vocabulary=self.vocabulary)
|
Normalize embeddings matrix row-wise.
Args:
ord: normalization order. Possible values {1, 2, 'inf', '-inf'}
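A NumPy sketch of the row-wise L2 normalization this performs (independent of the Embedding class):
```python
import numpy as np

vectors = np.array([[3.0, 4.0], [1.0, 0.0]])
# ord=None in np.linalg.norm gives the 2-norm of each row.
normalized = (vectors.T / np.linalg.norm(vectors, None, axis=1)).T
print(normalized)  # rows now have unit L2 norm: [[0.6, 0.8], [1.0, 0.0]]
```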
|
codesearchnet
|
def make_id():
global _simple_id
if settings.simple_ids(True):
with _simple_id_lock:
_simple_id += 1
return str(_simple_id)
else:
return make_globally_unique_id()
|
Return a new unique ID for a Bokeh object.
Normally this function will return simple monotonically increasing integer
IDs (as strings) for identifying Bokeh objects within a Document. However,
if it is desirable to have globally unique for every object, this behavior
can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.
Returns:
str
|
codesearchnet
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
Transformer sequence pair mask has the following format:
```
2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
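A toy, plain-Python walk-through of the same formula (hypothetical IDs; `cls_token_type_id` assumed to be 2 as in the diagram above):
```python
cls_token_type_id = 2
cls, sep = [98], [99]               # made-up special token ids
token_ids_0 = [11, 12, 13]          # first sequence
token_ids_1 = [21, 22]              # second sequence

mask = (len(cls) * [cls_token_type_id]
        + len(token_ids_0 + sep) * [0]
        + len(token_ids_1 + sep) * [1])
print(mask)  # [2, 0, 0, 0, 0, 1, 1, 1]
```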
|
github-repos
|
def _GetAuthToken(self, email, password):
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
account_type = "HOSTED"
req = self._CreateRequest(
url="https:
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError as e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
|
Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
|
juraj-google-style
|
def get_dimension_index(self, name, value):
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
|
Converts a dimension ID string and a category ID string into the
numeric index of that category in that dimension.
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
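A toy JSON-stat-style fragment (field names illustrative only) showing the two `index` layouts the method handles:
```python
dataset = {'dimension': {
    'sex': {'category': {'index': ['M', 'F']}},               # list form
    'year': {'category': {'index': {'2019': 0, '2020': 1}}},  # dict form
}}

ndx = dataset['dimension']['sex']['category']['index']
print(ndx.index('F'))  # 1

ndx = dataset['dimension']['year']['category']['index']
print(ndx['2020'])     # 1
```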
|
juraj-google-style
|
def _get_course_content_from_ecommerce(course_id, site_code=None):
api = get_ecommerce_client(site_code=site_code)
try:
api_response = api.courses(course_id).get()
except Exception:
logger.exception('An error occurred while retrieving data for course run [%s] from the Ecommerce API.', course_id, exc_info=True)
return {}
return {'title': api_response.get('name'), 'verification_deadline': api_response.get('verification_deadline')}
|
Get course information using the Ecommerce course api.
In case of error returns empty response.
Arguments:
course_id (str): course key of the course
site_code (str): site code
Returns:
course information from Ecommerce
|
codesearchnet
|
def create_bird_config_files(bird_configuration):
for ip_version in bird_configuration:
config_file = bird_configuration[ip_version]['config_file']
try:
touch(config_file)
except OSError as exc:
raise ValueError('failed to create {f}:{e}'.format(f=config_file, e=exc))
if bird_configuration[ip_version]['keep_changes']:
history_dir = os.path.join(os.path.dirname(config_file), 'history')
try:
os.mkdir(history_dir)
except FileExistsError:
pass
except OSError as exc:
raise ValueError('failed to make directory {d} for keeping a history of changes for {b}:{e}'.format(d=history_dir, b=config_file, e=exc))
else:
print('{d} is created'.format(d=history_dir))
|
Create bird configuration files per IP version.
Creates bird configuration files if they don't exist. It also creates the
directories where we store the history of changes, if this functionality is
enabled.
Arguments:
bird_configuration (dict): A dictionary with settings for bird.
Returns:
None
Raises:
ValueError if we can't create bird configuration files and the
directory to store the history of changes in bird configuration file.
|
codesearchnet
|
def interleave(args):
arg_iters = list(map(iter, args))
cycle_iter = it.cycle(arg_iters)
for iter_ in cycle_iter:
(yield six.next(iter_))
|
r"""
zip followed by flatten
Args:
args (tuple): tuple of lists to interleave
SeeAlso:
You may actually be better off doing something like this:
a, b, = args
ut.flatten(ut.bzip(a, b))
ut.flatten(ut.bzip([1, 2, 3], ['-']))
[1, '-', 2, '-', 3, '-']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> args = ([1, 2, 3, 4, 5], ['A', 'B', 'C', 'D', 'E', 'F', 'G'])
>>> genresult = interleave(args)
>>> result = ut.repr4(list(genresult), nl=False)
>>> print(result)
[1, 'A', 2, 'B', 3, 'C', 4, 'D', 5, 'E']
|
codesearchnet
|
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
with tf.variable_scope('sigmoid_accuracy_one_hot', values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, (- 1))
predictions = tf.argmax(predictions, (- 1))
(_, accuracy) = tf.metrics.accuracy(labels=labels, predictions=predictions)
return (accuracy, tf.constant(1.0))
|
Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
|
codesearchnet
|
def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
if labels is not None:
labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
lm_logits = self.bias_layer(lm_logits)
masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
|
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
|
github-repos
|
def remove_unused_links(self, used):
unused = []
self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE))
for row in self.cursor:
relpath, inode, mtime = row
inode = self._from_sqlite(inode)
path = os.path.join(self.root_dir, relpath)
if path in used:
continue
if not os.path.exists(path):
continue
actual_inode = get_inode(path)
actual_mtime, _ = get_mtime_and_size(path)
if inode == actual_inode and mtime == actual_mtime:
logger.debug("Removing '{}' as unused link.".format(path))
remove(path)
unused.append(relpath)
for relpath in unused:
cmd = 'DELETE FROM {} WHERE path = "{}"'
self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))
|
Removes all saved links except the ones that are used.
Args:
used (list): list of used links that should not be removed.
|
juraj-google-style
|
def data_period_start_day(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `data_period_start_day`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `data_period_start_day`')
self._data_period_start_day = value
|
Corresponds to IDD Field `data_period_start_day`
Args:
value (str): value for IDD Field `data_period_start_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def get_asset_url(self, path):
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
|
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
|
juraj-google-style
|
def _container_strategy(self):
container_strategy = self._container_strategy_weakref()
assert container_strategy is not None
return container_strategy
|
Get the containing `tf.distribute.Strategy`.
This should not generally be needed except when creating a new
`ReplicaContext` and to validate that the caller is in the correct
`scope()`.
Returns:
The `tf.distribute.Strategy` such that `strategy.extended` is `self`.
|
github-repos
|
def get_project_name(project_id, projects):
for project in projects:
if project_id == project.id:
return project.name
|
Retrieves project name for given project id
Args:
projects: List of projects
project_id: project id
Returns: Project name or None if there is no match
|
juraj-google-style
|
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
kwargs.update({"id": id})
return self.api_call("files.info", http_verb="GET", params=kwargs)
|
Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
|
juraj-google-style
|
def get_equivalent_qpoints(self, index):
if self.qpoints[index].label is None:
return [index]
list_index_qpoints = []
for i in range(self.nb_qpoints):
if self.qpoints[i].label == self.qpoints[index].label:
list_index_qpoints.append(i)
return list_index_qpoints
|
Returns the list of qpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the qpoint index
Returns:
a list of equivalent indices
TODO: now it uses the label we might want to use coordinates instead
(in case there was a mislabel)
|
juraj-google-style
|
def parse(self, text, key=None):
try:
data = json.loads(text)
except ValueError as e:
raise ValueError("%s: Value: [%s]" % (e, text))
if data and key:
if key not in data:
raise ValueError("Invalid response (key %s not found): %s" % (key, data))
data = data[key]
return data
|
Parses a response.
Args:
text (str): Text to parse
Kwargs:
key (str): Key to look for, if any
Returns:
Parsed value
Raises:
ValueError
|
juraj-google-style
|
def _extract_response_xml(self, domain, response):
attributes = {}
alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
try:
xml_root = ET.fromstring(response._content)
for xml_child in xml_root.findall('SD//'):
if xml_child.tag in alexa_keys and \
alexa_keys[xml_child.tag] in xml_child.attrib:
attributes[xml_child.tag.lower(
)] = xml_child.attrib[alexa_keys[xml_child.tag]]
except ParseError:
pass
attributes['domain'] = domain
return {'attributes': attributes}
|
Extract XML content of an HTTP response into dictionary format.
Args:
domain: the domain string that was queried
response: HTTP response object containing the Alexa XML payload
Returns:
A dictionary: {alexa-ranking key : alexa-ranking value}.
|
juraj-google-style
|
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Lists existing `BuildTrigger`s. This API is experimental.
Args:
request: (CloudbuildProjectsLocationsTriggersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListBuildTriggersResponse) The response message.
|
github-repos
|
def add_tensor_filter(self, filter_name, tensor_filter):
if self._session_wrapper:
self._session_wrapper.add_tensor_filter(filter_name, tensor_filter)
else:
self._pending_tensor_filters[filter_name] = tensor_filter
|
Add a tensor filter.
See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
Override default behavior to accommodate the possibility of this method
being called prior to the initialization of the underlying
`LocalCLIDebugWrapperSession` object.
Args:
filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`
for details.
tensor_filter: See doc of
`LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
|
github-repos
|
def render_template(template, out_dir='.', context=None):
template_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..',
'templates',
template
)
files = []
empty_dirs = []
for (dirpath, _, filenames) in os.walk(template_directory):
if len(filenames) == 0:
empty_dirs.append(os.path.relpath(dirpath, template_directory))
else:
files.extend([os.path.join(dirpath, filepath) for filepath in filenames])
for source_file in files:
with open(source_file, 'r') as file:
template = Template(file.read())
template_rendered = template.render(**(context or {}))
source_relpath = os.path.relpath(source_file, template_directory)
filename = os.path.join(out_dir, source_relpath)
filename_rendered = Template(filename).render(**(context or {}))
source_dir = os.path.dirname(filename_rendered)
if not os.path.exists(source_dir):
os.makedirs(source_dir)
with open(filename_rendered, 'w') as target_file:
target_file.write(template_rendered)
for dirpath in empty_dirs:
try:
dirname = os.path.join(out_dir, dirpath)
dirname_rendered = Template(dirname).render(**(context or {}))
if not os.path.exists(dirname_rendered):
os.makedirs(dirname_rendered)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dirpath):
pass
else:
raise
|
This function renders the template designated by the argument to the
designated directory using the given context.
Args:
template (string) : the source template to use (relative to ./templates)
out_dir (string) : the name of the output directory
context (dict) : the template rendering context
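The `Template(...).render(**context)` pattern above matches Jinja2; a minimal sketch under that assumption (toy template strings, not the project's own templates):
```python
from jinja2 import Template

context = {'project': 'demo'}
print(Template('name = {{ project }}').render(**context))       # name = demo
print(Template('{{ project }}/settings.py').render(**context))  # demo/settings.py
```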
|
juraj-google-style
|
def __init__(
self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None):
super(StorageWriter, self).__init__()
self._first_written_event_source_index = 0
self._serializers_profiler = None
self._session = session
self._storage_profiler = None
self._storage_type = storage_type
self._task = task
self._written_event_source_index = 0
self.number_of_analysis_reports = 0
self.number_of_event_sources = 0
self.number_of_event_tags = 0
self.number_of_events = 0
self.number_of_warnings = 0
|
Initializes a storage writer.
Args:
session (Session): session the storage changes are part of.
storage_type (Optional[str]): storage type.
task (Optional[Task]): task.
|
juraj-google-style
|
def get_string(self, significant_figures=6):
ph = "{:.%df}" % significant_figures
lines = []
for bound, d in zip(self.bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([ph] * 2 + [" {}lo {}hi"])
lines.append(bound_format.format(*fillers))
if self.tilt:
tilt_format = " ".join([ph] * 3 + [" xy xz yz"])
lines.append(tilt_format.format(*self.tilt))
return "\n".join(lines)
|
Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation
|
juraj-google-style
|
def _init_from_proto(self, context_def, import_scope=None):
assert isinstance(context_def, control_flow_pb2.CondContextDef)
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
self._pred = g.as_graph_element(ops.prepend_name_scope(context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def, import_scope=import_scope)
|
Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
|
github-repos
|
def filter(self, nodes):
filtered_dag = DAG()
for node in nodes:
filtered_dag.add_node_if_not_exists(node)
for edge in self.all_downstreams(node):
filtered_dag.add_node_if_not_exists(edge)
for node, edges in self.graph.items():
if node in filtered_dag.graph:
filtered_dag.graph[node] = edges
return filtered_dag
|
Returns a new DAG with only the given nodes and their
dependencies.
Args:
nodes (list): The nodes you are interested in.
Returns:
:class:`stacker.dag.DAG`: The filtered graph.
|
juraj-google-style
|
def get(self, ldap_dn):
self.base_dn = ldap_dn
self.sub_tree = BASE
return self.first()
|
Return an LDAP entry by DN
Args:
ldap_dn (str): LDAP DN
|
juraj-google-style
|
def is_copy_constructor(constructor):
assert isinstance(constructor, calldef_members.constructor_t)
args = constructor.arguments
parent = constructor.parent
if len(args) != 1:
return False
arg = args[0]
if not isinstance(arg.decl_type, cpptypes.compound_t):
return False
if not type_traits.is_reference(arg.decl_type):
return False
if not type_traits.is_const(arg.decl_type.base):
return False
un_aliased = type_traits.remove_alias(arg.decl_type.base)
if not isinstance(un_aliased.base, cpptypes.declarated_t):
return False
return id(un_aliased.base.declaration) == id(parent)
|
Check if the declaration is a copy constructor,
Args:
constructor (declarations.constructor_t): the constructor
to be checked.
Returns:
bool: True if this is a copy constructor, False instead.
|
juraj-google-style
|
def _read_mptcp_prio(self, bits, size):
temp = (self._read_unpack(1) if size else None)
data = dict(subtype='MP_PRIO', prio=dict(res=(b'\x00' * 3), backup=(True if int(bits[3]) else False), addrid=temp))
return data
|
Read Change Subflow Priority option.
Positional arguments:
* bits - str, 4-bit data
* size - int, length of option
Returns:
* dict -- extracted Change Subflow Priority (MP_PRIO) option
Structure of MP_PRIO [RFC 6824]:
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+---------------+---------------+-------+-----+-+--------------+
| Kind | Length |Subtype| |B| AddrID (opt) |
+---------------+---------------+-------+-----+-+--------------+
Octets Bits Name Description
0 0 tcp.opt.kind Kind (30)
1 8 tcp.opt.length Length (3/4)
2 16 tcp.opt.mp.subtype Subtype (5)
2 23 tcp.opt.mp.prio.backup Backup Path (B)
3 24 tcp.opt.mp.prio.addrid Address ID (optional)
|
codesearchnet
|
def fermi_fourier_trans_inverse_4(qubits):
yield fswap(qubits[1], qubits[2])
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2])
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield cirq.S(qubits[2])
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2])
|
The reverse fermionic Fourier transformation implemented on 4 qubits
on a line, which maps the momentum picture to the position picture.
Using the fast Fourier transformation algorithm, the circuit can be
decomposed into 2-mode fermionic Fourier transformation, the fermionic
SWAP gates, and single-qubit rotations.
Args:
qubits: list of four qubits
|
juraj-google-style
|
def load_variables_from_checkpoint(sess, start_checkpoint):
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
saver.restore(sess, start_checkpoint)
|
Utility function to centralize checkpoint restoration.
Args:
sess: TensorFlow session.
start_checkpoint: Path to saved checkpoint on disk.
|
github-repos
|
def do_get(self, uri):
self.validate_resource_uri(uri)
return self._connection.get(uri)
|
Helps to make get requests
Args:
uri: URI of the resource
Returns:
The resource data
|
codesearchnet
|
def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):
nmf = NMF(k)
if (len(W_list) == 0):
W_list = []
for i in range(n_runs):
W = nmf.fit_transform(data)
W_list.append(W)
W_stacked = np.hstack(W_list)
nmf_w = nmf.fit_transform(W_stacked)
nmf_h = nmf.components_
H_new = data.T.dot(nmf_w).T
nmf2 = NMF(k, init='custom')
nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
H_new = nmf2.components_
return (nmf_w, H_new)
|
Runs an ensemble method on the list of NMF W matrices...
Args:
data: genes x cells array (should be log + cell-normalized)
k: number of classes
n_runs (optional): number of random initializations of state estimation
W_list (optional): list of W arrays from previous NMF runs
nmf_params (optional): additional NMF parameters
Returns:
W_new
H_new
|
codesearchnet
|
def _create_query(node, context):
visited_nodes = [node]
output_columns = _get_output_columns(visited_nodes, context)
filters = _get_filters(visited_nodes, context)
selectable = sql_context_helpers.get_node_selectable(node, context)
query = select(output_columns).select_from(selectable).where(and_(*filters))
return query
|
Create a query from a SqlNode.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Selectable, selectable of the generated query.
|
juraj-google-style
|
def __init__(self, run_object):
run_obj_type = self.get_run_object_type(run_object)
if run_obj_type == 'module':
self.init_module(run_object)
elif run_obj_type == 'package':
self.init_package(run_object)
else:
self.init_function(run_object)
|
Initializes profiler.
Args:
run_object: object to be profiled.
|
juraj-google-style
|
def get_topics_strings(topics_words, alpha, vocabulary, topics_to_print=10, words_per_topic=10):
alpha = np.squeeze(alpha, axis=0)
highest_weight_topics = np.argsort((- alpha), kind='mergesort')
top_words = np.argsort((- topics_words), axis=1)
res = []
for topic_idx in highest_weight_topics[:topics_to_print]:
l = ['index={} alpha={:.2f}'.format(topic_idx, alpha[topic_idx])]
l += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]
res.append(' '.join(l))
return np.array(res)
|
Returns the summary of the learned topics.
Arguments:
topics_words: KxV tensor with topics as rows and words as columns.
alpha: 1xK tensor of prior Dirichlet concentrations for the
topics.
vocabulary: A mapping of word's integer index to the corresponding string.
topics_to_print: The number of topics with highest prior weight to
summarize.
words_per_topic: Number of words per topic to return.
Returns:
summary: A np.array with strings.
|
codesearchnet
|
def successful_request(self, now):
self._successful_requests.add(now, 1)
|
Notifies the throttler of a successful request.
Must be called once for each request (for which throttle_request was
previously called) that succeeded.
Args:
now: int, time in ms since the epoch
|
github-repos
|
def update(cls, session, record):
cls._check_implements('update')
data = record.to_api()
del data['id']
data['reload'] = True
return cls(
'/%s/%s.json' % (cls.__endpoint__, record.id),
data=data,
request_type=RequestPaginator.PUT,
singleton=True,
session=session,
)
|
Update a record.
Args:
session (requests.sessions.Session): Authenticated session.
record (helpscout.BaseModel): The record to
be updated.
Returns:
helpscout.BaseModel: Freshly updated record.
|
juraj-google-style
|
def encipher_vigenere(plaintext, plain_vocab, key):
ciphertext = []
layers = [ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))]
for (i, sentence) in enumerate(plaintext):
cipher_sentence = []
for (j, character) in enumerate(sentence):
key_idx = key[(j % len(key))]
encrypted_char = layers[key_idx].encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
|
Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
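A standalone sketch of the modular-shift idea assumed to be inside `ShiftEncryptionLayer` (integer vocabulary, repeating key):
```python
def vigenere_shift(plain, key, vocab_size):
    # cipher[j] = (plain[j] + key[j % len(key)]) % vocab_size
    return [(c + key[j % len(key)]) % vocab_size for j, c in enumerate(plain)]

print(vigenere_shift([0, 1, 2, 3], key=[1, 2], vocab_size=5))  # [1, 3, 3, 0]
```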
|
codesearchnet
|
def __init__(
self, datetime_value, date_time_description, data_type=None,
time_zone=None):
year, month, day_of_month, hours, minutes, seconds, _, _, _ = (
datetime_value.utctimetuple())
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds,
datetime_value.microsecond)
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds(
time_elements_tuple=time_elements_tuple)
super(PythonDatetimeEvent, self).__init__(
date_time, date_time_description, data_type=data_type,
time_zone=time_zone)
|
Initializes an event.
Args:
datetime_value (datetime.datetime): date and time values.
date_time_description (str): description of the meaning of the date and
time values.
data_type (Optional[str]): event data type. If the data type is not set
it is derived from the DATA_TYPE class attribute.
time_zone (Optional[datetime.tzinfo]): time zone.
|
juraj-google-style
|
def serialize_to_normalized_compact_json(py_obj):
return json.dumps(py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes)
|
Serialize a native object to normalized, compact JSON.
The JSON string is normalized by sorting any dictionary keys. It will be on a single
line without whitespace between elements.
Args:
py_obj: object
Any object that can be represented in JSON. Some types, such as datetimes are
automatically converted to strings.
Returns:
str: normalized, compact JSON string.
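Illustration with plain `json` (the custom `ToJsonCompatibleTypes` encoder is omitted here):
```python
import json

obj = {'b': 2, 'a': {'d': 4, 'c': 3}}
print(json.dumps(obj, sort_keys=True, separators=(',', ':')))
# {"a":{"c":3,"d":4},"b":2}
```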
|
codesearchnet
|
def assert_integer_form(x, data=None, summarize=None, message=None, int_dtype=None, name='assert_integer_form'):
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name='x')
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or '{} has non-integer components'.format(x)
if int_dtype is None:
try:
int_dtype = {dtypes.float16: dtypes.int16, dtypes.float32: dtypes.int32, dtypes.float64: dtypes.int64}[x.dtype.base_dtype]
except KeyError:
raise TypeError('Unrecognized type {}'.format(x.dtype.name))
return check_ops.assert_equal(x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype), data=data, summarize=summarize, message=message, name=name)
|
Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
|
github-repos
|
def get_input_shape_and_dtype(layer):
def _is_graph_model(layer):
return hasattr(layer, '_is_graph_network') and layer._is_graph_network or layer.__class__.__name__ == 'Sequential'
while _is_graph_model(layer):
if not layer.layers:
raise ValueError('An empty Model cannot be used as a Layer.')
layer = layer.layers[0]
if getattr(layer, '_batch_input_shape', None):
return (layer._batch_input_shape, layer.dtype)
return (None, None)
|
Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
|
github-repos
|
def post(self, path, params=None, timeout=None, event_timeout=None):
future = self.post_async(path, params)
self.wait_all_futures(
future, timeout=timeout, event_timeout=event_timeout)
return future.result()
|
Synchronously calls a method
Args:
path (list): The path to post to
params (dict): parameters for the call
timeout (float): time in seconds to wait for responses, wait
forever if None
event_timeout: maximum time in seconds to wait between each response
event, wait forever if None
Returns:
the result from 'method'
|
juraj-google-style
|
def override_binary_operator_helper(func, op_name, clazz_object=tensor_lib.Tensor):
@traceback_utils.filter_traceback
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
try:
x, y = maybe_promote_tensors(x, y)
return func(x, y, name=name)
except (TypeError, ValueError) as e:
if hasattr(type(y), '__r%s__' % op_name):
try:
r_op = getattr(y, '__r%s__' % op_name)
out = r_op(x)
if out is NotImplemented:
raise
return out
except (TypeError, ValueError):
raise e
else:
raise
@traceback_utils.filter_traceback
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name='y')
return clazz_object(sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name), sp_x.dense_shape)
@traceback_utils.filter_traceback
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
y, x = maybe_promote_tensors(y, x, force_same_dtype=True)
return func(x, y, name=name)
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is tensor_lib.Tensor:
clazz_object._override_operator('__%s__' % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator('__r%s__' % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator('__%s__' % op_name, binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
|
Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
|
github-repos
|
def _EnforceProcessMemoryLimit(self, memory_limit):
if resource:
if memory_limit is None:
memory_limit = 4 * 1024 * 1024 * 1024
elif memory_limit == 0:
memory_limit = resource.RLIM_INFINITY
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
|
Enforces a process memory limit.
Args:
memory_limit (int): maximum number of bytes the process is allowed
to allocate, where 0 represents no limit and None a default of
4 GiB.
|
juraj-google-style
|
def ws010(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ws010`'.format(value))
self._ws010 = value
|
Corresponds to IDD Field `ws010`
Wind speed corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws010`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def _create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size):
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
|
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
|
github-repos
|
def insert_data(self, remove_data=False, db_type='sqlite'):
if self.update_source:
import msp2db
self.c.execute(
"INSERT INTO library_spectra_source (id, name, parsing_software) VALUES"
" ({a}, '{b}', 'msp2db-v{c}')".format(a=self.current_id_origin, b=self.source, c=msp2db.__version__))
self.conn.commit()
if self.compound_info_all:
self.compound_info_all = _make_sql_compatible(self.compound_info_all)
cn = ', '.join(self.compound_info.keys()) + ',created_at,updated_at'
insert_query_m(self.compound_info_all, columns=cn, conn=self.conn, table='metab_compound',
db_type=db_type)
self.meta_info_all = _make_sql_compatible(self.meta_info_all)
cn = 'id,' + ', '.join(self.meta_info.keys()) + ',library_spectra_source_id, inchikey_id'
insert_query_m(self.meta_info_all, columns=cn, conn=self.conn, table='library_spectra_meta',
db_type=db_type)
cn = "id, mz, i, other, library_spectra_meta_id"
insert_query_m(self.spectra_all, columns=cn, conn=self.conn, table='library_spectra', db_type=db_type)
if self.spectra_annotation_all:
cn = "id, mz, tentative_formula, mass_error, library_spectra_meta_id"
insert_query_m(self.spectra_annotation_all, columns=cn, conn=self.conn,
table='library_spectra_annotation', db_type=db_type)
if remove_data:
self.meta_info_all = []
self.spectra_all = []
self.spectra_annotation_all = []
self.compound_info_all = []
self._get_current_ids(source=False)
|
Insert data stored in the current chunk of parsing into the selected database
Args:
remove_data (boolean): Remove the data stored within the LibraryData object for the current chunk of
processing
db_type (str): The type of database to submit to
either 'sqlite', 'mysql' or 'django_mysql' [default sqlite]
|
juraj-google-style
|
def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):
for output_file_path in output_file_paths:
try:
with open(output_file_path, 'w') as output_file:
pass
except IOError as e:
raise ValueError(("Couldn't open %s for writing: %s" % (output_file_path, e)))
if output_file_paths:
config_items = self.get_items_for_config_file_output(self._source_to_settings, parsed_namespace)
file_contents = self._config_file_parser.serialize(config_items)
for output_file_path in output_file_paths:
with open(output_file_path, 'w') as output_file:
output_file.write(file_contents)
message = ('Wrote config file to ' + ', '.join(output_file_paths))
if exit_after:
self.exit(0, message)
else:
print(message)
|
Write the given settings to output files.
Args:
parsed_namespace: namespace object created within parse_known_args()
output_file_paths: any number of file paths to write the config to
exit_after: whether to exit the program after writing the config files
|
codesearchnet
|
def from_epsg_code(code):
code = str(code)
proj4 = utils.crscode_to_string('epsg', code, 'proj4')
crs = from_proj4(proj4)
return crs
|
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
|
codesearchnet
|
def delete_variants(self, case_id, variant_type, category=None):
category = (category or '')
LOG.info('Deleting old {0} {1} variants for case {2}'.format(variant_type, category, case_id))
query = {'case_id': case_id, 'variant_type': variant_type}
if category:
query['category'] = category
result = self.variant_collection.delete_many(query)
LOG.info('{0} variants deleted'.format(result.deleted_count))
|
Delete variants of one type for a case
This is used when a case is reanalyzed
Args:
case_id(str): The case id
variant_type(str): 'research' or 'clinical'
category(str): 'snv', 'sv' or 'cancer'
|
codesearchnet
|
def GetForwardedIps(self, interface, interface_ip=None):
args = ['ls', 'table', 'local', 'type', 'local']
options = self._CreateRouteOptions(dev=interface)
result = self._RunIpRoute(args=args, options=options)
result = re.sub(r'local\s', r'', result)
return self.ParseForwardedIps(result.split())
|
Retrieve the list of configured forwarded IP addresses.
Args:
interface: string, the output device to query.
interface_ip: string, current interface ip address.
Returns:
list, the IP address strings.
|
juraj-google-style
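A hedged sketch of the parsing step only, using a made-up `ip route ls table local type local` output; the real class delegates the final filtering to ParseForwardedIps, which is not shown here.

import re

# Hypothetical output of the `ip route` query restricted to one device.
result = ('local 10.0.0.5 proto 66 scope host\n'
          'local 10.0.0.6 proto 66 scope host\n')
result = re.sub(r'local\s', r'', result)
tokens = result.split()
# A ParseForwardedIps-style pass keeps only the IP-looking tokens.
ips = [t for t in tokens if re.match(r'\d+\.\d+\.\d+\.\d+$', t)]
print(ips)   # ['10.0.0.5', '10.0.0.6']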
|
def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray:
return dot(cart_coords, self.inv_matrix)
|
Returns the fractional coordinates given cartesian coordinates.
Args:
cart_coords (3x1 array): Cartesian coords.
Returns:
Fractional coordinates.
|
codesearchnet
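A minimal numpy sketch of the same conversion, assuming a hypothetical 3x3 lattice matrix whose rows are the lattice vectors; it shows only the dot-with-inverse step the method performs, not pymatgen's Lattice class.

import numpy as np

# Hypothetical cubic lattice with 4 Angstrom edges (rows are lattice vectors).
matrix = np.array([[4.0, 0.0, 0.0],
                   [0.0, 4.0, 0.0],
                   [0.0, 0.0, 4.0]])
inv_matrix = np.linalg.inv(matrix)

cart_coords = np.array([2.0, 1.0, 3.0])
frac_coords = np.dot(cart_coords, inv_matrix)   # -> [0.5, 0.25, 0.75]

# Round trip back to cartesian confirms the inverse relationship.
assert np.allclose(np.dot(frac_coords, matrix), cart_coords)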
|
def image_summary(seqs, name, num=None):
seqs = tf.clip_by_value(seqs, 0.0, 1.0)
seqs = tf.unstack(seqs[:num])
joined_seqs = [tf.concat(tf.unstack(seq), 1) for seq in seqs]
joined_seqs = tf.expand_dims(tf.concat(joined_seqs, 0), 0)
tf.compat.v2.summary.image(name, joined_seqs, max_outputs=1, step=tf.compat.v1.train.get_or_create_global_step())
|
Visualizes sequences as TensorBoard summaries.
Args:
seqs: A tensor of shape [n, t, h, w, c].
name: String name of this summary.
num: Integer for the number of examples to visualize. Defaults to
all examples.
|
codesearchnet
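A numpy-only sketch of the layout the summary builds: time steps concatenated along width, sequences stacked along height. The toy dimensions are arbitrary and the actual tf.summary.image call is omitted.

import numpy as np

n, t, h, w, c = 2, 3, 4, 5, 1        # hypothetical toy dimensions
seqs = np.random.uniform(size=(n, t, h, w, c)).clip(0.0, 1.0)

# Concatenate the t frames of each sequence along width, then stack the
# n sequences along height, mirroring the tf.unstack/tf.concat calls.
rows = [np.concatenate(list(seq), axis=1) for seq in seqs]   # each (h, t*w, c)
grid = np.concatenate(rows, axis=0)                          # (n*h, t*w, c)
grid = grid[np.newaxis]                                      # (1, n*h, t*w, c)

assert grid.shape == (1, n * h, t * w, c)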
|
def bqm_index_labelled_input(var_labels_arg_name, samples_arg_names):
def index_label_decorator(f):
@wraps(f)
def _index_label(sampler, bqm, **kwargs):
if (not hasattr(bqm, 'linear')):
raise TypeError('expected input to be a BinaryQuadraticModel')
linear = bqm.linear
var_labels = kwargs.get(var_labels_arg_name, None)
has_samples_input = any(((kwargs.get(arg_name, None) is not None) for arg_name in samples_arg_names))
if (var_labels is None):
if all(((v in linear) for v in range(len(bqm)))):
return f(sampler, bqm, **kwargs)
if has_samples_input:
err_str = 'Argument `{}` must be provided if any of the samples arguments {} are provided and the bqm is not already index-labelled'.format(var_labels_arg_name, samples_arg_names)
raise ValueError(err_str)
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
inverse_mapping = dict(enumerate(linear))
var_labels = {v: i for (i, v) in iteritems(inverse_mapping)}
else:
inverse_mapping = {i: v for (v, i) in iteritems(var_labels)}
response = f(sampler, bqm.relabel_variables(var_labels, inplace=False), **kwargs)
return response.relabel_variables(inverse_mapping, inplace=True)
return _index_label
return index_label_decorator
|
Returns a decorator which ensures bqm variable labeling and all other
specified sample-like inputs are index labeled and consistent.
Args:
var_labels_arg_name (str):
The name of the argument that the user should use to pass in an
index labeling for the bqm.
samples_arg_names (list[str]):
The names of the expected sample-like inputs which should be
indexed according to the labels passed to the argument
`var_labels_arg_name`.
Returns:
Function decorator.
|
codesearchnet
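A plain-dict sketch of the bookkeeping the decorator performs, assuming only that bqm.linear behaves like a mapping from variable labels to biases; no dimod objects or sampler are involved.

# Stand-in for bqm.linear: arbitrary (hashable, sortable) variable labels.
linear = {'a': -1.0, 'c': 0.5, 'b': 2.0}

# Forward map: original label -> integer index (sorted for determinism).
inverse_mapping = dict(enumerate(sorted(linear)))          # {0: 'a', 1: 'b', 2: 'c'}
var_labels = {v: i for i, v in inverse_mapping.items()}    # {'a': 0, 'b': 1, 'c': 2}

# The index-labelled view of the problem the wrapped sampler would see.
indexed_linear = {var_labels[v]: bias for v, bias in linear.items()}
assert indexed_linear == {0: -1.0, 1: 2.0, 2: 0.5}

# Results come back index-labelled and are mapped to the original names.
sample = {0: 1, 1: -1, 2: 1}
relabelled = {inverse_mapping[i]: s for i, s in sample.items()}
assert relabelled == {'a': 1, 'b': -1, 'c': 1}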
|
def has_no_narrow_neurite_section(neuron, neurite_filter, radius_threshold=0.05, considered_section_min_length=50):
considered_sections = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter) if (sec.length > considered_section_min_length))
def narrow_section(section):
'Select narrow sections'
return (section.points[:, COLS.R].mean() < radius_threshold)
bad_ids = [(section.id, section.points[1]) for section in considered_sections if narrow_section(section)]
return CheckResult((len(bad_ids) == 0), bad_ids)
|
Check if the neuron has dendrites with narrow sections
Arguments:
neuron(Neuron): The neuron object to test
neurite_filter(callable): filter the neurites by this callable
radius_threshold(float): radii below this are considered narrow
considered_section_min_length(float): sections with length below
this are not taken into account
Returns:
CheckResult with result. result.info contains the narrow section ids and their
first point
|
codesearchnet
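A numpy sketch of the selection criterion only. The Section namedtuple and the radius column index R are invented stand-ins for the real NeuroM section objects and COLS.R.

import numpy as np
from collections import namedtuple

Section = namedtuple('Section', 'id points length')
R = 3  # hypothetical column index of the radius in the points array

sections = [
    Section(id=1, points=np.array([[0., 0., 0., 0.02], [1., 0., 0., 0.04]]), length=60.0),
    Section(id=2, points=np.array([[0., 0., 0., 0.30], [1., 0., 0., 0.20]]), length=80.0),
    Section(id=3, points=np.array([[0., 0., 0., 0.01], [1., 0., 0., 0.03]]), length=10.0),
]

radius_threshold, min_length = 0.05, 50.0
considered = (s for s in sections if s.length > min_length)
bad = [(s.id, s.points[1]) for s in considered
       if s.points[:, R].mean() < radius_threshold]

assert [sid for sid, _ in bad] == [1]   # section 3 is too short to be considered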
|
def assertNear(self, f1, f2, err, msg=None):
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err, '%f != %f +/- %f%s' % (f1, f2, err, ' (%s)' % msg if msg is not None else ''))
|
Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
|
github-repos
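A usage sketch in a plain unittest.TestCase with the same helper inlined, since the original is a method on TensorFlow's test base class.

import math
import unittest

class NearExample(unittest.TestCase):
    def assertNear(self, f1, f2, err, msg=None):
        # Same check as above: pass when |f1 - f2| <= err (or exact equality).
        self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
                        '%f != %f +/- %f%s' % (f1, f2, err,
                                               ' (%s)' % msg if msg is not None else ''))

    def test_sum_is_near(self):
        self.assertNear(0.1 + 0.2, 0.3, err=1e-9)

if __name__ == '__main__':
    unittest.main()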
|
def split(state, num):
state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)
state = _key2seed(state)
try:
states = stateless_random_ops.stateless_split(state, num)
except AttributeError as e:
states = stateless_split(state, num)
states = array_ops_stack.unstack(states, num)
states = nest.map_structure(_seed2key, states)
return states
|
Creates new independent RNG states from an existing state.
Args:
state: the existing state.
num: the number of the new states.
Returns:
A tuple of new states.
|
github-repos
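An analogous sketch with NumPy's SeedSequence.spawn rather than the TF stateless op the function wraps; it illustrates the same idea of deriving independent child RNG states from one parent state.

import numpy as np

parent = np.random.SeedSequence(1234)        # analogous to the incoming `state`
children = parent.spawn(3)                   # analogous to `split(state, num=3)`

# Each child seeds an independent generator; the streams do not overlap.
streams = [np.random.default_rng(c) for c in children]
samples = [rng.integers(0, 100, size=4) for rng in streams]
print(samples)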
|
def from_cif_file(cif_file, source='', comment=''):
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
|
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
|
codesearchnet
|
def get_bin_edges_from_axis(axis) -> np.ndarray:
bins = range(1, axis.GetNbins() + 1)
bin_edges = np.empty(len(bins) + 1)
bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins]
bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins())
return bin_edges
|
Get bin edges from a ROOT hist axis.
Note:
Doesn't include over- or underflow bins!
Args:
axis (ROOT.TAxis): Axis from which the bin edges should be extracted.
Returns:
Array containing the bin edges.
|
juraj-google-style
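A sketch that exercises the same extraction logic against a uniform-bin stand-in for ROOT.TAxis; the FakeAxis class is invented for illustration, and get_bin_edges_from_axis is reused as defined above.

import numpy as np

class FakeAxis:
    """Uniform-bin stand-in for ROOT.TAxis (hypothetical, for illustration only)."""
    def __init__(self, n_bins, x_min, x_max):
        self._edges = np.linspace(x_min, x_max, n_bins + 1)
    def GetNbins(self):
        return len(self._edges) - 1
    def GetBinLowEdge(self, i):      # ROOT bins are 1-indexed
        return self._edges[i - 1]
    def GetBinUpEdge(self, i):
        return self._edges[i]

axis = FakeAxis(n_bins=4, x_min=0.0, x_max=2.0)
edges = get_bin_edges_from_axis(axis)
assert np.allclose(edges, [0.0, 0.5, 1.0, 1.5, 2.0])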
|
def resolution(self, indicator=None):
self._request_entity = 'dnsResolution'
self._request_uri = '{}/dnsResolutions'.format(self._request_uri)
if (indicator is not None):
self._request_uri = '{}/{}/dnsResolutions'.format(self._api_uri, indicator)
|
Update the URI to retrieve host resolutions for the provided indicator.
Args:
indicator (string): The indicator to retrieve resolutions.
|
codesearchnet
|
def download_url(self, url, **kwargs):
if (self.baseurl and ('://' not in url)):
url = join(self.baseurl, url)
return self.resolver.download_to_directory(self.directory, url, **kwargs)
|
Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file
|
codesearchnet
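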
|
def decode(data):
decoded = None
try:
decoded = yaml.load(data)
except Exception as e:
e = e.message if e.message else str(e)
raise MetaParsingException("Can't parse your YAML data: %s" % e)
decoded = validator.check_structure(decoded)
return decoded
|
Handles decoding of the YAML `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
|
juraj-google-style
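A Python 3 sketch of the same pattern, assuming PyYAML is installed and using safe_load so arbitrary Python objects cannot be constructed from the input; the MetaParsingException class is re-declared here only so the snippet is self-contained.

import yaml

class MetaParsingException(ValueError):
    """Stand-in declaration so this sketch runs on its own."""

def decode_yaml(data):
    # safe_load parses plain YAML without instantiating arbitrary objects.
    try:
        return yaml.safe_load(data)
    except yaml.YAMLError as exc:
        raise MetaParsingException("Can't parse your YAML data: %s" % exc)

print(decode_yaml("title: Example\npages: 12"))   # {'title': 'Example', 'pages': 12}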
|
def getListOfBases():
downer = Downloader()
data = downer.download((ALEPH_URL + '/F/?func=file&file_name=base-list'))
dom = dhtmlparser.parseString(data.lower())
base_links = filter((lambda x: (('href' in x.params) and ('local_base' in x.params['href']))), dom.find('a'))
base_links = map((lambda x: x.params['href'].replace('?', '&', 1).split('&')), base_links)
bases = map((lambda link: filter((lambda base: ('local_base=' in base)), link)[0]), base_links)
bases = map((lambda x: x.split('=')[1].strip()), bases)
return list(set(bases))
|
This function is here mainly for unit-testing purposes.
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page.
|
codesearchnet
|
def get_padding_bias(x):
with tf.name_scope('attention_bias'):
padding = get_padding(x)
attention_bias = (padding * _NEG_INF)
attention_bias = tf.expand_dims(tf.expand_dims(attention_bias, axis=1), axis=1)
return attention_bias
|
Calculate bias tensor from padding values in tensor.
Bias tensor that is added to the pre-softmax multi-headed attention logits,
which has shape [batch_size, num_heads, length, length]. The tensor is zero at
non-padding locations, and -1e9 (negative infinity) at padding locations.
Args:
x: int tensor with shape [batch_size, length]
Returns:
Attention bias tensor of shape [batch_size, 1, 1, length].
|
codesearchnet
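A numpy sketch of the same arithmetic, assuming padding positions are marked by token id 0 (the default of the model's get_padding helper).

import numpy as np

_NEG_INF = -1e9
x = np.array([[7, 4, 9, 0, 0],
              [3, 0, 0, 0, 0]])                  # 0 marks padding positions

padding = (x == 0).astype(np.float32)             # [batch, length]
bias = padding * _NEG_INF                         # -1e9 at padded positions
bias = bias[:, np.newaxis, np.newaxis, :]         # [batch, 1, 1, length]

assert bias.shape == (2, 1, 1, 5)
assert bias[0, 0, 0, 3] == _NEG_INF and bias[0, 0, 0, 0] == 0.0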
|
def report_filter(config, auth, body, filters):
new_body = body.copy()
for f, d in filters.items():
for v in get_rows(config, auth, d):
if f == 'accountId':
new_body['accountId'] = v
elif f == 'activity':
new_body['reachCriteria']['activities'].setdefault('filters', []).append({'kind': 'dfareporting#dimensionValue', 'dimensionName': 'dfa:activity', 'id': v, 'matchType': 'EXACT'})
else:
new_body.setdefault('criteria', {}).setdefault('dimensionFilters', []).append({'kind': 'dfareporting#dimensionValue', 'dimensionName': f, 'id': v, 'matchType': 'EXACT'})
return new_body
|
Adds filters to a report body
Filters cannot be easily added to the reports without templating; this allows
filters to be passed as lists.
Values are specified using get_rows(...) helper, see
starthinker/util/data/__init__.py.
To specify a filter, use the official filter name and a list of values.
For example:
```
filters = {
"accountId": {
"values": 789
},
"advertiser": {
"values":[1234, 5678, 91011]
}
}
```
Args:
* auth: (string) Either user or service.
* body: (json) the report body ( with or without filters )
* filters: (json) a dictionary of filters to apply ( see above examples )
Returns:
* body: ( json ) modified report body
|
github-repos
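A simplified standalone sketch of how the filter values land in the report body. Plain lists replace the get_rows(...) indirection, the accountId/else branches mirror the function above, and the exact DimensionValue fields should be checked against the dfareporting API rather than taken from this sketch.

import copy

def simple_report_filter(body, filters):
    # `filters` maps filter name -> list of ids (stand-in for get_rows).
    new_body = copy.deepcopy(body)
    for name, values in filters.items():
        for v in values:
            if name == 'accountId':
                new_body['accountId'] = v
            else:
                new_body.setdefault('criteria', {}).setdefault(
                    'dimensionFilters', []).append(
                        {'kind': 'dfareporting#dimensionValue',
                         'dimensionName': name, 'id': v, 'matchType': 'EXACT'})
    return new_body

body = {'type': 'STANDARD', 'criteria': {}}
filtered = simple_report_filter(body, {'accountId': [789],
                                       'advertiser': [1234, 5678]})
assert filtered['accountId'] == 789
assert len(filtered['criteria']['dimensionFilters']) == 2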
|
def _FindFileContainingSymbolInDb(self, symbol):
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
raise error
if (not file_proto):
raise KeyError(('Cannot find a file containing %s' % symbol))
return self._ConvertFileProtoToFileDescriptor(file_proto)
|
Finds the file in descriptor DB containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the descriptor database.
|
codesearchnet
|
def _PrintSessionsDetails(self, storage_reader):
for session_number, session in enumerate(storage_reader.GetSessions()):
session_identifier = uuid.UUID(hex=session.identifier)
session_identifier = '{0!s}'.format(session_identifier)
start_time = 'N/A'
if session.start_time is not None:
start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)
completion_time = 'N/A'
if session.completion_time is not None:
completion_time = timelib.Timestamp.CopyToIsoFormat(
session.completion_time)
enabled_parser_names = 'N/A'
if session.enabled_parser_names:
enabled_parser_names = ', '.join(sorted(session.enabled_parser_names))
command_line_arguments = session.command_line_arguments or 'N/A'
parser_filter_expression = session.parser_filter_expression or 'N/A'
preferred_encoding = session.preferred_encoding or 'N/A'
if isinstance(preferred_encoding, py2to3.BYTES_TYPE):
preferred_encoding = preferred_encoding.decode('utf-8')
if session.artifact_filters:
artifact_filters_string = ', '.join(session.artifact_filters)
else:
artifact_filters_string = 'N/A'
filter_file = session.filter_file or 'N/A'
title = 'Session: {0:s}'.format(session_identifier)
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title=title)
table_view.AddRow(['Start time', start_time])
table_view.AddRow(['Completion time', completion_time])
table_view.AddRow(['Product name', session.product_name])
table_view.AddRow(['Product version', session.product_version])
table_view.AddRow(['Command line arguments', command_line_arguments])
table_view.AddRow(['Parser filter expression', parser_filter_expression])
table_view.AddRow(['Enabled parser and plugins', enabled_parser_names])
table_view.AddRow(['Preferred encoding', preferred_encoding])
table_view.AddRow(['Debug mode', session.debug_mode])
table_view.AddRow(['Artifact filters', artifact_filters_string])
table_view.AddRow(['Filter file', filter_file])
table_view.Write(self._output_writer)
if self._verbose:
self._PrintPreprocessingInformation(storage_reader, session_number + 1)
self._PrintParsersCounter(
session.parsers_counter, session_identifier=session_identifier)
self._PrintAnalysisReportCounter(
session.analysis_reports_counter,
session_identifier=session_identifier)
self._PrintEventLabelsCounter(
session.event_labels_counter,
session_identifier=session_identifier)
|
Prints the details of the sessions.
Args:
storage_reader (BaseStore): storage.
|
juraj-google-style
|
def eigh(x):
if any_symbolic_tensors((x,)):
return Eigh().symbolic_call(x)
return _eigh(x)
|
Computes the eigenvalues and eigenvectors of a complex Hermitian (or real symmetric) matrix.
Args:
x: Input tensor of shape `(..., M, M)`.
Returns:
A tuple of two tensors: a tensor of shape `(..., M)` containing
eigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors.
|
github-repos
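A numpy check of the property the op provides, using np.linalg.eigh directly instead of the Keras symbolic/backend dispatch.

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 3.0]])                 # real symmetric (hence Hermitian)

eigenvalues, eigenvectors = np.linalg.eigh(a)

# Columns of `eigenvectors` satisfy A v = lambda v.
for lam, v in zip(eigenvalues, eigenvectors.T):
    assert np.allclose(a @ v, lam * v)

# Eigenvalues are returned in ascending order.
assert eigenvalues[0] <= eigenvalues[1]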
|
def restore(self, fade=False):
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
if transport_info['current_transport_state'] == 'PLAYING':
self.device.pause()
self._restore_queue()
if self.is_playing_queue and self.playlist_position > 0:
if self.playlist_position is not None:
self.playlist_position -= 1
self.device.play_from_queue(self.playlist_position, False)
if self.track_position is not None:
if self.track_position != "":
self.device.seek(self.track_position)
self.device.play_mode = self.play_mode
self.device.cross_fade = self.cross_fade
elif self.is_playing_cloud_queue:
pass
else:
if self.media_uri != "":
self.device.play_uri(
self.media_uri, self.media_metadata, start=False)
self.device.mute = self.mute
self.device.bass = self.bass
self.device.treble = self.treble
self.device.loudness = self.loudness
if self.volume == 100:
fixed_vol = self.device.renderingControl.GetOutputFixed(
[('InstanceID', 0)])['CurrentFixed']
else:
fixed_vol = False
if not fixed_vol:
if fade:
self.device.volume = 0
self.device.ramp_to_volume(self.volume)
else:
self.device.volume = self.volume
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop()
|
Restore the state of a device to that which was previously saved.
For coordinator devices restore everything. For slave devices
only restore volume etc., not transport info (transport info
comes from the slave's coordinator).
Args:
fade (bool): Whether volume should be faded up on restore.
|
juraj-google-style
|
def _ConvertScalarFieldValue(value, field, require_str=False):
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_str)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
return base64.b64decode(value)
else:
if _UNPAIRED_SURROGATE_PATTERN.search(value):
raise ParseError('Unpaired surrogate')
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
try:
number = int(value)
enum_value = field.enum_type.values_by_number.get(number, None)
except ValueError:
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
if enum_value is None:
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
return enum_value.number
|
Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
require_str: If True, the field value must be a str.
Returns:
The converted scalar field value
Raises:
ParseError: In case of convert problems.
|
juraj-google-style
|
def record_markdown(text, cellid):
from acorn.logging.database import record
from time import time
ekey = "nb-{}".format(cellid)
global _cellid_map
if cellid not in _cellid_map:
from acorn.logging.database import active_db
from difflib import SequenceMatcher
from acorn.logging.diff import cascade
taskdb = active_db()
if ekey not in taskdb.entities:
possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
maxkey, maxvalue = None, 0.
for pkey in possible:
sequence = [e["c"] for e in taskdb.entities[pkey]]
state = ''.join(cascade(sequence))
matcher = SequenceMatcher(a=state, b=text)
ratio = matcher.quick_ratio()
if ratio > maxvalue and ratio > 0.5:
maxkey, maxvalue = pkey, ratio
if maxkey is not None:
ekey = pkey
_cellid_map[cellid] = ekey
ekey = _cellid_map[cellid]
entry = {
"m": "md",
"a": None,
"s": time(),
"r": None,
"c": text,
}
record(ekey, entry, diff=True)
|
Records the specified markdown text to the acorn database.
Args:
text (str): the *raw* markdown text entered into the cell in the ipython
notebook.
|
juraj-google-style
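A standalone sketch of the fuzzy-matching step that re-associates a markdown cell with an existing "nb-" entity; the in-memory dict and cell texts are invented stand-ins for the acorn task database and its cascaded diffs.

from difflib import SequenceMatcher

# Hypothetical previously-recorded cell texts, keyed like the "nb-" entities.
previous_cells = {
    'nb-101': '# Analysis\nLoad the data and plot the histogram.',
    'nb-102': 'Some unrelated notes about the experiment setup.',
}
new_text = '# Analysis\nLoad the data and plot the histograms again.'

best_key, best_ratio = None, 0.0
for key, old_text in previous_cells.items():
    ratio = SequenceMatcher(a=old_text, b=new_text).quick_ratio()
    if ratio > best_ratio and ratio > 0.5:
        best_key, best_ratio = key, ratio

print(best_key, round(best_ratio, 2))   # expected to match 'nb-101'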
|
def _parse_plugin_data_as(content, data_oneof_field):
plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)
if plugin_data.version != PLUGIN_DATA_VERSION:
raise error.HParamsError(
'Only supports plugin_data version: %s; found: %s in: %s' %
(PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))
if not plugin_data.HasField(data_oneof_field):
raise error.HParamsError(
'Expected plugin_data.%s to be set. Got: %s' %
(data_oneof_field, plugin_data))
return getattr(plugin_data, data_oneof_field)
|
Returns a data oneof's field from plugin_data.content.
Raises HParamsError if the content doesn't have 'data_oneof_field' set or
this file is incompatible with the version of the metadata stored.
Args:
content: The SummaryMetadata.plugin_data.content to use.
data_oneof_field: string. The name of the data oneof field to return.
|
juraj-google-style
|