code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 classes)
---|---|---|
def lengths_to_area_mask(feature_length, length, max_area_size):
paddings = tf.cast(tf.expand_dims(tf.logical_not(tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
(_, _, area_sum, _, _) = compute_area_features(paddings, max_area_width=max_area_size)
mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
return mask
|
Generates a non-padding mask for areas based on lengths.
Args:
feature_length: a tensor of [batch_size]
length: the length of the batch
max_area_size: the maximum area size considered
Returns:
mask: a tensor in shape of [batch_size, num_areas]
|
codesearchnet
|
def single_node_env(args):
if isinstance(args, list):
sys.argv = args
elif args.argv:
sys.argv = args.argv
num_gpus = args.num_gpus if 'num_gpus' in args else 1
util.single_node_env(num_gpus)
|
Sets up environment for a single-node TF session.
Args:
:args: command line arguments as either argparse args or argv list
|
juraj-google-style
|
def __init__(self, message, color, exc=None):
super(Status, self).__init__()
self.msg = message
self.color = color
self.exc = exc
|
Initialize the exception.
Args:
message: A six character status message to display on the terminal.
color: An ANSI color code value to use while displaying the
message.
exc: An exception that caused the non-standard status message. If
exc is supplied, it will be raised after the status message is
displayed.
|
juraj-google-style
|
def _binary_2d_label_to_2d_sparse_value(labels):
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))
|
Convert dense 2D binary indicator to sparse ID.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes].
Returns:
`SparseTensorValue` of shape [batch_size, num_classes], where num_classes
is the number of `1` values in each row of `labels`. Values are indices
of `1` values along the last dimension of `labels`.
|
github-repos
|
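A minimal worked example of the conversion above, assuming TensorFlow's `sparse_tensor` module and NumPy are importable as in the snippet; the input matrix is hypothetical:

import numpy as np
from tensorflow.python.framework import sparse_tensor

labels = [[0, 1, 0],
          [1, 0, 1]]
sparse_value = _binary_2d_label_to_2d_sparse_value(labels)
# Each `1` becomes one sparse entry; the stored value is its column index.
print(sparse_value.indices.tolist())      # [[0, 0], [1, 0], [1, 1]]
print(sparse_value.values.tolist())       # [1, 0, 2]
print(sparse_value.dense_shape.tolist())  # [2, 3]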
def parse_original_feature_from_example(example, feature_name):
feature = get_example_features(example)[feature_name]
feature_type = feature.WhichOneof('kind')
original_value = proto_value_for_feature(example, feature_name)
return OriginalFeatureList(feature_name, original_value, feature_type)
|
Returns an `OriginalFeatureList` for the specified feature_name.
Args:
example: An example.
feature_name: A string feature name.
Returns:
A filled in `OriginalFeatureList` object representing the feature.
|
codesearchnet
|
def _deconstruct_single_qubit_matrix_into_gate_turns(mat: np.ndarray) -> Tuple[(float, float, float)]:
(pre_phase, rotation, post_phase) = linalg.deconstruct_single_qubit_matrix_into_angles(mat)
tau = (2 * np.pi)
xy_turn = (rotation / tau)
xy_phase_turn = (0.25 - (pre_phase / tau))
total_z_turn = ((post_phase + pre_phase) / tau)
return (_signed_mod_1(xy_turn), _signed_mod_1(xy_phase_turn), _signed_mod_1(total_z_turn))
|
Breaks down a 2x2 unitary into gate parameters.
Args:
mat: The 2x2 unitary matrix to break down.
Returns:
A tuple containing the amount to rotate around an XY axis, the phase of
that axis, and the amount to phase around Z. All results will be in
fractions of a whole turn, with values canonicalized into the range
[-0.5, 0.5).
|
codesearchnet
|
def expand_groups(grp):
p = re.compile(r"(?P<name>.+)\[(?P<start>\d+)-(?P<end>\d+)\]")
m = p.match(grp)
if m is not None:
s = int(m.group('start'))
e = int(m.group('end'))
n = m.group('name')
return list(map(lambda x: n + str(x), range(s, e + 1)))
else:
return [grp]
|
Expand group names.
Args:
grp (string): group names to expand
Returns:
list of groups
Examples:
* grp[1-3] will be expanded to [grp1, grp2, grp3]
* grp1 will be expanded to [grp1]
|
juraj-google-style
|
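A quick doctest-style sketch of expand_groups with hypothetical group names:

>>> expand_groups('grp[1-3]')
['grp1', 'grp2', 'grp3']
>>> expand_groups('grp1')
['grp1']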
def setup_callbacks(self, callbacks, monitors):
assert isinstance(callbacks, list), callbacks
assert isinstance(monitors, list), monitors
describe_trainable_vars()
self.register_callback(MaintainStepCounter())
for cb in callbacks:
self.register_callback(cb)
for cb in self._callbacks:
assert not isinstance(cb, MonitorBase), "Monitor cannot be pre-registered for now!"
registered_monitors = []
for m in monitors:
if self.register_callback(m):
registered_monitors.append(m)
self.monitors = Monitors(registered_monitors)
self.register_callback(self.monitors)
logger.info("Setup callbacks graph ...")
self._callbacks = Callbacks(self._callbacks)
self._callbacks.setup_graph(weakref.proxy(self))
|
Setup callbacks and monitors. Must be called after the main graph is built.
Args:
callbacks ([Callback]):
monitors ([MonitorBase]):
|
juraj-google-style
|
def __init__(self, location, field_type):
super(OutputContextField, self).__init__(location, field_type)
self.location = location
self.field_type = field_type
self.validate()
|
Construct a new OutputContextField object for the field at the given location.
Args:
location: Location, specifying where the field was declared. The Location
must point to a property, and that property's value is output as the result.
field_type: GraphQL type object, specifying the type of the field being output
Returns:
new OutputContextField object
|
juraj-google-style
|
def language_from_str(language_def, metamodel):
if (type(language_def) is not text):
raise TextXError('textX accepts only unicode strings.')
if metamodel.debug:
metamodel.dprint('*** PARSING LANGUAGE DEFINITION ***')
if (metamodel.debug in textX_parsers):
parser = textX_parsers[metamodel.debug]
else:
parser = ParserPython(textx_model, comment_def=comment, ignore_case=False, reduce_tree=False, memoization=metamodel.memoization, debug=metamodel.debug, file=metamodel.file)
textX_parsers[metamodel.debug] = parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
(line, col) = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
lang_parser = visit_parse_tree(parse_tree, TextXVisitor(parser, metamodel))
metamodel.validate()
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
PMDOTExporter().exportFile(lang_parser.parser_model, '{}_parser_model.dot'.format(metamodel.rootcls.__name__))
return lang_parser
|
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
|
codesearchnet
|
def _preprocess_grad(grad, body_graph_output, while_op_input, while_op_output):
if not _is_trainable(body_graph_output):
return None
if while_op_output.dtype in (dtypes.resource, dtypes.variant) and default_gradient.supports_default_grad(while_op_input) and (grad is None):
return _zeros_like(while_op_input, while_op_output)
if isinstance(grad, indexed_slices.IndexedSlices):
return ops.convert_to_tensor(grad)
return grad
|
Returns the initial gradient to be used for a given output tensor.
Args:
grad: the original gradient Tensor passed to the gradient function.
body_graph_output: the corresponding Tensor in the body graph.
while_op_input: the corresponding Tensor input of the While op.
while_op_output: the corresponding Tensor output of the While op.
Returns:
A Tensor or None.
|
github-repos
|
def _expand_value_set_url_using_service(self, value_set_url: str, value_set_version: Optional[str], terminology_service_url: str, auth: Optional[Union[Tuple[str, str], str]]) -> value_set_pb2.ValueSet:
params = {'url': value_set_url}
if value_set_version is not None:
params['valueSetVersion'] = value_set_version
session_ = self.create_session()
session_.headers.update({'Accept': 'application/json'})
if auth is not None:
if isinstance(auth, tuple) and len(auth) == 2:
logging.debug('Using Basic auth for auth')
session_.auth = auth
else:
logging.debug('Using Bearer token for auth')
session_.headers['Authorization'] = auth
logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set_url, value_set_version, terminology_service_url)
with session_ as session:
def request_func(offset: int) -> requests.Response:
return session.get(terminology_service_url, params={'offset': offset, **params})
expanded_value_set = _paginate_expand_value_set_request(request_func, value_set_url, value_set_version)
logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set_url, value_set_version, terminology_service_url)
return expanded_value_set
|
Expands the value set using the requested terminology service.
Requests an expansion of the value set from the terminology
server at `terminology_service_url` for the given URL and version.
Args:
value_set_url: The url of the value set to expand.
value_set_version: The version of the value set to retrieve or None for
the latest version.
terminology_service_url: The url of the terminology service to use when
expanding `value_set_url`.
auth: A tuple of (user_name, password) to use when performing basic auth
with the terminology service or a singular token added to the
Authorization header or None if no authentication is required.
Returns:
The current definition of the value set from the server with its expanded
codes present.
|
github-repos
|
def __init__(self, name) -> None:
if not isinstance(name, str):
raise ValueError('name for name_scope must be a string.')
self._name = name
self._exit_fns = []
|
Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is not a string.
|
github-repos
|
def _map_captures_to_created_tensors(original_captures, tensor_map, function):
export_captures = []
for exterior, interior in original_captures:
mapped_resource = tensor_map.get(exterior, None)
if mapped_resource is None:
_raise_untracked_capture_error(function.name, exterior, interior)
export_captures.append(mapped_resource)
return export_captures
|
Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
tensor_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
function: Function with the original captures. Only used when raising the
AssertionError.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`tensor_map`.
|
github-repos
|
def from_json(cls, data):
assert ('data_type' in data), 'Required keyword "data_type" is missing!'
keys = ('data_type', 'unit', 'analysis_period', 'metadata')
for key in keys:
if (key not in data):
data[key] = None
data_type = DataTypeBase.from_json(data['data_type'])
ap = AnalysisPeriod.from_json(data['analysis_period'])
return cls(data_type, data['unit'], ap, data['metadata'])
|
Create a header from a dictionary.
Args:
data: {
    "data_type": {},  // Type of data (e.g. Temperature)
    "unit": string,
    "analysis_period": {},  // A Ladybug AnalysisPeriod
    "metadata": {}  // A dictionary of metadata
}
|
codesearchnet
|
def actnorm_3d(name, x, logscale_factor=3.):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = tf.unstack(x, axis=1)
x_normed = []
for ind, x_step in enumerate(x):
x_step, _ = actnorm("actnorm_%d" % ind, x_step,
logscale_factor=logscale_factor)
x_normed.append(x_step)
return tf.stack(x_normed, axis=1), None
|
Applies actnorm to each time-step independently.
There are a total of 2*n_channels*n_steps parameters learnt.
Args:
name: variable scope.
x: 5-D Tensor, (NTHWC)
logscale_factor: Increases the learning rate of the scale by
logscale_factor.
Returns:
x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
|
juraj-google-style
|
def run_config(self, project, run=None, entity=None):
query = gql('\n query Model($name: String!, $entity: String!, $run: String!) {\n model(name: $name, entityName: $entity) {\n bucket(name: $run) {\n config\n commit\n patch\n files(names: ["wandb-metadata.json"]) {\n edges {\n node {\n url\n }\n }\n }\n }\n }\n }\n ')
response = self.gql(query, variable_values={'name': project, 'run': run, 'entity': entity})
if (response['model'] == None):
raise ValueError('Run {}/{}/{} not found'.format(entity, project, run))
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads((run['config'] or '{}'))
if (len(run['files']['edges']) > 0):
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata)
|
Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to.
|
codesearchnet
|
def release(self, force=False):
if not self.islocked:
return
if self.owned_by_self or force:
os.remove(self.path)
else:
raise UnableToReleaseLockError(self)
|
Release lock.
To release a lock, we must already own the lock.
Arguments:
force (bool, optional): If true, ignore any existing lock owner.
Raises:
UnableToReleaseLockError: If the lock is claimed by another
process (not raised if force option is used).
|
juraj-google-style
|
def eps(self, nodeids=None):
if nodeids is None: nodeids = self._nodeids
_eps = self._eps
return [_eps[nodeid] for nodeid in nodeids]
|
Return the EPs with the given *nodeid*, or all EPs.
Args:
nodeids: an iterable of nodeids of EPs to return; if
`None`, return all EPs
|
juraj-google-style
|
def emit(signal, *args, **kwargs):
if signal not in __receivers:
return
receivers = __live_receivers(signal)
for func in receivers:
func(*args, **kwargs)
|
Emit a signal by serially calling each registered signal receiver for
the `signal`.
Note:
The receiver must accept the *args and/or **kwargs that have been
passed to it. The expected parameters are not dictated by
mixbox.
Args:
signal: A signal identifier or name.
*args: A variable-length argument list to pass to the receiver.
**kwargs: Keyword-arguments to pass to the receiver.
|
juraj-google-style
|
def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):
d = distance_to_line(a, b, p)
r = ((((- 1) / float(T)) * abs(d)) + 1)
return (r if (r > 0) else 0)
|
Computes the distance similarity between a line segment
and a point
Args:
a ([float, float]): x and y coordinates. Line start
b ([float, float]): x and y coordinates. Line end
p ([float, float]): x and y coordinates. Point to compute the distance
Returns:
float: between 0 and 1. Where 1 is very similar and 0 is completely different
|
codesearchnet
|
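A short sketch of the linear falloff applied above, with a hypothetical threshold T (the real code obtains d from `distance_to_line`):

T = 5.0  # stand-in for CLOSE_DISTANCE_THRESHOLD
for d in (0.0, 2.5, 5.0, 7.0):
    r = ((-1) / float(T)) * abs(d) + 1
    print(d, r if r > 0 else 0)  # 1.0, 0.5, 0, 0 -- similarity drops linearly to zero at distance T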
def tables_list(self, dataset_name, max_results=0, page_token=None):
url = (Api._ENDPOINT + (Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', '')))
args = {}
if (max_results != 0):
args['maxResults'] = max_results
if (page_token is not None):
args['pageToken'] = page_token
return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials)
|
Issues a request to retrieve a list of tables.
Args:
dataset_name: the name of the dataset to enumerate.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def keras_model_to_graph_def(keras_layer):
input_to_layer = {}
model_name_to_output = {}
g = GraphDef()
prev_node_name = None
for (name_scope, layer) in _walk_layers(keras_layer):
if _is_model(layer):
(input_to_layer, model_name_to_output, prev_node_name) = _update_dicts(name_scope, layer, input_to_layer, model_name_to_output, prev_node_name)
continue
layer_config = layer.get('config')
node_name = _scoped_name(name_scope, layer_config.get('name'))
node_def = g.node.add()
node_def.name = node_name
if (layer.get('class_name') is not None):
keras_cls_name = layer.get('class_name').encode('ascii')
node_def.attr['keras_class'].s = keras_cls_name
if (layer_config.get('dtype') is not None):
tf_dtype = dtypes.as_dtype(layer_config.get('dtype'))
node_def.attr['dtype'].type = tf_dtype.as_datatype_enum
if (layer.get('inbound_nodes') is not None):
for maybe_inbound_node in layer.get('inbound_nodes'):
inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node)
for [name, size, index, _] in inbound_nodes:
inbound_name = _scoped_name(name_scope, name)
inbound_node_names = model_name_to_output.get(inbound_name, [inbound_name])
node_def.input.append(inbound_node_names[index])
elif (prev_node_name is not None):
node_def.input.append(prev_node_name)
if (node_name in input_to_layer):
node_def.input.append(input_to_layer.get(node_name))
prev_node_name = node_def.name
return g
|
Returns a GraphDef representation of the Keras model in a dict form.
Note that it only supports models that implement to_json().
Args:
keras_layer: A dict from Keras model.to_json().
Returns:
A GraphDef representation of the layers in the model.
|
codesearchnet
|
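A usage sketch, assuming the surrounding helper functions (`_walk_layers`, `_scoped_name`, etc.) are in scope and a TensorFlow/Keras version whose to_json() output matches what they expect; the toy model is hypothetical:

import json
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, input_shape=(8,), name='dense_1'),
    tf.keras.layers.Dense(2, name='dense_2'),
])
model_dict = json.loads(model.to_json())
graph_def = keras_model_to_graph_def(model_dict)
print([node.name for node in graph_def.node])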
def copy_to(self, new_key, bucket=None):
if (bucket is None):
bucket = self._bucket
try:
new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
except Exception as e:
raise e
return Item(bucket, new_key, new_info, context=self._context)
|
Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to new key.
Raises:
Exception if there was an error copying the item.
|
codesearchnet
|
def get_payments(self):
query = '\n query {\n user {\n payments {\n nmrAmount\n round {\n number\n openTime\n resolveTime\n resolvedGeneral\n resolvedStaking\n }\n tournament\n usdAmount\n }\n }\n }\n '
data = self.raw_query(query, authorization=True)['data']
payments = data['user']['payments']
for p in payments:
utils.replace(p['round'], 'openTime', utils.parse_datetime_string)
utils.replace(p['round'], 'resolveTime', utils.parse_datetime_string)
utils.replace(p, 'usdAmount', utils.parse_float_string)
utils.replace(p, 'nmrAmount', utils.parse_float_string)
return payments
|
Get all your payments.
Returns:
list of dicts: payments
For each payout in the list, a dict contains the following items:
* nmrAmount (`decimal.Decimal`)
* usdAmount (`decimal.Decimal`)
* tournament (`str`)
* round (`dict`)
* number (`int`)
* openTime (`datetime`)
* resolveTime (`datetime`)
* resolvedGeneral (`bool`)
* resolvedStaking (`bool`)
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.get_payments()
[{'nmrAmount': Decimal('0.00'),
'round': {'number': 84,
'openTime': datetime.datetime(2017, 12, 2, 18, 0, tzinfo=tzutc()),
'resolveTime': datetime.datetime(2018, 1, 1, 18, 0, tzinfo=tzutc()),
'resolvedGeneral': True,
'resolvedStaking': True},
'tournament': 'staking',
'usdAmount': Decimal('17.44')},
...
]
|
codesearchnet
|
def split_heads(self, x):
with tf.name_scope('split_heads'):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
depth = (self.hidden_size // self.num_heads)
x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
return tf.transpose(x, [0, 2, 1, 3])
|
Split x into different heads, and transpose the resulting value.
The tensor is transposed to ensure the inner dimensions hold the correct
values during the matrix multiplication.
Args:
x: A tensor with shape [batch_size, length, hidden_size]
Returns:
A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
|
codesearchnet
|
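A minimal shape check for split_heads, assuming TensorFlow eager mode and reusing the function above on a stand-in object (the sizes are hypothetical):

import tensorflow as tf

class _Stub:
    hidden_size = 8
    num_heads = 2
    split_heads = split_heads  # bind the function above as a method

x = tf.ones([4, 10, 8])        # [batch_size, length, hidden_size]
y = _Stub().split_heads(x)
print(y.shape)                 # (4, 2, 10, 4) == [batch, num_heads, length, depth]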
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Retrieves table data from a specified set of rows. Requires the READER dataset role.
Args:
request: (BigqueryTabledataListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableDataList) The response message.
|
github-repos
|
def render(self, bindings):
out = []
binding = False
for segment in self.segments:
if segment.kind == _BINDING:
if segment.literal not in bindings:
raise ValidationException(
('rendering error: value for key \'{}\' '
'not provided').format(segment.literal))
out.extend(PathTemplate(bindings[segment.literal]).segments)
binding = True
elif segment.kind == _END_BINDING:
binding = False
else:
if binding:
continue
out.append(segment)
path = _format(out)
self.match(path)
return path
|
Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationError: If a key isn't provided or if a sub-template can't
be parsed.
|
juraj-google-style
|
def assert_lessthan(arr_test, arr_max, msg=''):
if util_arg.NO_ASSERTS:
return
arr1 = np.array(arr_test)
arr2 = np.array(arr_max)
error = arr_max - arr_test
passed = error >= 0
if not np.all(passed):
failed_xs = np.where(np.logical_not(passed))
failed_error = error.take(failed_xs)
failed_arr_test = arr1.take(failed_xs)
failed_arr_target = arr2.take(failed_xs)
msg_list = [
'FAILED ASSERT LESSTHAN',
msg,
' * failed_xs = %r' % (failed_xs,),
' * failed_error = %r' % (failed_error,),
' * failed_arr_test = %r' % (failed_arr_test,),
' * failed_arr_target = %r' % (failed_arr_target,),
]
msg = '\n'.join(msg_list)
raise AssertionError(msg)
return error
|
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
|
juraj-google-style
|
def _init_net_specs(conf):
for net_name, net_spec in conf.get('nets', {}).items():
net_spec['name'] = net_name
net_spec['mapping'] = {}
net_spec.setdefault('type', 'nat')
return conf
|
Given a configuration specification, initializes all the net
definitions in it so they can be used comfortably
Args:
conf (dict): Configuration specification
Returns:
dict: the adapted new conf
|
juraj-google-style
|
def ResourcePath(package_name, filepath):
if (not getattr(sys, 'frozen', None)):
target = _GetPkgResources(package_name, filepath)
if (target and os.access(target, os.R_OK)):
return target
target = os.path.join(sys.prefix, filepath)
if (target and os.access(target, os.R_OK)):
return target
return None
|
Computes a path to the specified package resource.
Args:
package_name: A name of the package where the resource is located.
filepath: A path to the resource relative to the package location.
Returns:
A path to the resource or `None` if the resource cannot be found.
|
codesearchnet
|
def resolution(self, indicator=None):
self._request_entity = 'dnsResolution'
self._request_uri = '{}/dnsResolutions'.format(self._request_uri)
if indicator is not None:
self._request_uri = '{}/{}/dnsResolutions'.format(self._api_uri, indicator)
|
Update the URI to retrieve host resolutions for the provided indicator.
Args:
indicator (string): The indicator to retrieve resolutions.
|
juraj-google-style
|
def repay_funding(self, amount, currency):
params = {'amount': amount, 'currency': currency}
return self._send_message('post', '/funding/repay', data=json.dumps(params))
|
Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
|
codesearchnet
|
def get_usedby_and_readonly(self, id):
uri = self.URI + "/" + id + "/usedby/readonly"
return self._client.get(uri)
|
Gets the build plan details of the selected plan script as per the selected attributes.
Args:
id: ID of the Plan Script.
Returns:
array of build plans
|
juraj-google-style
|
def __init__(self, value, method=Method.PREFIX):
self.value = value
self.method = method
|
Init method.
Args:
value (str): value to match.
method (const): Method constant, matching method.
|
juraj-google-style
|
def getTokensForText(self, body, POStags=None):
return self._text.getTokensForText(self._retina, body, POStags)
|
Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful
|
juraj-google-style
|
def macro_tpm(self, micro_tpm, check_independence=True):
if (not is_state_by_state(micro_tpm)):
micro_tpm = convert.state_by_node2state_by_state(micro_tpm)
macro_tpm = self.macro_tpm_sbs(micro_tpm)
if check_independence:
validate.conditionally_independent(macro_tpm)
return convert.state_by_state2state_by_node(macro_tpm)
|
Create a coarse-grained macro TPM.
Args:
micro_tpm (nd.array): The TPM of the micro-system.
check_independence (bool): Whether to check that the macro TPM is
conditionally independent.
Raises:
ConditionallyDependentError: If ``check_independence`` is ``True``
and the macro TPM is not conditionally independent.
Returns:
np.ndarray: The state-by-node TPM of the macro-system.
|
codesearchnet
|
def quarter_boundaries(quarter):
year, quarter = quarter.split('Q')
year = int(year)
quarter = int(quarter)
first_month_of_quarter = 3 * quarter - 2
last_month_of_quarter = 3 * quarter
first_day = date(year, first_month_of_quarter, 1)
last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])
return first_day, last_day
|
Returns first and last day of a quarter
Args:
quarter (str) quarter, in format '2015Q1'
Returns: (tuple) datetime.dates for the first and last days of the quarter
|
juraj-google-style
|
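A doctest-style sketch of quarter_boundaries (assumes `date` and `monthrange` are imported from `datetime` and `calendar`, as the function requires):

>>> quarter_boundaries('2015Q1')
(datetime.date(2015, 1, 1), datetime.date(2015, 3, 31))
>>> quarter_boundaries('2016Q4')
(datetime.date(2016, 10, 1), datetime.date(2016, 12, 31))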
def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions:
if ((not self._ssl_verification) and isinstance(error, SSLVerificationError)):
self._statistics.increment_error(ProtocolError())
else:
self._statistics.increment_error(error)
self._waiter.increment()
action = self.consult_error_hook(item_session, error)
if (action == Actions.RETRY):
item_session.set_status(Status.error)
elif (action == Actions.FINISH):
item_session.set_status(Status.done)
elif (action == Actions.STOP):
raise HookStop('Script requested immediate stop.')
elif (self._ssl_verification and isinstance(error, SSLVerificationError)):
raise
elif (isinstance(error, ConnectionRefused) and (not self.retry_connrefused)):
item_session.set_status(Status.skipped)
elif (isinstance(error, DNSNotFound) and (not self.retry_dns_error)):
item_session.set_status(Status.skipped)
else:
item_session.set_status(Status.error)
return action
|
Process an error.
Returns:
A value from :class:`.hook.Actions`.
|
codesearchnet
|
def _ParseInfo2Record(self, parser_mediator, file_object, record_offset, record_size):
record_data = self._ReadData(file_object, record_offset, record_size)
record_map = self._GetDataTypeMap('recycler_info2_file_entry')
try:
record = self._ReadStructureFromByteStream(record_data, record_offset, record_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map record data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))
codepage = (parser_mediator.codepage or 'ascii')
ascii_filename = record.original_filename.split(b'\x00')[0]
try:
ascii_filename = ascii_filename.decode(codepage)
except UnicodeDecodeError:
ascii_filename = ascii_filename.decode(codepage, errors='replace')
parser_mediator.ProduceExtractionWarning('unable to decode original filename.')
unicode_filename = None
if (record_size > 280):
record_offset += 280
utf16_string_map = self._GetDataTypeMap('recycler_info2_file_entry_utf16le_string')
try:
unicode_filename = self._ReadStructureFromByteStream(record_data[280:], record_offset, utf16_string_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map record data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))
unicode_filename = unicode_filename.rstrip('\x00')
if (record.deletion_time == 0):
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=record.deletion_time)
event_data = WinRecycleBinEventData()
event_data.drive_number = record.drive_number
event_data.original_filename = (unicode_filename or ascii_filename)
event_data.file_size = record.original_file_size
event_data.offset = record_offset
event_data.record_index = record.index
if (ascii_filename != unicode_filename):
event_data.short_filename = ascii_filename
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_DELETED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an INFO-2 record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
record_offset (int): record offset.
record_size (int): record size.
Raises:
ParseError: if the record cannot be read.
|
codesearchnet
|
def reply(self, status=200, new_response=False, **kw):
res = (Response(**kw) if new_response else self._response)
res.status((status or res._status))
res.mock = self
self._response = res
return res
|
Defines the mock response.
Arguments:
status (int, optional): response status code. Defaults to ``200``.
**kw (dict): optional keyword arguments passed to ``pook.Response``
constructor.
Returns:
pook.Response: mock response definition instance.
|
codesearchnet
|
def terminate(self, task_type, task_id):
with self._process_lock:
p = self._processes.get((task_type, task_id), None)
if p is None:
raise ValueError('{}-{} does not exist'.format(task_type, task_id))
self._terminated.add((task_type, task_id))
self._parent_to_sub_queue.put('terminate {} {}'.format(task_type, task_id))
p.join()
|
Terminates the process with `task_type` and `task_id`.
If auto_retart=True, the terminated task will be restarted unless the chief
has already exited with zero exit code.
Args:
task_type: the task type.
task_id: the task id.
|
github-repos
|
def print_serial_number_info(self, serial_number, print_to_screen=True):
    r = self.select_serial_number_row(serial_number)
    if r.empty:
        warnings.warn('missing serial number')
        return
    txt1 = (80 * '=')
    txt1 += '\n'
    # f-string bodies were truncated in the source; reconstructed as plain label/value lines
    txt1 += f'serial number: {serial_number}\n'
    txt1 += (80 * '-')
    txt1 += '\n'
    txt2 = ''
    for (label, value) in zip(r.columns, r.values[0]):
        if (label in self.headers):
            txt1 += f'{label}: {value}\n'
        else:
            txt2 += f'{label}: {value}\n'
    if print_to_screen:
        print(txt1)
        print((80 * '-'))
        print(txt2)
        print((80 * '='))
        return
    else:
        return txt1
|
Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None.
|
codesearchnet
|
def configure_app(app, config=None, config_obj=None):
app.config.from_object(config_obj or BaseConfig)
if config is not None:
app.config.from_pyfile(config)
|
Configure application instance.
Args:
app (Flask): initialized Flask app instance
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
|
juraj-google-style
|
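A usage sketch, under the assumption that Flask is installed and `BaseConfig` is defined in the same module; `instance/settings.py` is a hypothetical config path:

from flask import Flask

app = Flask(__name__)
configure_app(app)                                  # defaults to BaseConfig only
configure_app(app, config='instance/settings.py')   # additionally load a pyfile (hypothetical path)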
def GetStream(data=None):
if len(__mstreams_available__) == 0:
if data:
mstream = MemoryStream(data)
mstream.seek(0)
else:
mstream = MemoryStream()
__mstreams__.append(mstream)
return mstream
mstream = __mstreams_available__.pop()
if data is not None and len(data):
mstream.Cleanup()
mstream.write(data)
mstream.seek(0)
return mstream
|
Get a MemoryStream instance.
Args:
data (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
Returns:
MemoryStream: instance.
|
juraj-google-style
|
def _create_or_validate_filenames_dataset(filenames, name=None):
if isinstance(filenames, data_types.DatasetV2):
element_type = dataset_ops.get_legacy_output_types(filenames)
if element_type != dtypes.string:
raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got a dataset of `{element_type!r}` elements.')
element_shape = dataset_ops.get_legacy_output_shapes(filenames)
if not element_shape.is_compatible_with(tensor_shape.TensorShape([])):
raise TypeError(f'The `filenames` argument must contain `tf.string` elements of shape [] (i.e. scalars). Got a dataset of element shape {element_shape!r}.')
else:
filenames = nest.map_structure(_normalise_fspath, filenames)
filenames = ops.convert_to_tensor(filenames, dtype_hint=dtypes.string)
if filenames.dtype != dtypes.string:
raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got `{filenames.dtype!r}` elements.')
filenames = array_ops.reshape(filenames, [-1], name='flat_filenames')
filenames = from_tensor_slices_op._TensorSliceDataset(filenames, is_files=True, name=name)
return filenames
|
Creates (or validates) a dataset of filenames.
Args:
filenames: Either a list or dataset of filenames. If it is a list, it is
converted to a dataset. If it is a dataset, its type and shape are validated.
name: (Optional.) A name for the tf.data operation.
Returns:
A dataset of filenames.
|
github-repos
|
def onehot_encode(dataset, char_indices, maxlen):
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for (i, sentence) in enumerate(dataset):
for (t, char) in enumerate(sentence):
X[(i, t, char_indices[char])] = 1
return X
|
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
|
codesearchnet
|
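A small worked example of onehot_encode with a hypothetical two-character vocabulary:

import numpy as np

char_indices = {'a': 0, 'b': 1}
dataset = ['ab', 'ba']           # each sample is an iterable of tokens
X = onehot_encode(dataset, char_indices, maxlen=2)
print(X.shape)                   # (2, 2, 2)
print(X[0])                      # [[1., 0.], [0., 1.]] -- 'a' then 'b'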
def _ConditionalFormatMessages(self, event_values):
string_pieces = []
for map_index, attribute_name in enumerate(self._format_string_pieces_map):
if not attribute_name or attribute_name in event_values:
if attribute_name:
attribute = event_values.get(attribute_name, None)
if (not isinstance(attribute, (bool, float)) and
not isinstance(attribute, py2to3.INTEGER_TYPES) and
not attribute):
continue
string_pieces.append(self.FORMAT_STRING_PIECES[map_index])
format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)
string_pieces = []
for map_index, attribute_name in enumerate(
self._format_string_short_pieces_map):
if not attribute_name or event_values.get(attribute_name, None):
string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index])
short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)
return self._FormatMessages(
format_string, short_format_string, event_values)
|
Determines the conditional formatted message strings.
Args:
event_values (dict[str, object]): event values.
Returns:
tuple(str, str): formatted message string and short message string.
|
juraj-google-style
|
def add_property_orders(query_proto, *orders):
for order in orders:
proto = query_proto.order.add()
if (order[0] == '-'):
order = order[1:]
proto.direction = query_pb2.PropertyOrder.DESCENDING
else:
proto.direction = query_pb2.PropertyOrder.ASCENDING
proto.property.name = order
|
Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
orders: list of property name strings; defaults to ascending
order, or descending if prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc
|
codesearchnet
|
def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
try:
from pycocotools import mask as coco_mask
except ImportError:
raise ImportError('Pycocotools is not installed in your environment.')
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = np.asarray(mask, dtype=np.uint8)
mask = np.any(mask, axis=2)
masks.append(mask)
if masks:
masks = np.stack(masks, axis=0)
else:
masks = np.zeros((0, height, width), dtype=np.uint8)
return masks
|
Convert a COCO polygon annotation to a mask.
Args:
segmentations (`List[List[float]]`):
List of polygons, each polygon represented by a list of x-y coordinates.
height (`int`):
Height of the mask.
width (`int`):
Width of the mask.
|
github-repos
|
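A small sketch, assuming pycocotools is installed; the triangle polygon is hypothetical:

# One object described by a single polygon, flattened as [x1, y1, x2, y2, ...].
segmentations = [[[10.0, 10.0, 60.0, 10.0, 10.0, 60.0]]]
masks = convert_coco_poly_to_mask(segmentations, height=64, width=64)
print(masks.shape)  # (1, 64, 64): one boolean mask covering the triangle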
def _get_backend_instance(self, backend_cls):
try:
backend_instance = backend_cls(provider=self)
except Exception as err:
raise QiskitError('Backend %s could not be instantiated: %s' %
(backend_cls, err))
return backend_instance
|
Return an instance of a backend from its class.
Args:
backend_cls (class): Backend class.
Returns:
BaseBackend: a backend instance.
Raises:
QiskitError: if the backend could not be instantiated.
|
juraj-google-style
|
def model_fn(hparams, seed):
rng = random.Random(seed)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(INPUT_SHAPE))
model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,)))
conv_filters = 8
for _ in xrange(hparams[HP_CONV_LAYERS]):
model.add(tf.keras.layers.Conv2D(
filters=conv_filters,
kernel_size=hparams[HP_CONV_KERNEL_SIZE],
padding="same",
activation="relu",
))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same"))
conv_filters *= 2
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
dense_neurons = 32
for _ in xrange(hparams[HP_DENSE_LAYERS]):
model.add(tf.keras.layers.Dense(dense_neurons, activation="relu"))
dense_neurons *= 2
model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=hparams[HP_OPTIMIZER],
metrics=["accuracy"],
)
return model
|
Create a Keras model with the given hyperparameters.
Args:
hparams: A dict mapping hyperparameters in `HPARAMS` to values.
seed: A hashable object to be used as a random seed (e.g., to
construct dropout layers in the model).
Returns:
A compiled Keras model.
|
juraj-google-style
|
def guess_strategy_type(file_name_or_ext):
if ('.' not in file_name_or_ext):
ext = file_name_or_ext
else:
(name, ext) = os.path.splitext(file_name_or_ext)
ext = ext.lstrip('.')
file_type_map = get_file_type_map()
return file_type_map.get(ext, None)
|
Guess strategy type to use for file by extension.
Args:
file_name_or_ext: Either a file name with an extension or just
an extension
Returns:
Strategy: Type corresponding to extension or None if there's no
corresponding strategy type
|
codesearchnet
|
def start_listener_thread(self, timeout_ms: int = 30000, exception_handler: Callable = None):
assert not self.should_listen and self.sync_thread is None, 'Already running'
self.should_listen = True
self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler)
self.sync_thread.name = f'GMatrixClient.listen_forever user_id:{self.user_id}'
|
Start a listener greenlet to listen for events in the background.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread.
|
juraj-google-style
|
def normalized_energy_at_conditions(self, pH, V):
return self.energy_at_conditions(pH, V) * self.normalization_factor
|
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
|
juraj-google-style
|
def _verifyStackFrames(self, stack_frames):
self.assertTrue([frame for frame in stack_frames if frame[0] == _current_file_full_path])
|
Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
|
github-repos
|
def parse_file(path, format=None, encoding='utf-8', force_types=True):
try:
with open(path, 'rb') as f:
return parse(f, format, encoding, force_types)
except EnvironmentError as e:
raise AnyMarkupError(e, traceback.format_exc())
|
A convenience wrapper of parse, which accepts path of file to parse.
Args:
path: path to file to parse
format: explicitly override the guessed `inp` markup format
encoding: file encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed `inp` (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing
|
codesearchnet
|
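Usage sketch for parse_file, assuming the anymarkup backends are available; the file paths are hypothetical:

data = parse_file('config.yaml')                  # format guessed from the extension
data = parse_file('settings.conf', format='ini')  # or override the guessed format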
def create_new(cls, mapreduce_id, shard_number):
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
|
Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
|
juraj-google-style
|
def _redistribute_builder(self, afi='ipv4', source=None):
if (source == 'connected'):
return getattr(self._rbridge, 'rbridge_id_router_router_bgp_address_family_{0}_{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_call_point_holder_redistribute_connected_redistribute_connected'.format(afi))
else:
raise AttributeError('Invalid source.')
|
Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
AttributeError: if `source` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
|
codesearchnet
|
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[List[List[int]]]=None, node_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
|
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
list of list of strings (nodes of a batch of examples).
|
github-repos
|
def get_rows_fieldnames_from_query(
session: Union[Session, Engine, Connection],
query: Query) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:
result = session.execute(query)
fieldnames = result.keys()
rows = result.fetchall()
return rows, fieldnames
|
Returns results and column names from a query.
Args:
session: SQLAlchemy :class:`Session`, :class:`Engine`, or
:class:`Connection` object
query: SQLAlchemy :class:`Query`
Returns:
``(rows, fieldnames)`` where ``rows`` is the usual set of results and
``fieldnames`` are the name of the result columns/fields.
|
juraj-google-style
|
def ConvertToWireFormat(self, value):
output = _SerializeEntries(
(python_format, wire_format, value.type_descriptor)
for (python_format, wire_format) in value.wrapped_list)
return b"", b"", output
|
Convert to the wire format.
Args:
value: is of type RepeatedFieldHelper.
Returns:
A wire format representation of the value.
|
juraj-google-style
|
def _scalar_field_to_json(field, row_value):
converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)
if converter is None:
return row_value
return converter(row_value)
|
Maps a field and value to a JSON-safe value.
Args:
field ( \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
):
The SchemaField to use for type conversion and field name.
row_value (any):
Value to be converted, based on the field's type.
Returns:
any:
A JSON-serializable object.
|
juraj-google-style
|
class AriaProjectorMLP(nn.Module):
def __init__(self, in_features, hidden_features, output_dim):
super().__init__()
self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
self.act = ACT2FN['gelu_new']
def forward(self, hidden_states):
hidden_states = self.act(self.linear_in(hidden_states))
hidden_states = self.linear_out(hidden_states)
return hidden_states
|
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
|
github-repos
|
def Var(self, mu=None):
if (mu is None):
mu = self.Mean()
var = 0.0
for (x, p) in self.d.iteritems():
var += (p * ((x - mu) ** 2))
return var
|
Computes the variance of a PMF.
Args:
mu: the point around which the variance is computed;
if omitted, computes the mean
Returns:
float variance
|
codesearchnet
|
def __init__(self, all_batch_items=None, commit_count=None):
self._all_batch_items = all_batch_items
self._commit_count = commit_count
self.mutations = []
|
Fake ``google.cloud.datastore.batch.Batch`` object.
Args:
all_batch_items: (list) If set, will append all entities/keys added to
this batch.
commit_count: (list of int) If set, will increment commit_count[0] on
each ``commit``.
|
github-repos
|
def get_execution_host_info():
host = os.environ.get('HOSTNAME', None)
cluster = os.environ.get('SGE_O_HOST', None)
if (host is None):
try:
import socket
host = (host or socket.gethostname())
except:
pass
return ((host or 'unknown'), (cluster or 'unknown'))
|
Tries to return a tuple describing the execution host.
Doesn't work for all queueing systems
Returns:
(HOSTNAME, CLUSTER_NAME)
|
codesearchnet
|
def app(environ, start_response):
from wsgi import container
'Add Environ To Service Container\n Add the environ to the service container. The environ is generated by the\n the WSGI server above and used by a service provider to manipulate the\n incoming requests\n '
container.bind('Environ', environ)
'Execute All Service Providers That Require The WSGI Server\n Run all service provider boot methods if the wsgi attribute is true.\n '
try:
for provider in container.make('WSGIProviders'):
container.resolve(provider.boot)
except Exception as e:
container.make('ExceptionHandler').load_exception(e)
"We Are Ready For Launch\n If we have a solid response and not redirecting then we need to return\n a 200 status code along with the data. If we don't, then we'll have\n to return a 302 redirection to where ever the user would like go\n to next.\n "
start_response(container.make('Request').get_status_code(), container.make('Request').get_and_reset_headers())
'Final Step\n This will take the data variable from the Service Container and return\n it to the WSGI server.\n '
return iter([bytes(container.make('Response'), 'utf-8')])
|
The WSGI Application Server.
Arguments:
environ {dict} -- The WSGI environ dictionary
start_response {WSGI callable}
Returns:
WSGI Response
|
codesearchnet
|
def __init__(self, forecast_io):
if forecast_io.has_currently():
self.currently = forecast_io.get_currently()
for item in self.currently.keys():
setattr(self, item, self.currently[item])
|
Construct a new 'FIOCurrently' object.
Receives a ForecastIO object and gets the current weather conditions
if they are available in the object.
Args:
forecast_io (ForecastIO): The ForecastIO object
|
juraj-google-style
|
def _handle_create(self, response, ignore_tombstone, auto_refresh):
if response.status_code == 201:
self.uri = self.repo.parse_uri(response.text)
if auto_refresh:
self.refresh()
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.refresh()
if hasattr(self,'_post_create'):
self._post_create(auto_refresh=auto_refresh)
elif response.status_code == 404:
raise Exception('HTTP 404, for this POST request target location does not exist')
elif response.status_code == 409:
raise Exception('HTTP 409, resource already exists')
elif response.status_code == 410:
if ignore_tombstone:
response = self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
if response.status_code == 204:
logger.debug('tombstone removed, retrying create')
self.create()
else:
raise Exception('HTTP %s, Could not remove tombstone for %s' % (response.status_code, self.uri))
else:
raise Exception('tombstone for %s detected, aborting' % self.uri)
elif response.status_code == 415:
raise Exception('HTTP 415, unsupported media type')
else:
raise Exception('HTTP %s, unknown error creating resource' % response.status_code)
return self
|
Handles response from self.create()
Args:
response (requests.models.Response): response object from self.create()
ignore_tombstone (bool): If True, will attempt creation; if a tombstone exists (410), will delete the tombstone and retry
|
juraj-google-style
|
def text_pb(tag, data, description=None):
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary
|
Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
|
juraj-google-style
|
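Usage sketch, assuming the TensorBoard text-plugin imports used above (`tensor_util`, `metadata`, `summary_pb2`) are in scope and a NumPy version that still provides np.object, as the helper requires:

summary = text_pb('greeting', 'Hello, *world*!', description='A markdown greeting.')
print(summary.value[0].tag)  # 'greeting'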
def ProtoFromDataFrames(self, dataframes):
datasets = []
for dataframe in dataframes:
table = dataframe['table']
table_entries = {}
for col in table:
table_entries[col] = self.NdarrayToEntry(table[col])
datasets.append({
'entries': table_entries,
'size': len(table),
'name': dataframe['name']
})
return self.GetDatasetsProto(datasets)
|
Creates a feature statistics proto from a set of pandas dataframes.
Args:
dataframes: A list of dicts describing tables for each dataset for the
proto. Each entry contains a 'table' field holding the dataframe of the
data and a 'name' field to identify the dataset in the proto.
Returns:
The feature statistics proto for the provided tables.
|
juraj-google-style
|
def list_container_instance_groups_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.ContainerInstance/ContainerGroups', '?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
List the container groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON list of container groups and their properties.
|
codesearchnet
|
def set_soft_device_placement(enabled):
context.context().soft_device_placement = enabled
|
Enable or disable soft device placement.
If enabled, ops can be placed on different devices than the device explicitly
assigned by the user. This potentially has a large performance cost due to an
increase in data communication between devices.
Some cases where soft_device_placement would modify device assignment are:
1. no GPU/TPU implementation for the OP
2. no GPU devices are known or registered
3. need to co-locate with reftype input(s) which are from CPU
4. an OP can not be compiled by XLA. Common for TPU which always requires
the XLA compiler.
For TPUs, if this option is true, a feature called automatic outside
compilation is enabled. Automatic outside compilation will move uncompilable
ops within a TPU program to instead run on the host. This can be used when
encountering compilation failures due to unsupported ops.
Note: by default soft device placement is enabled when running in eager mode
(for convenience) and disabled in graph mode (for performance).
Args:
enabled: A boolean indicating whether to enable soft placement.
|
github-repos
|
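In user code the same behavior is reached through the public tf.config wrapper; a short sketch:

import tensorflow as tf

tf.config.set_soft_device_placement(True)   # let ops fall back to another device when needed
print(tf.config.get_soft_device_placement())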
def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
extra_dependencies = extra_dependencies or []
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values())
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies
|
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for method.
|
juraj-google-style
|
def __check_no_missing_attributes(self, node: yaml.Node, mapping: CommentedMap) -> None:
logger.debug('Checking presence of required attributes')
for (name, type_, required) in class_subobjects(self.class_):
if (required and (name not in mapping)):
raise RecognitionError('{}{}Missing attribute {} needed for constructing a {}'.format(node.start_mark, os.linesep, name, self.class_.__name__))
if ((name in mapping) and (not self.__type_matches(mapping[name], type_))):
raise RecognitionError('{}{}Attribute {} has incorrect type {}, expecting a {}'.format(node.start_mark, os.linesep, name, type(mapping[name]), type_))
|
Checks that all required attributes are present.
Also checks that they're of the correct type.
Args:
mapping: The mapping with subobjects of this object.
Raises:
RecognitionError: if an attribute is missing or the type \
is incorrect.
|
codesearchnet
|
def get_partstudio_tessellatededges(self, did, wid, eid):
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges')
|
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
|
juraj-google-style
|
def checksum(path):
filesystem = FileSystems.get_filesystem(path)
return filesystem.checksum(path)
|
Fetch checksum metadata of a file on the
:class:`~apache_beam.io.filesystem.FileSystem`.
This operation returns checksum metadata as stored in the underlying
FileSystem. It should not read any file data. Checksum type and format are
FileSystem dependent and are not compatible between FileSystems.
Args:
path: string path of a file.
Returns: string containing checksum
Raises:
``BeamIOError``: if path isn't a file or doesn't exist.
|
github-repos
|
def run(argv=None, save_main_session=True):
    known_args, pipeline_args = parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
    engine_handler = KeyedModelHandler(TensorRTEngineHandlerNumPy(
        min_batch_size=1, max_batch_size=1, engine_path=known_args.engine_path))
    with beam.Pipeline(options=pipeline_options) as p:
        filename_value_pair = (
            p | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input)
            | 'ReadImageData' >> beam.Map(lambda image_name: read_image(
                image_file_name=image_name, path_to_dir=known_args.images_dir))
            | 'AttachImageSizeToKey' >> beam.Map(attach_im_size_to_key)
            | 'PreprocessImages' >> beam.MapTuple(
                lambda file_name, data: (file_name, preprocess_image(data))))
        predictions = (filename_value_pair
                       | 'TensorRTRunInference' >> RunInference(engine_handler)
                       | 'ProcessOutput' >> beam.ParDo(PostProcessor()))
        _ = predictions | 'WriteOutputToGCS' >> beam.io.WriteToText(
            known_args.output, shard_name_template='', append_trailing_newlines=True)
|
Builds and runs a Beam pipeline that performs TensorRT inference on images.
Args:
argv: Command line arguments defined for this example.
save_main_session: Whether to pickle the state of the __main__ module so that
globals defined there are available to the Beam workers.
|
github-repos
|
def ToRequest(self):
param = {}
if self.email:
param['email'] = self.email
if self.user_id:
param['localId'] = self.user_id
if self.name:
param['displayName'] = self.name
if self.photo_url:
param['photoUrl'] = self.photo_url
if (self.email_verified is not None):
param['emailVerified'] = self.email_verified
if self.password_hash:
param['passwordHash'] = base64.urlsafe_b64encode(self.password_hash)
if self.salt:
param['salt'] = base64.urlsafe_b64encode(self.salt)
if self.provider_info:
param['providerUserInfo'] = self.provider_info
return param
|
Converts to gitkit api request parameter dict.
Returns:
Dict, containing non-empty user attributes.
|
codesearchnet
|
def validlocations(configuration=None):
if (Locations._validlocations is None):
if (configuration is None):
configuration = Configuration.read()
Locations._validlocations = configuration.call_remoteckan('group_list', {'all_fields': True})
return Locations._validlocations
|
Read valid locations from HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
List[Dict]: A list of valid locations
|
codesearchnet
|
def multiple(layer: int, limit: int) -> Set[str]:
return {str(x).zfill(2) for x in [2**x for x in range(limit)] if x % 2**(layer - 1) == 0}
|
Returns a set of strings to be used as Slots with Pabianas default Clock.
Args:
layer: The layer in the hierarchy this Area is placed in.
Technically, the number specifies how many of the Clocks signals are relevant to the Area.
Between 1 and limit.
limit: The number of layers of the hierarchy.
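Worked examples (layer and limit values chosen for illustration):
multiple(1, 3)  # -> {'01', '02', '04'}: every clock signal is relevant
multiple(2, 3)  # -> {'02', '04'}: '01' is dropped because 1 % 2**(2 - 1) != 0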
|
juraj-google-style
|
def _apply_same_padding(inputs, kernel_size, strides, data_format, operation_type, dilation_rate=1):
spatial_shape = inputs.shape[2:]
num_spatial_dims = len(spatial_shape)
padding = []
if operation_type != 'pooling':
dilation_rate = standardize_tuple(dilation_rate, num_spatial_dims, 'dilation_rate')
for i in range(num_spatial_dims):
dil = 1 if operation_type == 'pooling' else dilation_rate[i]
pad = _compute_padding_length(spatial_shape[i], kernel_size[i], strides[i], dil)
padding.append(pad)
if all((left == right for left, right in padding)):
return (inputs, [left for left, _ in padding])
flattened_padding = []
for pad in reversed(padding):
flattened_padding.extend(pad)
mode = 'replicate' if operation_type == 'pooling' else 'constant'
return (tnn.pad(inputs, pad=tuple(flattened_padding), mode=mode), 0)
|
Apply same padding to the input tensor.
This function will evaluate if the padding value is compatible with torch
functions. To avoid calling `pad()` as much as possible, which may cause
performance or memory issues, when compatible, it does not apply the padding
to the tensor, but returns the input tensor and the padding value to pass to
the torch functions. If not compatible, it returns the padded tensor and 0
as the padding value.
Returns:
tensor: A padded tensor or the inputs.
padding: The padding value, ready to pass to the torch functions.
|
github-repos
|
def validate_language_key(obj, key):
backend = bigchaindb.config['database']['backend']
if (backend == 'localmongodb'):
data = obj.get(key, {})
if isinstance(data, dict):
validate_all_values_for_key_in_obj(data, 'language', validate_language)
elif isinstance(data, list):
validate_all_values_for_key_in_list(data, 'language', validate_language)
|
Validate all nested "language" key in `obj`.
Args:
obj (dict): dictionary whose "language" key is to be validated.
Returns:
None: validation successful
Raises:
ValidationError: will raise exception in case language is not valid.
|
codesearchnet
|
def variables_initializer(var_list, name='init'):
if var_list and (not context.executing_eagerly()):
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
|
Returns an Op that initializes a list of variables.
After you launch the graph in a session, you can run the returned Op to
initialize all the variables in `var_list`. This Op runs all the
initializers of the variables in `var_list` in parallel.
Calling `initialize_variables()` is equivalent to passing the list of
initializers to `Group()`.
If `var_list` is empty, however, the function still returns an Op that can
be run. That Op just has no effect.
@compatibility(TF2)
In TF2, variables are initialized immediately when they are created. There is
no longer a need to run variable initializers before using them.
@end_compatibility
Args:
var_list: List of `Variable` objects to initialize.
name: Optional name for the returned operation.
Returns:
An Op that runs the initializers of all the specified variables.
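A minimal TF1-style sketch (variable values are illustrative):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v1 = tf.Variable(tf.zeros([3]))
v2 = tf.Variable(tf.ones([3]))
init_op = tf.variables_initializer([v1, v2], name='init_v1_v2')
with tf.Session() as sess:
    sess.run(init_op)    # runs both initializers in parallel
    print(sess.run(v2))  # [1. 1. 1.]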
|
github-repos
|
def pyxb_to_dict(rp_pyxb):
return {'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')), 'num': _get_as_int(rp_pyxb), 'block': _get_as_set(rp_pyxb, 'block'), 'pref': _get_as_set(rp_pyxb, 'pref')}
|
Convert ReplicationPolicy PyXB object to a normalized dict.
Args:
rp_pyxb: ReplicationPolicy to convert.
Returns:
dict : Replication Policy as normalized dict.
Example::
{
'allowed': True,
'num': 3,
'block': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'pref': {'urn:node:NODE4', 'urn:node:NODE5'},
}
|
codesearchnet
|
def sd(line, cell=None):
parser = google.datalab.utils.commands.CommandParser(prog='%sd', description='Execute various Stackdriver related operations. Use "%sd <stackdriver_product> -h" for help on a specific Stackdriver product.')
_create_monitoring_subparser(parser)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
|
Implements the stackdriver cell magic for ipython notebooks.
Args:
line: the contents of the %sd magic line.
cell: the contents of the cell body, if any.
Returns:
The results of executing the cell.
|
codesearchnet
|
def greater_equal(x1, x2):
if any_symbolic_tensors((x1, x2)):
return GreaterEqual().symbolic_call(x1, x2)
return backend.numpy.greater_equal(x1, x2)
|
Return the truth value of `x1 >= x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
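A quick sketch, assuming the Keras 3 `keras.ops` namespace:
from keras import ops

ops.greater_equal([1, 2, 3], [2, 2, 2])  # -> tensor of [False, True, True]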
|
github-repos
|
def is_abstract(x: Any) -> bool:
return utils.is_partial(x) or is_pure_symbolic(x)
|
Returns if the input value is abstract.
Example::
@pg.symbolize
class Foo:
def __init__(self, x):
pass
class Bar(pg.PureSymbolic):
pass
assert not pg.is_abstract(1)
assert not pg.is_abstract(Foo(1))
assert pg.is_abstract(Foo.partial())
assert pg.is_abstract(Bar())
assert pg.is_abstract(Foo(Bar()))
assert pg.is_abstract(Foo(pg.oneof([1, 2])))
Args:
x: Value to query against.
Returns:
True if value itself is partial/PureSymbolic or its child and nested
child fields contain partial/PureSymbolic values.
|
github-repos
|
def calculate_row_format(columns, keys=None):
row_format = ''
if keys is None:
keys = columns.keys()
else:
keys = [key for key in keys if key in columns]
for key in keys:
if len(row_format) > 0:
row_format += "|"
row_format += "%%(%s)-%ds" % (key, columns[key])
return '|' + row_format + '|'
|
Calculate row format.
Args:
columns (dict): the keys are the column name and the value the max length.
keys (list): optional list of keys to order columns as well as to filter for them.
Returns:
str: format for table row
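Illustrative usage (column names and widths are assumptions):
columns = {'name': 10, 'status': 6}
fmt = calculate_row_format(columns, keys=['name', 'status'])
# fmt == '|%(name)-10s|%(status)-6s|'
print(fmt % {'name': 'job-1', 'status': 'ok'})  # |job-1     |ok    |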
|
juraj-google-style
|
def search_stack_for_localvar(varname):
curr_frame = inspect.currentframe()
print(' * Searching parent frames for: ' + six.text_type(varname))
frame_no = 0
while curr_frame.f_back is not None:
if varname in curr_frame.f_locals.keys():
print(' * Found in frame: ' + six.text_type(frame_no))
return curr_frame.f_locals[varname]
frame_no += 1
curr_frame = curr_frame.f_back
print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
return None
|
Finds a local varable somewhere in the stack and returns the value
Args:
varname (str): variable name
Returns:
None if varname is not found else its value
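A small sketch of how the stack walk behaves (function and variable names are illustrative):
def inner():
    return search_stack_for_localvar('secret')

def outer():
    secret = 42
    return inner()

outer()  # prints the search progress and returns 42, found in outer's frame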
|
juraj-google-style
|
def get_flag_value(self, wanted_flag_name):
tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return (False, None)
pos = 0
while True:
match, has_value = TTParameters.match_next_flag(tensor_tracer_flags, pos)
if not match:
return (False, None)
flag_name = match.group(1)
if has_value:
flag_value = match.group(2)
else:
flag_value = None
if flag_name == wanted_flag_name:
return (True, flag_value)
pos = match.end()
raise RuntimeError('Invalid tensor tracer flag. Could not recognize %s.' % flag_name)
|
Returns the value of a TensorTracer flags.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
A pair where the first element indicates if the flag is
found and the second element is the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
|
github-repos
|
def underline(self, action):
    if action == 'off':
        action = '0'
    self.send(chr(27) + chr(45) + action)
|
Enable/cancel underline printing
Args:
action -- Underline mode to send to the printer: a numeric string such as '1' - '4', or 'off' to cancel underlining (sent as '0')
Returns:
None
Raises:
None
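Hypothetical usage; `printer` is an instance of this class, and whether `send()` is also used for printable text is an assumption:
printer.underline('1')      # single-weight underline
printer.send('underlined')  # assumed: send() also transmits printable text
printer.underline('off')    # sends ESC - 0 to cancel underlining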
|
juraj-google-style
|
def is_union(declaration):
if (not is_class(declaration)):
return False
decl = class_traits.get_declaration(declaration)
return (decl.class_type == class_declaration.CLASS_TYPES.UNION)
|
Returns True if declaration represents a C++ union
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ union
|
codesearchnet
|
def text(self, tag, textdata, step=None):
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata)
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step)
|
Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
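Hypothetical usage, assuming `writer` is an instance of this summary-writer class:
writer.text('notes', 'run finished **successfully**', step=100)  # scalar string
writer.text('pairs', [['a', 'b'], ['c', 'd']], step=100)         # 2D string table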
|
juraj-google-style
|
def get(cls, sha1=''):
with conf.within_proj_dir():
cmd = 'git show -s --format="%H||%an||%ae||%s||%b||%P" {}'.format(sha1)
result = shell.run(cmd, capture=True, never_pretend=True).stdout
(sha1, name, email, title, desc, parents) = result.split('||')
return CommitDetails(sha1=sha1, author=Author(name, email), title=title, desc=desc, parents_sha1=parents.split())
|
Return details about a given commit.
Args:
sha1 (str):
The sha1 of the commit to query. If not given, it will return
the details for the latest commit.
Returns:
CommitDetails: Commit details. You can use the instance of the
class to query git tree further.
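Illustrative call (the sha1 is a placeholder, and attribute access on the result assumes `CommitDetails` exposes its constructor arguments):
details = CommitDetails.get('abc1234')
print(details.title)         # commit subject line
print(details.parents_sha1)  # list of parent commit hashes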
|
codesearchnet
|
def resolve_identifier(self, name, expected_type=None):
name = str(name)
if (name in self._known_identifiers):
obj = self._known_identifiers[name]
if ((expected_type is not None) and (not isinstance(obj, expected_type))):
raise UnresolvedIdentifierError(u'Identifier resolved to an object of an unexpected type', name=name, expected_type=expected_type.__name__, resolved_type=obj.__class__.__name__)
return obj
if (self.parent is not None):
try:
return self.parent.resolve_identifier(name)
except UnresolvedIdentifierError:
pass
raise UnresolvedIdentifierError(u'Could not resolve identifier', name=name, scope=self.name)
|
Resolve an identifier to an object.
There is a single namespace for identifiers, so the caller should also
pass an expected type that is checked against what the identifier
actually resolves to, so that there are no surprises.
Args:
name (str): The name that we want to resolve
expected_type (type): The type of object that we expect to receive.
This is an optional parameter. If None is passed, no type checking
is performed.
Returns:
object: The resolved object
|
codesearchnet
|
def export_verified_variants(aggregate_variants, unique_callers):
document_lines = []
for variant in aggregate_variants:
samples = []
for sample in variant['samples']:
line = []
line.append(variant['institute'])
line.append(variant['_id'])
line.append(variant['category'])
line.append(variant['variant_type'])
line.append(variant['display_name'][:30])
case_name = variant['case_obj']['display_name']
local_link = '/'.join(['', variant['institute'], case_name, variant['_id']])
line.append(local_link)
line.append(variant.get('validation'))
line.append(case_name)
case_individual = next((ind for ind in variant['case_obj']['individuals'] if (ind['individual_id'] == sample['sample_id'])))
if (case_individual['phenotype'] == 2):
line.append(' '.join([sample.get('display_name'), '(A)']))
else:
line.append(sample.get('display_name'))
line.append(''.join(['chr', variant['chromosome'], ':', str(variant['position'])]))
line.append('>'.join([variant.get('reference')[:10], variant.get('alternative')[:10]]))
genes = []
prot_effect = []
funct_anno = []
for gene in variant.get('genes'):
genes.append(gene.get('hgnc_symbol', ''))
funct_anno.append(gene.get('functional_annotation'))
for transcript in gene.get('transcripts'):
if (transcript.get('is_canonical') and transcript.get('protein_sequence_name')):
prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))
line.append(','.join(prot_effect))
line.append(','.join(funct_anno))
line.append(','.join(genes))
line.append(variant.get('rank_score'))
line.append(variant.get('cadd_score'))
line.append(sample.get('genotype_call'))
line.append(sample['allele_depths'][0])
line.append(sample['allele_depths'][1])
line.append(sample['genotype_quality'])
for caller in unique_callers:
if variant.get(caller):
line.append(variant.get(caller))
else:
line.append('-')
document_lines.append(line)
return document_lines
|
Create the lines for an excel file with verified variants for
an institute
Args:
aggregate_variants(list): a list of variants with aggregates case data
unique_callers(set): a unique list of available callers
Returns:
document_lines(list): list of lines to include in the document
|
codesearchnet
|
def Create(self, request, global_params=None):
config = self.GetMethodConfig('Create')
return self._RunMethod(config, request, global_params=global_params)
|
Creates a new `BitbucketServerConfig`. This API is experimental.
Args:
request: (CloudbuildProjectsLocationsBitbucketServerConfigsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def __init__(self, binary_id, cause=None):
super(UnknownSignedBinaryError, self).__init__(binary_id, cause=cause)
self.binary_id = binary_id
self.message = ("Signed binary of type %s and path %s was not found" %
(self.binary_id.binary_type, self.binary_id.path))
|
Initializes UnknownSignedBinaryError.
Args:
binary_id: rdf_objects.SignedBinaryID for the signed binary.
cause: A lower-level Exception raised by the database driver, which might
have more details about the error.
|
juraj-google-style
|