code | docstring | source
---|---|---
def __init__(self, file_path, cause):
message = six.text_type("Malformed config at {}: {}").format(
file_path,
cause
)
super(MalformedConfig, self).__init__(message)
|
Exception to be raised if the passed file is invalid.
Args:
file_path (string): path to bad config
cause (string): reason of failure, i.e. what exactly was the
problem while parsing
|
juraj-google-style
|
def add_peer_parser(subparsers, parent_parser):
parser = subparsers.add_parser('peer', help='Displays information about validator peers', description="Provides a subcommand to list a validator's peers")
grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')
grand_parsers.required = True
add_peer_list_parser(grand_parsers, parent_parser)
|
Adds argument parser for the peer command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
|
codesearchnet
|
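A minimal, self-contained argparse sketch of the same nested-subparser pattern; the CLI names and the stub add_peer_list_parser below are illustrative, not the actual validator CLI.
import argparse

def add_peer_list_parser(grand_parsers, parent_parser):
    # Stub subcommand, standing in for the real peer-list parser.
    grand_parsers.add_parser('list', help='Lists peers', parents=[parent_parser])

parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--url', default='http://localhost:8008')

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(title='subcommands', dest='command')
subparsers.required = True

peer_parser = subparsers.add_parser('peer', help='Displays information about validator peers')
grand_parsers = peer_parser.add_subparsers(title='subcommands', dest='subcommand')
grand_parsers.required = True
add_peer_list_parser(grand_parsers, parent_parser)

args = parser.parse_args(['peer', 'list'])
print(args.command, args.subcommand)  # peer list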
def send(self, message):
if ('call_id' not in message):
message['call_id'] = self.gen_call_id()
self._ws.send(message.to_json())
|
Sends an RTMMessage.
Should be called after starting the loop.
Args:
message (RTMMessage): the message to send
Raises:
WebSocketConnectionClosedException: if the loop is closed
|
codesearchnet
|
def graph_execution_traces(self, digest=False, begin=None, end=None):
digests = self._graph_execution_trace_digests
if begin is not None or end is not None:
begin = begin or 0
end = end or len(digests)
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_graph_execution_trace(digest) for digest in digests]
|
Get all the intra-graph execution tensor traces read so far.
Args:
digest: Whether the results will be returned in the more light-weight
digest form.
begin: Optional beginning index for the requested traces or their digests.
Python-style negative indices are supported.
end: Optional ending index for the requested traces or their digests.
Python-style negative indices are supported.
Returns:
If `digest`: a `list` of `GraphExecutionTraceDigest` objects.
Else: a `list` of `GraphExecutionTrace` objects.
|
github-repos
|
def extract_derivative_feature(feature):
first_derivative_feature = processing.derivative_extraction(feature, DeltaWindows=2)
second_derivative_feature = processing.derivative_extraction(first_derivative_feature, DeltaWindows=2)
feature_cube = np.concatenate((feature[:, :, None], first_derivative_feature[:, :, None], second_derivative_feature[:, :, None]), axis=2)
return feature_cube
|
This function extracts temporal derivative features which are
first and second derivatives.
Args:
feature (array): The feature vector, whose size is N x M.
Return:
array: The feature cube vector which contains the static, first and second derivative features of size: N x M x 3
|
codesearchnet
|
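A minimal NumPy sketch of the same stacking along a new third axis; simple_delta below is a naive stand-in for processing.derivative_extraction, not the library's implementation.
import numpy as np

def simple_delta(feature, delta_windows=2):
    # Stand-in delta: naive finite difference along the time axis (rows).
    padded = np.pad(feature, ((delta_windows, delta_windows), (0, 0)), mode='edge')
    return (padded[2 * delta_windows:, :] - padded[:-2 * delta_windows, :]) / (2.0 * delta_windows)

feature = np.random.rand(10, 4)   # N x M static features
d1 = simple_delta(feature)        # first derivative, N x M
d2 = simple_delta(d1)             # second derivative, N x M
cube = np.concatenate((feature[:, :, None], d1[:, :, None], d2[:, :, None]), axis=2)
print(cube.shape)                 # (10, 4, 3)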
def set_timezone(tz=None, deploy=False):
if not tz:
raise CommandExecutionError("Timezone name option must not be none.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone',
'element': '<timezone>{0}</timezone>'.format(tz)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret
|
Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.
Args:
tz (str): The name of the timezone to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
CLI Example:
.. code-block:: bash
salt '*' panos.set_timezone UTC
salt '*' panos.set_timezone UTC deploy=True
|
juraj-google-style
|
def __definitions_descriptor(self):
result = {}
for (def_key, def_value) in self.__parser.schemas().iteritems():
if (('properties' in def_value) or ('type' in def_value)):
key_result = {}
required_keys = set()
if ('type' in def_value):
key_result['type'] = def_value['type']
if ('properties' in def_value):
for (prop_key, prop_value) in def_value['properties'].items():
if (isinstance(prop_value, dict) and ('required' in prop_value)):
required_keys.add(prop_key)
del prop_value['required']
key_result['properties'] = def_value['properties']
if required_keys:
key_result['required'] = sorted(required_keys)
result[def_key] = key_result
for def_value in result.itervalues():
for prop_value in def_value.itervalues():
if isinstance(prop_value, dict):
if ('$ref' in prop_value):
prop_value['type'] = 'object'
self._add_def_paths(prop_value)
return result
|
Describes the definitions section of the OpenAPI spec.
Returns:
Dictionary describing the definitions of the spec.
|
codesearchnet
|
def xpath(self, exact=None):
exact = (exact if (exact is not None) else self.exact)
if isinstance(self.expression, AbstractExpression):
expression = self._apply_expression_filters(self.expression)
return to_xpath(expression, exact=exact)
else:
return str_(self.expression)
|
Returns the XPath query for this selector.
Args:
exact (bool, optional): Whether to exactly match text.
Returns:
str: The XPath query for this selector.
|
codesearchnet
|
def first_function(function: _evaluation.FirstFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
del params_result
if operand_result is None:
raise ValueError('first() cannot be called without an operand.')
result = copy.copy(operand_result)
if _fhir_path_data_types.is_collection(function.parent_node.return_type):
return _sql_data_types.Select(select_part=result.select_part, from_part=f'(SELECT FIRST({result.sql_alias}) AS {result.sql_alias} FROM {result.to_subquery()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)
else:
new_alias = result.sql_alias
return _sql_data_types.Select(select_part=_sql_data_types.Identifier((new_alias,), _sql_data_type=result.sql_data_type, _sql_alias=new_alias), from_part=f'(SELECT FIRST({new_alias}) AS {new_alias} FROM {result.to_subquery()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)
|
Generates Spark SQL representing the FHIRPath first() function.
Returns a collection with the first value of the operand collection.
The returned SQL expression is a table with cardinality 0 or 1.
Args:
function: The FHIRPath AST `FirstFunction` node
operand_result: The expression which is being evaluated
params_result: The parameter passed in to function
Returns:
A compiled Spark SQL expression.
Raises:
ValueError: When the function is called without an operand
|
github-repos
|
def Match(self, registry_key):
value_names = frozenset([
registry_value.name for registry_value in registry_key.GetValues()])
return self._value_names.issubset(value_names)
|
Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the Windows Registry key matches the filter.
|
juraj-google-style
|
def __init__(self, outer_index, inner_index):
if outer_index.batch_dims != inner_index.batch_dims:
raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.')
super(ProductIndexMap, self).__init__(indices=inner_index.indices + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims)
self.outer_index = outer_index
self.inner_index = inner_index
|
Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to
`outer_index.num_segments` * `inner_index.num_segments`.
Args:
outer_index: IndexMap.
inner_index: IndexMap, must have the same shape as `outer_index`.
|
github-repos
|
def probe_async(self, callback):
def _on_finished(_name, control_info, exception):
if (exception is not None):
callback(self.id, False, str(exception))
return
self._control_info = control_info
try:
info = {'connection_string': 'direct', 'uuid': control_info.uuid, 'signal_strength': 100}
self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)
finally:
callback(self.id, True, None)
self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)
|
Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe
|
codesearchnet
|
def get_best_dataset_key(key, choices):
if ((key.wavelength is not None) and choices):
nearest_wl = min([_wl_dist(key.wavelength, x.wavelength) for x in choices if (x.wavelength is not None)])
choices = [c for c in choices if (_wl_dist(key.wavelength, c.wavelength) == nearest_wl)]
if ((key.modifiers is None) and choices):
num_modifiers = min((len((x.modifiers or tuple())) for x in choices))
choices = [c for c in choices if (len((c.modifiers or tuple())) == num_modifiers)]
if ((key.calibration is None) and choices):
best_cal = [x.calibration for x in choices if x.calibration]
if best_cal:
best_cal = min(best_cal, key=(lambda x: CALIBRATION_ORDER[x]))
choices = [c for c in choices if (c.calibration == best_cal)]
if ((key.resolution is None) and choices):
low_res = [x.resolution for x in choices if x.resolution]
if low_res:
low_res = min(low_res)
choices = [c for c in choices if (c.resolution == low_res)]
if ((key.level is None) and choices):
low_level = [x.level for x in choices if x.level]
if low_level:
low_level = max(low_level)
choices = [c for c in choices if (c.level == low_level)]
return choices
|
Choose the "best" `DatasetID` from `choices` based on `key`.
The best key is chosen based on the follow criteria:
1. Central wavelength is nearest to the `key` wavelength if
specified.
2. Least modified dataset if `modifiers` is `None` in `key`.
Otherwise, the modifiers are ignored.
3. Highest calibration if `calibration` is `None` in `key`.
Calibration priority is chosen by `satpy.CALIBRATION_ORDER`.
4. Best resolution (smallest number) if `resolution` is `None`
in `key`. Otherwise, the resolution is ignored.
This function assumes `choices` has already been filtered to only
include datasets that match the provided `key`.
Args:
key (DatasetID): Query parameters to sort `choices` by.
choices (iterable): `DatasetID` objects to sort through to determine
the best dataset.
Returns: List of best `DatasetID`s from `choices`. If there is more
than one element this function could not choose between the
available datasets.
|
codesearchnet
|
def _StopMonitoringProcess(self, process):
if (process is None):
raise ValueError('Missing process.')
pid = process.pid
self._RaiseIfNotMonitored(pid)
del self._process_information_per_pid[pid]
rpc_client = self._rpc_clients_per_pid.get(pid, None)
if rpc_client:
rpc_client.Close()
del self._rpc_clients_per_pid[pid]
if (pid in self._rpc_errors_per_pid):
del self._rpc_errors_per_pid[pid]
logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(process.name, pid))
|
Stops monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not monitored.
ValueError: if the process is missing.
|
codesearchnet
|
def load(self, key_filter=None, header_preproc=None):
df = pd.read_csv(self.input_file,
sep='\t',
dtype=object)
if key_filter is not None:
df = df[df[df.columns[0]].str.match(key_filter)]
meta_col = df.columns[0]
df[meta_col] = df[meta_col].str.split(',').str[-1]
for col_name in df.columns[1:]:
stripped = df[col_name].str.replace(r'[a-z]', '')
df[col_name] = pd.to_numeric(stripped, errors='coerce')
if header_preproc is not None:
df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]
df.columns = ['key'] + [int(y) for y in df.columns[1:]]
return df
|
Load data table from tsv file, from default location
Args:
key_filter (str): additional filter for key column - regex matching
key values to include; None for no filter
header_preproc (func): function to apply to column headers to extract year numbers (as strings)
Returns:
pd.DataFrame: data
|
juraj-google-style
|
def _format_field_name(self, field_name) -> str:
field = self._get_model_field(field_name)
return self.qn(field.column)
|
Formats a field's name for usage in SQL.
Arguments:
field_name:
The field name to format.
Returns:
The specified field name formatted for
usage in SQL.
|
juraj-google-style
|
def draw_rects(self, *rects):
rect_array = ffi.new('SDL_Rect[]', len(rects))
for i, r in enumerate(rects):
rect_array[i] = r._ptr[0]
check_int_err(lib.SDL_RenderDrawRects(self._ptr, rect_array, len(rects)))
|
Draw some number of rectangles on the current rendering target.
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered.
|
juraj-google-style
|
def get_group(self, uuid=None):
if (uuid is None):
uuid = self.uuid
group_data = self.get('group', params={'uuid': uuid})
return group_data
|
Get group data based on uuid.
Args:
uuid (str): optional uuid. Defaults to self.uuid.
Raises:
PyLmodUnexpectedData: No data was returned.
requests.RequestException: Exception connection error
Returns:
dict: group json
|
codesearchnet
|
def retrieve_products(self, reviewer):
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
return list(self.graph.successors(reviewer))
|
Retrieve products reviewed by a given reviewer.
Args:
reviewer: A reviewer.
Returns:
A list of products which the reviewer reviews.
Raises:
TypeError: when the given reviewer isn't an instance of the reviewer
class specified when this graph was constructed.
|
juraj-google-style
|
def use_pcm(self, pcm_params=None, solvent_key='solvent', solvent_params=None, radii_force_field=None):
self.params['pcm'] = dict()
self.params[solvent_key] = dict()
default_pcm_params = {'Theory': 'SSVPE', 'vdwScale': 1.1, 'Radii': 'UFF'}
if (not solvent_params):
solvent_params = {'Dielectric': 78.3553}
if pcm_params:
for (k, v) in pcm_params.items():
self.params['pcm'][k.lower()] = (v.lower() if isinstance(v, str) else v)
for (k, v) in default_pcm_params.items():
if (k.lower() not in self.params['pcm'].keys()):
self.params['pcm'][k.lower()] = (v.lower() if isinstance(v, str) else v)
for (k, v) in solvent_params.items():
self.params[solvent_key][k.lower()] = (v.lower() if isinstance(v, str) else copy.deepcopy(v))
self.params['rem']['solvent_method'] = 'pcm'
if radii_force_field:
self.params['pcm']['radii'] = 'bondi'
self.params['rem']['force_field'] = radii_force_field.lower()
|
Set the solvent model to PCM. Default parameters are trying to comply to
gaussian default value
Args:
pcm_params (dict): The parameters of "$pcm" section.
solvent_key (str): for versions < 4.2 the section name is "pcm_solvent"
solvent_params (dict): The parameters of solvent_key section
radii_force_field (str): The force field used to set the solute
radii. Defaults to UFF.
|
codesearchnet
|
def trigger(self, when=1):
tw = Window(self.stream, self._config['type'])
tw._config['evictPolicy'] = self._config['evictPolicy']
tw._config['evictConfig'] = self._config['evictConfig']
if (self._config['evictPolicy'] == 'TIME'):
tw._config['evictTimeUnit'] = 'MILLISECONDS'
if isinstance(when, datetime.timedelta):
tw._config['triggerPolicy'] = 'TIME'
tw._config['triggerConfig'] = int((when.total_seconds() * 1000.0))
tw._config['triggerTimeUnit'] = 'MILLISECONDS'
elif isinstance(when, int):
tw._config['triggerPolicy'] = 'COUNT'
tw._config['triggerConfig'] = when
else:
raise ValueError(when)
return tw
|
Declare a window with this window's size and a trigger policy.
When the window is triggered is defined by `when`.
If `when` is an `int` then the window is triggered every
`when` tuples. For example, with ``when=5`` the window
will be triggered every five tuples.
If `when` is an `datetime.timedelta` then it is the period
of the trigger. With a `timedelta` representing one minute
then the window is triggered every minute.
By default, when `trigger` has not been called on a `Window`
it triggers for every tuple inserted into the window
(equivalent to ``when=1``).
Args:
when: The trigger policy, either an `int` to define the
number of tuples between triggers or a `datetime.timedelta`
to define the period of the trigger.
Returns:
Window: Window that will be triggered.
.. warning:: A trigger is only supported for a sliding window
such as one created by :py:meth:`last`.
|
codesearchnet
|
def num_batches(self):
raise NotImplementedError
|
Return the size (number of batches) for the dataset created.
For certain type of the data input, the number of batches is known, eg
for Numpy data, the size is same as (number_of_element / batch_size).
Whereas for dataset or python generator, the size is unknown since it
may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
|
github-repos
|
def bounding_box_from(points, i, i1, thr):
pi = points[i]
pi1 = points[i1]
min_lat = min(pi.lat, pi1.lat)
min_lon = min(pi.lon, pi1.lon)
max_lat = max(pi.lat, pi1.lat)
max_lon = max(pi.lon, pi1.lon)
return ((min_lat - thr), (min_lon - thr), (max_lat + thr), (max_lon + thr))
|
Creates bounding box for a line segment
Args:
points (:obj:`list` of :obj:`Point`)
i (int): Line segment start, index in points array
i1 (int): Line segment end, index in points array
thr (float): Amount to expand the bounding box by on every side
Returns:
(float, float, float, float): with bounding box min x, min y, max x and max y
|
codesearchnet
|
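A usage sketch for bounding_box_from as defined above; the Point namedtuple is a hypothetical stand-in exposing the lat/lon attributes the function reads.
from collections import namedtuple

# Hypothetical Point with the lat/lon attributes read by bounding_box_from.
Point = namedtuple('Point', ['lat', 'lon'])

points = [Point(38.71, -9.14), Point(38.74, -9.16)]
min_lat, min_lon, max_lat, max_lon = bounding_box_from(points, 0, 1, thr=0.01)
# The box covers both endpoints, padded by thr on every side.
print(min_lat <= 38.71 <= max_lat and min_lon <= -9.16 <= max_lon)  # True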
def _serve_audio_metadata(self, request):
tag = request.args.get('tag')
run = request.args.get('run')
sample = int(request.args.get('sample', 0))
events = self._multiplexer.Tensors(run, tag)
response = self._audio_response_for_run(events, run, tag, sample)
return http_util.Respond(request, response, 'application/json')
|
Given a tag and list of runs, serve a list of metadata for audio.
Note that the actual audio data are not sent; instead, we respond
with URLs to the audio. The frontend should treat these URLs as
opaque and should not try to parse information about them or
generate them itself, as the format may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
|
juraj-google-style
|
def add_log_file(path):
logfile_handler = RotatingFileHandler(
path, maxBytes=50000, backupCount=2)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
datefmt="%d-%b-%Y %H:%M:%S")
logfile_handler.setFormatter(formatter)
geoparse_logger.addHandler(logfile_handler)
|
Add log file.
Args:
path (:obj:`str`): Path to the log file.
|
juraj-google-style
|
def __init__(self,
unique_identifier=None,
data=None):
super(DecryptResponsePayload, self).__init__(
enums.Tags.RESPONSE_PAYLOAD
)
self._unique_identifier = None
self._data = None
self.unique_identifier = unique_identifier
self.data = data
|
Construct a Decrypt response payload struct.
Args:
unique_identifier (string): The ID of the managed object (e.g.,
a symmetric key) used for decryption. Required for encoding
and decoding.
data (bytes): The decrypted data in binary form. Required for
encoding and decoding.
|
juraj-google-style
|
def __init__(self, function_name, unique_function_id, node_name_prefix, attr_name, level=1, children_inputs_mappings=None):
self._function_name = function_name
self._unique_function_id = unique_function_id
self._next_global_index = 0
self._used_global_indices = set()
self._tag_to_global_index = {}
self._tag_to_next_sort_index = {}
self._node_name_prefix = node_name_prefix
self._attr_name = attr_name
self._level = level
self._children_inputs_mappings = children_inputs_mappings
|
Initialize ophint argument.
Args:
function_name: Name of the function that this tracks arguments for.
unique_function_id: UUID of function that this tracks arguments for.
node_name_prefix: How identities that are created are named.
attr_name: Name of attribute to use to store the index for this hint.
i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX
level: Hierarchical level of the Ophint node, a number.
children_inputs_mappings: Inputs/Outputs mapping for children hints.
|
github-repos
|
def getaccountaddress(self, user_id=""):
address = self.rpc.call("getaccountaddress", user_id)
self.logger.debug("Your", self.coin, "address is", address)
return address
|
Get the coin address associated with a user id.
If the specified user id does not yet have an address for this
coin, then generate one.
Args:
user_id (str): this user's unique identifier
Returns:
str: Base58Check address for this account
|
juraj-google-style
|
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):
try:
attribute_values = [
getattr(mapped_value, attribute_name, None)
for attribute_name in self._attribute_names]
attribute_values = [
value for value in attribute_values if value is not None]
return self._operation.WriteTo(tuple(attribute_values))
except Exception as exception:
error_string = (
'Unable to write: {0:s} to byte stream with error: {1!s}').format(
self._data_type_definition.name, exception)
raise errors.FoldingError(error_string)
|
Folds the data type into a byte stream.
Args:
mapped_value (object): mapped value.
Returns:
bytes: byte stream.
Raises:
FoldingError: if the data type definition cannot be folded into
the byte stream.
|
juraj-google-style
|
def set_name(self, name):
if (not self._campfire.get_user().admin):
return False
result = self._connection.put(('room/%s' % self.id), {'room': {'name': name}})
if result['success']:
self._load()
return result['success']
|
Set the room name.
Args:
name (str): Name
Returns:
bool. Success
|
codesearchnet
|
def undo_windowing(hidden_states: torch.Tensor, shape: List[int], mask_unit_shape: List[int]) -> torch.Tensor:
batch_size, hidden_size = (hidden_states.shape[0], hidden_states.shape[-1])
num_mask_units = [s // mu for s, mu in zip(shape, mask_unit_shape)]
hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size)
hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5)
hidden_states = hidden_states.reshape(batch_size, *shape, hidden_size)
return hidden_states
|
Restore spatial organization by undoing windowed organization of mask units.
Args:
hidden_states (`torch.Tensor`): The hidden states tensor of shape `[batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]`.
shape (`List[int]`): The original shape of the hidden states tensor before windowing.
mask_unit_shape (`List[int]`): The shape of the mask units used for windowing.
Returns:
torch.Tensor: The restored hidden states tensor of shape [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size].
|
github-repos
|
def _handle_request(self, request):
if request is None:
return Response(success=False, uid=None)
action_map = {
'start_dag': self._handle_start_dag,
'stop_workflow': self._handle_stop_workflow,
'join_dags': self._handle_join_dags,
'stop_dag': self._handle_stop_dag,
'is_dag_stopped': self._handle_is_dag_stopped
}
if request.action in action_map:
return action_map[request.action](request)
else:
raise RequestActionUnknown()
|
Handle an incoming request by forwarding it to the appropriate method.
Args:
request (Request): Reference to a request object containing the
incoming request.
Raises:
RequestActionUnknown: If the action specified in the request is not known.
Returns:
Response: A response object containing the response from the method handling
the request.
|
juraj-google-style
|
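A self-contained sketch of the same action-dispatch pattern; the Request/Handler names and actions below are illustrative, not lightflow's API.
from dataclasses import dataclass

@dataclass
class Request:
    action: str
    payload: dict

class UnknownAction(Exception):
    pass

class Handler:
    def handle(self, request):
        # Map action names to bound methods, then dispatch.
        action_map = {
            'ping': self._handle_ping,
            'echo': self._handle_echo,
        }
        if request.action not in action_map:
            raise UnknownAction(request.action)
        return action_map[request.action](request)

    def _handle_ping(self, request):
        return 'pong'

    def _handle_echo(self, request):
        return request.payload

print(Handler().handle(Request('ping', {})))        # pong
print(Handler().handle(Request('echo', {'a': 1})))  # {'a': 1}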
class RandomUniform(Initializer):
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_floating and (not dtype.is_integer):
raise ValueError('Expected float or integer dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)
def get_config(self):
return {'minval': self.minval, 'maxval': self.maxval, 'seed': self.seed}
|
Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
|
github-repos
|
def _write_init_fetchers(self, filenames):
destination = "%s%s" % (self.output_directory, self.fetchers_path)
self.write(destination=destination, filename="__init__.py", template_name="__init_fetcher__.py.tpl",
filenames=self._prepare_filenames(filenames, suffix='Fetcher'),
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
header=self.header_content)
|
Write fetcher init file
Args:
filenames (dict): dict of filename and classes
|
juraj-google-style
|
def _StopFolderSelectionMethod(self, stop_folder):
if (not self.show_stop_hierarchy):
return (lambda stop: (stop_folder, None))
station_folder = self._CreateFolder(stop_folder, 'Stations')
platform_folder = self._CreateFolder(stop_folder, 'Platforms')
platform_connections = self._CreateFolder(platform_folder, 'Connections')
entrance_folder = self._CreateFolder(stop_folder, 'Entrances')
entrance_connections = self._CreateFolder(entrance_folder, 'Connections')
standalone_folder = self._CreateFolder(stop_folder, 'Stand-Alone')
def FolderSelectionMethod(stop):
if (stop.location_type == transitfeed.Stop.LOCATION_TYPE_STATION):
return (station_folder, None)
elif (stop.location_type == googletransit.Stop.LOCATION_TYPE_ENTRANCE):
return (entrance_folder, entrance_connections)
elif stop.parent_station:
return (platform_folder, platform_connections)
return (standalone_folder, None)
return FolderSelectionMethod
|
Create a method to determine which KML folder a stop should go in.
Args:
stop_folder: the parent folder element for all stops.
Returns:
A function that should accept a Stop argument and return a tuple of
(stop KML folder, pathways KML folder).
Given a Stop, we need to determine which folder the stop should go in. In
the most basic case, that's the root Stops folder. However, if
show_stop_hierarchy is enabled, we put a stop in a separate sub-folder
depending on if the stop is a station, a platform, an entrance, or just a
plain-old stand-alone stop. This method returns a function that is used
to pick which folder a stop should go in. It also optionally returns
a folder where any line-string connections associated with a stop (eg. to
show the pathway between an entrance and a station) should be added.
|
codesearchnet
|
def __init__(self, hash_start=[], hash_stop=UInt256()):
self.HashStart = hash_start
self.HashStop = hash_stop
|
Create an instance.
Args:
hash_start (list): a list of hash values. Each value is of the bytearray type. Note: should actually be UInt256 objects.
hash_stop (UInt256):
|
juraj-google-style
|
def append_dims_and_file_extension(fname, data_df):
if not fname.endswith(".gct"):
out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])
return out_fname
else:
basename = os.path.splitext(fname)[0]
out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])
return out_fname
|
Append dimensions and file extension to output filename.
N.B. Dimensions are cols x rows.
Args:
fname (string): output filename
data_df (pandas df)
Returns:
out_fname (string): output filename with matrix dims and .gct appended
|
juraj-google-style
|
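A usage sketch for append_dims_and_file_extension as defined above (assumes os and pandas are imported alongside it); note that dimensions are written cols x rows.
import pandas as pd

data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])                    # 2 rows x 3 cols
print(append_dims_and_file_extension('my_output', data_df))      # my_output_n3x2.gct
print(append_dims_and_file_extension('my_output.gct', data_df))  # my_output_n3x2.gct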
def walk_dependencies(root, visitor):
def visit(parent, visitor):
for d in get_dependencies(parent):
visitor(d, parent)
visit(d, visitor)
visitor(root, None)
visit(root, visitor)
|
Call visitor on root and all dependencies reachable from it, depth first (pre-order).
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`.
|
juraj-google-style
|
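A self-contained sketch of the same traversal over a toy dependency table; get_dependencies here is a stand-in returning each component's direct dependencies.
# Toy dependency graph and a stand-in for get_dependencies().
DEPS = {'app': ['db', 'cache'], 'db': ['net'], 'cache': [], 'net': []}

def get_dependencies(component):
    return DEPS.get(component, [])

def walk_dependencies(root, visitor):
    def visit(parent, visitor):
        for d in get_dependencies(parent):
            visitor(d, parent)
            visit(d, visitor)
    visitor(root, None)
    visit(root, visitor)

walk_dependencies('app', lambda component, parent: print(component, '<-', parent))
# app <- None, db <- app, net <- db, cache <- app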
def GetBaseFiles(self, diff):
files = {}
for line in diff.splitlines(True):
if (line.startswith('Index:') or line.startswith('Property changes on:')):
(unused, filename) = line.split(':', 1)
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
|
Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
|
codesearchnet
|
def CreateTaskStart(self):
task_start = TaskStart()
task_start.identifier = self.identifier
task_start.session_identifier = self.session_identifier
task_start.timestamp = self.start_time
return task_start
|
Creates a task start.
Returns:
TaskStart: task start attribute container.
|
codesearchnet
|
def VerifyServerPEM(self, http_object):
try:
server_pem = http_object.data
server_url = http_object.url
if b"BEGIN CERTIFICATE" in server_pem:
server_certificate = rdf_crypto.RDFX509Cert(server_pem)
self.communicator.LoadServerCertificate(
server_certificate=server_certificate, ca_certificate=self.ca_cert)
logging.info("Server PEM re-keyed.")
return True
except Exception as e:
logging.info("Unable to verify server certificate at %s: %s", server_url,
e)
return False
|
Check the server PEM for validity.
This is used to determine connectivity to the server. Sometimes captive
portals return a valid HTTP status, but the data is corrupted.
Args:
http_object: The response received from the server.
Returns:
True if the response contains a valid server certificate.
|
juraj-google-style
|
def _make_request(self, url, method='get', data=None, extra_headers=None):
attempts = 0
while (attempts < 1):
if (not self._is_authenticated):
self._authenticate()
try:
return self._send_request(url, method, data, extra_headers)
except HTTPError as e:
if (e.response.status_code == 403):
logger.info('Authenticated session against NetMRI timed out. Retrying.')
self._is_authenticated = False
attempts += 1
else:
raise
|
Prepares the request, checks for authentication and retries in case of issues
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict
|
codesearchnet
|
def Copy(self, name=None):
new = copy.copy(self)
new.d = copy.copy(self.d)
new.name = name if name is not None else self.name
return new
|
Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
Args:
name: string name for the new Hist
|
juraj-google-style
|
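A minimal stand-in Hist class showing what the shallow copy of d does and does not protect against, per the docstring's note about copy.deepcopy.
import copy

class Hist:
    def __init__(self, d, name=''):
        self.d = d
        self.name = name

    def Copy(self, name=None):
        new = copy.copy(self)
        new.d = copy.copy(self.d)
        new.name = name if name is not None else self.name
        return new

orig = Hist({'x': [1, 2]}, name='orig')
shallow = orig.Copy(name='copy')
shallow.d['y'] = 3          # new key: does not affect orig.d
shallow.d['x'].append(99)   # mutating a shared value *does* affect orig.d
print(orig.d)               # {'x': [1, 2, 99]}
deep = copy.deepcopy(orig)  # fully independent copy, as the docstring suggests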
def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']
line_layer = dwg.g(id=('layer%d' % layer_number), **{'inkscape:label': layer_name, 'inkscape:groupmode': 'layer'})
for (i, (x1, y1, x2, y2)) in df_endpoints[coord_columns].iterrows():
line_i = dwg.line((x1, y1), (x2, y2), id=('line%d' % i), style='stroke:#000000')  # stroke style truncated in the source; a plain black stroke is assumed here
line_layer.add(line_i)
dwg.add(line_layer)
output = StringIO.StringIO()
dwg.write(output)
output.seek(0)
return output
|
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
|
codesearchnet
|
def get_formatted_string(self, input_string):
if isinstance(input_string, str):
try:
return self.get_processed_string(input_string)
except KeyNotInContextError as err:
raise KeyNotInContextError(f"Unable to format '{input_string}' because {err}") from err
elif isinstance(input_string, SpecialTagDirective):
return input_string.get_value(self)
else:
raise TypeError(f'can only format on strings. {input_string} is a {type(input_string)} instead.')
|
Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
Then this will return string: "Piping down the valleys wild"
Args:
input_string: string to parse for substitutions.
Returns:
Formatted string.
Raises:
KeyNotInContextError: context[key] has {somekey} where somekey does
not exist in context dictionary.
TypeError: Attempt operation on a non-string type.
|
codesearchnet
|
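A rough, happy-path sketch of the interpolation described in the docstring using str.format_map; this is not the pypyr implementation and ignores special tag directives.
context = {'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
input_string = 'Piping {key1} the {key2} wild'

try:
    print(input_string.format_map(context))  # Piping down the valleys wild
except KeyError as err:
    # Rough analogue of KeyNotInContextError for a missing {somekey}.
    raise KeyError(f"Unable to format '{input_string}' because {err}") from err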
def runCmd(cls, cmd):
cit.echo(cmd, 'command')
result = os.system(cmd)
cls.checkResult(result)
|
Run a command and show whether it succeeded or failed.
Args:
cmd: string
Returns:
bool: if this command run successfully
|
codesearchnet
|
async def run_tasks(context):
running_tasks = RunTasks()
context.running_tasks = running_tasks
status = (await running_tasks.invoke(context))
context.running_tasks = None
return status
|
Run any tasks returned by claimWork.
Returns the integer status of the task that was run, or None if no task was
run.
args:
context (scriptworker.context.Context): the scriptworker context.
Raises:
Exception: on unexpected exception.
Returns:
int: exit status
None: if no task run.
|
codesearchnet
|
def security_label(self, name, description=None, color=None):
label = SecurityLabel(name, description, color)
for label_data in self._labels:
if (label_data.name == name):
label = label_data
break
else:
self._labels.append(label)
return label
|
Return instance of SecurityLabel.
.. note:: The provided security label will be created if it doesn't exist. If the security
label already exists nothing will be changed.
Args:
name (str): The value for this security label.
description (str): A description for this security label.
color (str): A color (hex value) for this security label.
Returns:
obj: An instance of SecurityLabel.
|
codesearchnet
|
def scripthash_to_address(scripthash):
sb = bytearray([ADDRESS_VERSION]) + scripthash
c256 = bin_dbl_sha256(sb)[0:4]
outb = sb + bytearray(c256)
return base58.b58encode(bytes(outb)).decode("utf-8")
|
Convert a script hash to a public address.
Args:
scripthash (bytes):
Returns:
str: base58 encoded string representing the wallet address.
|
juraj-google-style
|
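A self-contained Base58Check sketch matching the helpers the snippet relies on; ADDRESS_VERSION = 23 (NEO's 0x17) and the double-SHA256 helper are assumptions for illustration.
import hashlib
import base58  # pip install base58

ADDRESS_VERSION = 23  # assumed version byte for this sketch (NEO mainnet, 0x17)

def bin_dbl_sha256(data):
    # Double SHA-256, as used for the 4-byte Base58Check checksum.
    return hashlib.sha256(hashlib.sha256(bytes(data)).digest()).digest()

def scripthash_to_address(scripthash):
    sb = bytearray([ADDRESS_VERSION]) + scripthash
    checksum = bin_dbl_sha256(sb)[0:4]
    return base58.b58encode(bytes(sb + bytearray(checksum))).decode('utf-8')

print(scripthash_to_address(bytes(20)))  # address for an all-zero 20-byte script hash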
def Update(self, attribute=None):
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
flow_id = flow.StartAFF4Flow(
client_id=client_id,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
notify_to_user=False,
token=self.token)
return flow_id
|
Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes: CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow.
|
juraj-google-style
|
def CanonicalPathToLocalPath(path):
path = path.replace("/\\", "\\")
path = path.replace("/", "\\")
m = re.match(r"\\([a-zA-Z]):(.*)$", path)
if m:
path = "%s:\\%s" % (m.group(1), m.group(2).lstrip("\\"))
return path
|
r"""Converts the canonical paths as used by GRR to OS specific paths.
Due to the inconsistencies between handling paths in windows we need to
convert a path to an OS specific version prior to using it. This function
should be called just before any OS specific functions.
Canonical paths on windows have:
- / instead of \.
- Begin with /X:// where X is the drive letter.
Args:
path: A canonical path specification.
Returns:
A windows specific path.
|
juraj-google-style
|
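A usage sketch for CanonicalPathToLocalPath as defined above.
print(CanonicalPathToLocalPath('/C:/Windows/System32/notepad.exe'))
# C:\Windows\System32\notepad.exe
print(CanonicalPathToLocalPath('/C:'))
# C:\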
def distance_from_point(self, pt):
return np.linalg.norm(np.array(pt) - self.coords)
|
Returns distance between the site and a point in space.
Args:
pt: Cartesian coordinates of point.
Returns:
Distance (float)
|
juraj-google-style
|
def initialize_remaining_constants(self, value=0):
remaining = []
for (node, _inputs, _outputs) in self.iterate_bfs():
streams = (node.input_streams() + [node.stream])
for stream in streams:
if (stream.stream_type is not DataStream.ConstantType):
continue
if (stream not in self.constant_database):
self.add_constant(stream, value)
remaining.append(stream)
return remaining
|
Ensure that all constant streams referenced in the sensor graph have a value.
Constant streams that are automatically created by the compiler are initialized
as part of the compilation process but it's possible that the user references
other constant streams but never assigns them an explicit initial value. This
function will initialize them all to a default value (0 if not passed) and
return the streams that were so initialized.
Args:
value (int): Optional value to use to initialize all uninitialized constants.
Defaults to 0 if not passed.
Returns:
list(DataStream): A list of all of the constant streams that were not previously
initialized and were initialized to the given value in this function.
|
codesearchnet
|
def Execute(self, http):
self._Execute(http)
for key in self.__request_response_handlers:
response = self.__request_response_handlers[key].response
callback = self.__request_response_handlers[key].handler
exception = None
if response.status_code >= 300:
exception = exceptions.HttpError.FromResponse(response)
if callback is not None:
callback(response, exception)
if self.__callback is not None:
self.__callback(response, exception)
|
Execute all the requests as a single batched HTTP request.
Args:
http: A httplib2.Http object to be used with the request.
Returns:
None
Raises:
BatchError if the response is the wrong format.
|
juraj-google-style
|
def __init__(self, output_mediator):
super(SharedElasticsearchOutputModule, self).__init__(output_mediator)
self._client = None
self._document_type = self._DEFAULT_DOCUMENT_TYPE
self._event_documents = []
self._flush_interval = self._DEFAULT_FLUSH_INTERVAL
self._host = None
self._index_name = None
self._number_of_buffered_events = 0
self._password = None
self._port = None
self._username = None
self._use_ssl = None
self._ca_certs = None
self._url_prefix = None
|
Initializes an Elasticsearch output module.
Args:
output_mediator (OutputMediator): mediates interactions between output
modules and other components, such as storage and dfvfs.
|
juraj-google-style
|
def merge_files(context):
resolver = EFTemplateResolver(
profile=context.profile,
region=context.region,
env=context.env,
service=context.service
)
try:
with open(context.template_path, 'r') as f:
template_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))
if context.no_params is False:
try:
with open(context.param_path, 'r') as f:
param_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))
dest = yaml.safe_load(param_body)["dest"]
if "environments" in dest:
if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
return
resolver.load(template_body, param_body)
else:
resolver.load(template_body)
rendered_body = resolver.render()
if not resolver.resolved_ok():
raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))
if context.lint:
if context.template_path.endswith(".json"):
try:
json.loads(rendered_body, strict=False)
print("JSON passed linting process.")
except ValueError as e:
fail("JSON failed linting process.", e)
elif context.template_path.endswith((".yml", ".yaml")):
conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
lint_output = yamllinter.run(rendered_body, conf)
lint_level = 'error'
lint_errors = [issue for issue in lint_output if issue.level == lint_level]
if lint_errors:
split_body = rendered_body.splitlines()
for error in lint_errors:
print(error)
print("\t", split_body[error.line - 1])
fail("YAML failed linting process.")
if context.verbose:
print(context)
if context.no_params:
print('no_params flag set to true!')
print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
else:
dir_path = normpath(dirname(dest["path"]))
print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
print("chmod file to: " + dest["file_perm"])
user, group = dest["user_group"].split(":")
print("chown last directory in path to user: {}, group: {}".format(user, group))
print("chown file to user: {}, group: {}\n".format(user, group))
print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
elif context.silent:
print("Config template rendered successfully.")
else:
print(rendered_body)
|
Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object
|
juraj-google-style
|
def copy_entities(self, from_namespace, from_workspace, etype, enames):
r = fapi.copy_entities(from_namespace, from_workspace,
self.namespace, self.name, etype, enames,
self.api_url)
fapi._check_response_code(r, 201)
|
Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
|
juraj-google-style
|
def CacheObject(self, identifier, vfs_object):
if (identifier in self._values):
raise KeyError('Object already cached for identifier: {0:s}'.format(identifier))
if (len(self._values) == self._maximum_number_of_cached_values):
raise errors.CacheFullError('Maximum number of cached values reached.')
self._values[identifier] = ObjectsCacheValue(vfs_object)
|
Caches a VFS object.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
vfs_object (object): VFS object to cache.
Raises:
CacheFullError: if the maximum number of cached values is reached.
KeyError: if the VFS object already is cached.
|
codesearchnet
|
def AddItem(self, key, item, f=(lambda x: x)):
with self._mutex:
bucket = self._buckets[key]
bucket.AddItem(item, f)
|
Add a new item to the Reservoir with the given tag.
If the reservoir has not yet reached full size, the new item is guaranteed
to be added. If the reservoir is full, then behavior depends on the
always_keep_last boolean.
If always_keep_last was set to true, the new item is guaranteed to be added
to the reservoir, and either the previous last item will be replaced, or
(with low probability) an older item will be replaced.
If always_keep_last was set to false, then the new item will replace an
old item with low probability.
If f is provided, it will be applied to transform item (lazily, iff item is
going to be included in the reservoir).
Args:
key: The key to store the item under.
item: The item to add to the reservoir.
f: An optional function to transform the item prior to addition.
|
codesearchnet
|
def report_validation_error(self, element_path: str, msg: str) -> None:
raise NotImplementedError('Subclasses must implement report_validation_error.')
|
Reports the given error during FHIR validation.
This indicates that the resource does not fully comply with the FHIR
specification or profile.
Args:
element_path: The path to the field where the issue occurred.
msg: The error message produced.
|
github-repos
|
def _get_vep_transcript(self, transcript_info):
transcript = Transcript(hgnc_symbol=transcript_info.get('SYMBOL'), transcript_id=transcript_info.get('Feature'), ensembl_id=transcript_info.get('Gene'), biotype=transcript_info.get('BIOTYPE'), consequence=transcript_info.get('Consequence'), strand=transcript_info.get('STRAND'), sift=transcript_info.get('SIFT'), polyphen=transcript_info.get('PolyPhen'), exon=transcript_info.get('EXON'), HGVSc=transcript_info.get('HGVSc'), HGVSp=transcript_info.get('HGVSp'), GMAF=transcript_info.get('GMAF'), ExAC_MAF=transcript_info.get('ExAC_MAF'))
return transcript
|
Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcripts
|
codesearchnet
|
def get_for_type(input_type='text'):
if (input_type in RandomInputHelper.cache):
return RandomInputHelper.cache[input_type]
types = {'text': RandomInputHelper.get_random_value, 'hidden': RandomInputHelper.get_random_value, 'search': RandomInputHelper.get_random_value, 'color': RandomInputHelper.get_random_color, 'week': {'function': RandomInputHelper.get_random_value, 'params': [2, ['1234']]}, 'password': RandomInputHelper.get_random_password, 'number': RandomInputHelper.get_random_number, 'tel': RandomInputHelper.get_random_telephonenumber, 'url': RandomInputHelper.get_random_url, 'textarea': RandomInputHelper.get_random_text, 'email': RandomInputHelper.get_random_email}
if (types.get(input_type) is None):
return ''
if (type(types.get(input_type)) is dict):
generator = types.get(input_type)
value = generator.get('function')(*generator.get('params'))
else:
value = types.get(input_type)()
RandomInputHelper.cache[input_type] = value
return value
|
Get a random string for the given html input type
Args:
input_type (str): The input type (e.g. email).
Returns:
str: The (cached) random value.
|
codesearchnet
|
def __init__(self, value, data_type, masks=None, name='Secret Data'):
super(SecretData, self).__init__()
self._object_type = enums.ObjectType.SECRET_DATA
self.value = value
self.data_type = data_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
self.validate()
|
Create a SecretData object.
Args:
value(bytes): The bytes representing secret data.
data_type(SecretDataType): An enumeration defining the type of the
secret value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key.
|
juraj-google-style
|
def GetAPFSFileEntryByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
identifier = getattr(path_spec, 'identifier', None)
if identifier is not None:
fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(
identifier)
elif location is not None:
fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)
else:
raise errors.PathSpecError(
'Path specification missing location and identifier.')
return fsapfs_file_entry
|
Retrieves the APFS file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
pyfsapfs.file_entry: file entry.
Raises:
PathSpecError: if the path specification is missing location and
identifier.
|
juraj-google-style
|
def _GetDisplayPath(self, path_spec, full_path, data_stream_name):
display_path = ''
if path_spec.HasParent():
parent_path_spec = path_spec.parent
if (parent_path_spec and (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION)):
display_path = ''.join([display_path, parent_path_spec.location])
display_path = ''.join([display_path, full_path])
if data_stream_name:
display_path = ':'.join([display_path, data_stream_name])
return display_path
|
Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
full_path (str): full path of the file entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display.
|
codesearchnet
|
class TimesFmOutput(BaseModelOutput):
loc: Optional[torch.Tensor] = None
scale: Optional[torch.Tensor] = None
|
Args:
loc (`torch.Tensor` of shape `(batch_size, )`):
The mean of the time series inputs.
scale (`torch.Tensor` of shape `(batch_size,)`):
The scale of the time series inputs.
|
github-repos
|
def get_card(self, id, name=None):
return self.create_card(dict(id=id, name=name))
|
Get a card
Returns:
Card: The card with the given `id`
|
codesearchnet
|
def process_actions(self, actions):
notices = {}
notification_contacts = {}
for action in actions:
resource = action['resource']
action_status = ActionStatus.SUCCEED
try:
if action['action'] == AuditActions.REMOVE:
action_status = self.process_action(
resource,
AuditActions.REMOVE
)
if action_status == ActionStatus.SUCCEED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.STOP:
action_status = self.process_action(
resource,
AuditActions.STOP
)
if action_status == ActionStatus.SUCCEED:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
elif action['action'] == AuditActions.FIXED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.ALERT:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
db.session.commit()
if action_status == ActionStatus.SUCCEED:
for owner in [
dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}
]:
if owner['value'] not in notification_contacts:
contact = NotificationContact(type=owner['type'], value=owner['value'])
notification_contacts[owner['value']] = contact
notices[contact] = {
'fixed': [],
'not_fixed': []
}
else:
contact = notification_contacts[owner['value']]
if action['action'] == AuditActions.FIXED:
notices[contact]['fixed'].append(action)
else:
notices[contact]['not_fixed'].append(action)
except Exception as ex:
self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(
action['resource'].account.account_name,
action['resource'].id,
action['resource'],
ex
))
return notices
|
Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications
|
juraj-google-style
|
def write_file(self, filename='HEADER'):
with open(filename, "w") as f:
f.write(str(self) + "\n")
|
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
|
juraj-google-style
|
def addColumn(self, columnName, dtype, defaultValue):
model = self.tableView.model()
if (model is not None):
model.addDataFrameColumn(columnName, dtype, defaultValue)
self.addColumnButton.setChecked(False)
|
Adds a column with the given parameters to the underlying model
This method is also a slot.
If no model is set, nothing happens.
Args:
columnName (str): The name of the new column.
dtype (numpy.dtype): The datatype of the new column.
defaultValue (object): Fill the column with this value.
|
codesearchnet
|
def to_deeper_model(self, target_id, new_layer):
self.operation_history.append(("to_deeper_model", target_id, new_layer))
input_id = self.layer_id_to_input_node_ids[target_id][0]
output_id = self.layer_id_to_output_node_ids[target_id][0]
if self.weighted:
if is_layer(new_layer, "Dense"):
init_dense_weight(new_layer)
elif is_layer(new_layer, "Conv"):
init_conv_weight(new_layer)
elif is_layer(new_layer, "BatchNormalization"):
init_bn_weight(new_layer)
self._insert_new_layers([new_layer], input_id, output_id)
|
Insert a relu-conv-bn block after the target block.
Args:
target_id: A convolutional layer ID. The new block should be inserted after the block.
new_layer: An instance of StubLayer subclasses.
|
juraj-google-style
|
def __init__(self, jid, password, verify_security=False):
self.jid = aioxmpp.JID.fromstr(jid)
self.password = password
self.verify_security = verify_security
self.behaviours = []
self._values = {}
self.conn_coro = None
self.stream = None
self.client = None
self.message_dispatcher = None
self.presence = None
self.loop = None
self.container = Container()
self.container.register(self)
self.loop = self.container.loop
self.web = WebApp(agent=self)
self.traces = TraceStore(size=1000)
self._alive = Event()
|
Creates an agent
Args:
jid (str): The identifier of the agent in the form username@server
password (str): The password to connect to the server
verify_security (bool): Whether or not to verify the SSL certificates
|
juraj-google-style
|
def shape(self):
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
|
The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
|
github-repos
|
def increase_route_count(self, crawled_request):
for route in self.__routing_options.routes:
if re.compile(route).match(crawled_request.url):
count_key = (str(route) + crawled_request.method)
if (count_key in self.__routing_count.keys()):
self.__routing_count[count_key] += 1
else:
self.__routing_count[count_key] = 1
break
|
Increase the count that determines how many times a URL of a certain route has been crawled.
Args:
crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.
|
codesearchnet
|
def __init__(self, operation: Type[Operation], *expressions: Expression) -> None:
self.operation = operation
self.length = len(expressions)
self.constant = Multiset()
self.syntactic = Multiset()
self.sequence_variables = Multiset()
self.sequence_variable_infos = dict()
self.fixed_variables = Multiset()
self.fixed_variable_infos = dict()
self.rest = Multiset()
self.sequence_variable_min_length = 0
self.fixed_variable_length = 0
self.wildcard_min_length = 0
self.optional_count = 0
self.wildcard_fixed = None
for expression in expressions:
expression = expression
if is_constant(expression):
self.constant[expression] += 1
elif isinstance(expression, Wildcard):
wc = cast(Wildcard, expression)
if wc.variable_name:
name = wc.variable_name
if wc.fixed_size:
self.fixed_variables[name] += 1
symbol_type = getattr(wc, 'symbol_type', None)
self._update_var_info(self.fixed_variable_infos, name, wc.min_count, symbol_type, wc.optional)
if wc.optional is None:
self.fixed_variable_length += wc.min_count
else:
self.optional_count += 1
else:
self.sequence_variables[name] += 1
self._update_var_info(self.sequence_variable_infos, name, wc.min_count, None, wc.optional)
if wc.optional is None:
self.sequence_variable_min_length += wc.min_count
else:
self.wildcard_min_length += wc.min_count
if self.wildcard_fixed is None:
self.wildcard_fixed = wc.fixed_size
else:
self.wildcard_fixed = self.wildcard_fixed and wc.fixed_size
elif is_syntactic(expression):
self.syntactic[expression] += 1
else:
self.rest[expression] += 1
|
Create a CommutativePatternsParts instance.
Args:
operation:
The type of the commutative operation. Must be a subclass of :class:`.Operation` with
:attr:`~.Operation.commutative` set to ``True``.
*expressions:
The operands of the commutative operation.
|
juraj-google-style
|
def _build_encryption_key_information(self, value):
if (value is None):
return None
if (not isinstance(value, dict)):
raise TypeError('Encryption key information must be a dictionary.')
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(cryptographic_parameters)
encryption_key_information = cobjects.EncryptionKeyInformation(unique_identifier=value.get('unique_identifier'), cryptographic_parameters=cryptographic_parameters)
return encryption_key_information
|
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
|
codesearchnet
|
def albedo(self, value=999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `albedo`'.format(value))
self._albedo = value
|
Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def ParseRecord(self, parser_mediator, key, structure):
if key != 'logline':
logger.warning(
'Unable to parse record, unknown structure: {0:s}'.format(key))
return
try:
timestamp = int(structure.timestamp)
except ValueError:
logger.debug('Invalid timestamp string {0:s}, skipping record'.format(
structure.timestamp))
return
try:
nickname, text = self._StripThenGetNicknameAndText(structure.text)
except pyparsing.ParseException:
logger.debug('Error parsing entry at offset {0:d}'.format(self._offset))
return
event_data = XChatScrollbackEventData()
event_data.nickname = nickname
event_data.offset = self._offset
event_data.text = text
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a log record structure.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure parsed from the log file.
|
juraj-google-style
|
def experimental_from_proto(cls, proto: struct_pb2.TypeSpecProto) -> 'TypeSpec':
return nested_structure_coder.decode_proto(struct_pb2.StructuredValue(type_spec_value=proto))
|
Returns a TypeSpec instance based on the serialized proto.
Do NOT override for custom non-TF types.
Args:
proto: Proto generated using 'experimental_as_proto'.
|
github-repos
|
def dump(destination, ms, single=False, pretty_print=False, **kwargs):
text = dumps(ms, single=single, pretty_print=pretty_print, **kwargs)
if hasattr(destination, 'write'):
print(text, file=destination)
else:
with open(destination, 'w') as fh:
print(text, file=fh)
|
Serialize Xmrs objects to the Prolog representation and write to a file.
Args:
destination: filename or file object where data will be written
ms: an iterator of Xmrs objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single Xmrs object
instead of as an iterator
pretty_print: if `True`, add newlines and indentation
|
codesearchnet
|
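A minimal usage sketch for the serializer above, assuming `xs` is a list of Xmrs objects obtained elsewhere from the same library; the output filename is a placeholder.
import sys

dump(sys.stdout, xs[0], single=True, pretty_print=True)  # one object to an open stream
dump('analyses.pl', xs)                                  # whole collection to a file by name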
def DeterminePeakMemoryUsage(self, item):
return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster)
|
Returns a snapshot of the peak memory usage.
Args:
item: The item for which to measure the costs.
Returns:
A hashtable indexed by device name.
|
github-repos
|
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
ds = ds.batch(batch_size)
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
return h
|
Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
|
juraj-google-style
|
def ValidateAccessAndSubjects(requested_access, subjects):
if (not requested_access):
raise access_control.UnauthorizedAccess(('Must specify requested access type for %s' % subjects))
for s in requested_access:
if (s not in 'rwq'):
raise ValueError(('Invalid access requested for %s: %s' % (subjects, requested_access)))
if (('q' in requested_access) and ('r' not in requested_access)):
raise access_control.UnauthorizedAccess(('Invalid access request: query permissions require read permissions for %s' % subjects), requested_access=requested_access)
return True
|
Does basic requested access validation.
Args:
requested_access: String consisting of 'r', 'w' and 'q' characters.
subjects: A list of subjects that are about to be accessed with a given
requested_access. Used for logging purposes only.
Returns:
True if requested_access is valid.
Raises:
access_control.UnauthorizedAccess: if requested_access is not valid.
ValueError: if requested_access contains characters other than 'r', 'w' or 'q'.
|
codesearchnet
|
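A brief illustration of the validator above; the subject string is hypothetical and the failure cases raise the exceptions shown in the code.
ValidateAccessAndSubjects('rq', ['aff4:/users/admin'])  # True: query access is paired with read
ValidateAccessAndSubjects('rw', ['aff4:/users/admin'])  # True
# 'q' without 'r' raises access_control.UnauthorizedAccess,
# and an unknown character such as 'x' raises ValueError.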
def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=FLAGS):
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
message = ('%s one of (%s) must be specified.' %
('Exactly' if required else 'At most', ', '.join(flag_names)))
raise ValidationError(message)
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values)
|
Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against.
|
juraj-google-style
|
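A short usage sketch, assuming the function is exposed the way absl.flags exposes it; the flag names are hypothetical.
from absl import flags

flags.DEFINE_string('input_csv', None, 'Read input from a CSV file.')
flags.DEFINE_string('input_json', None, 'Read input from a JSON file.')

# At most one of the two may be set; pass required=True to demand exactly one.
flags.mark_flags_as_mutual_exclusive(['input_csv', 'input_json'])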
def list_files(root, suffix, prefix=False):
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
|
List all files ending with a suffix at a given root
Args:
root (str): Path to the directory whose files need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
|
juraj-google-style
|
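A quick illustration of the helper above; the directory and suffixes are placeholders. Note that prefix must be exactly True (not merely truthy) for paths to be prepended.
names = list_files('~/datasets/raw', ('.jpg', '.png'))               # file names only
paths = list_files('~/datasets/raw', ('.jpg', '.png'), prefix=True)  # full paths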
def validate(self):
endpoint = '/validate'
payload = dict(accessToken=self.access_token)
rep = self._ygg_req(endpoint, payload)
return (not bool(rep))
|
Check if an access token is valid
Returns:
bool: True if the access token is valid, False otherwise
|
codesearchnet
|
def removedirs(self, target_directory):
target_directory = self.filesystem.absnormpath(target_directory)
directory = self.filesystem.confirmdir(target_directory)
if directory.contents:
self.filesystem.raise_os_error(errno.ENOTEMPTY, self.path.basename(target_directory))
else:
self.rmdir(target_directory)
(head, tail) = self.path.split(target_directory)
if (not tail):
(head, tail) = self.path.split(head)
while (head and tail):
head_dir = self.filesystem.confirmdir(head)
if head_dir.contents:
break
self.filesystem.rmdir(head, allow_symlink=True)
(head, tail) = self.path.split(head)
|
Remove a leaf fake directory and all empty intermediate ones.
Args:
target_directory: the directory to be removed.
Raises:
OSError: if target_directory does not exist or is not a directory.
OSError: if target_directory is not empty.
|
codesearchnet
|
def with_subject(self, subject):
return self.__class__(
self._signer,
service_account_email=self._service_account_email,
scopes=self._scopes,
token_uri=self._token_uri,
subject=subject,
project_id=self._project_id,
additional_claims=self._additional_claims.copy())
|
Create a copy of these credentials with the specified subject.
Args:
subject (str): The subject claim.
Returns:
google.auth.service_account.Credentials: A new credentials
instance.
|
juraj-google-style
|
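A brief sketch of domain-wide delegation built on these credentials, following the public google.auth API; the key file and user address are placeholders.
from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file(
    'service-account.json',
    scopes=['https://www.googleapis.com/auth/drive.readonly'])

delegated = creds.with_subject('user@example.com')  # the original credentials are left unchanged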
def report_proto_path(self):
return self._report_proto_path
|
Getter for path where tensor_tracer.proto object should be written.
Returns:
A string path.
|
github-repos
|
def recipe_google_ads_segmentology(config, auth_read, customer_id, developer_token, login_id, auth_write, recipe_slug):
dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})
bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})
google_api(config, {'auth': auth_read, 'api': 'googleads', 'version': 'v8', 'function': 'customers.googleAds.search', 'kwargs': {'customerId': customer_id, 'body': {'query': 'SELECT\n campaign.name,\n ad_group.name,\n segments.geo_target_postal_code,\n metrics.impressions,\n metrics.clicks,\n metrics.conversions,\n metrics.interactions\n FROM user_location_view '}}, 'headers': {'developer-token': developer_token, 'login-customer-id': login_id}, 'iterate': True, 'results': {'bigquery': {'dataset': recipe_slug, 'table': 'GoogleAds_KPI', 'schema': [{'name': 'userLocationView', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'segments', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'geoTargetPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'metrics', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'interactions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'impressions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'conversions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'clicks', 'type': 'INTEGER', 'mode': 'NULLABLE'}]}, {'name': 'adGroup', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'campaign', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}]}}})
bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\n campaign.name AS Campaign,\n adGRoup.name AS Ad_Group,\n segments.geoTargetPostalCode AS Postal_Code,\n SAFE_DIVIDE(metrics.impressions, SUM(metrics.impressions) OVER()) AS Impression,\n SAFE_DIVIDE(metrics.clicks, metrics.impressions) AS Click,\n SAFE_DIVIDE(metrics.conversions, metrics.impressions) AS Conversion,\n SAFE_DIVIDE(metrics.interactions, metrics.impressions) AS Interaction,\n metrics.impressions AS Impressions FROM\n `{dataset}.GoogleAds_KPI`; ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'GoogleAds_KPI_Normalized'}})
census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
census(config, {'auth': auth_write, 'correlate': {'join': 'Postal_Code', 'pass': ['Campaign', 'Ad_Group'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion', 'Interaction'], 'dataset': recipe_slug, 'table': 'GoogleAds_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
|
GoogleAds funnel analysis using Census data.
Args:
auth_read (authentication) - Credentials used for reading data.
customer_id (string) - Google Ads customer.
developer_token (string) - Google Ads developer token.
login_id (string) - Google Ads login.
auth_write (authentication) - Authorization used for writing data.
recipe_slug (string) - Name of Google BigQuery dataset to create.
|
github-repos
|
def stat_v2(path):
return _pywrap_file_io.Stat(compat.path_to_str(path))
|
Returns file statistics for a given path.
Args:
path: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
|
github-repos
|
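A small usage sketch; the path is a placeholder and the attributes follow the public FileStatistics struct.
stats = stat_v2('/tmp/example.txt')
print(stats.length)        # size in bytes
print(stats.is_directory)  # False for a regular file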
def _find_furthest_new_line(read_buffer):
new_line_positions = [read_buffer.rfind(n) for n in new_lines_bytes]
return max(new_line_positions)
|
Return -1 if read_buffer does not contain a newline, otherwise the position of the rightmost newline.
Args:
read_buffer (bytestring)
Returns:
int: The rightmost position of a newline character in read_buffer if found, else -1
|
codesearchnet
|
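A worked example of the search above, assuming new_lines_bytes is a module-level list of the usual line endings.
new_lines_bytes = [b'\r\n', b'\n', b'\r']  # assumed constant used by the function

buf = b'first\nsecond\r\nthird'
# rfind gives 12 for b'\r\n', 13 for b'\n' and 12 for b'\r', so the maximum is 13.
assert _find_furthest_new_line(buf) == 13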
def set_installed_version(vcs, version):
version_path = _get_version_path(vcs)
with open(version_path, 'w') as f:
f.write(version)
|
Set the installed version for this project.
Args:
vcs (easyci.vcs.base.Vcs)
version (str)
|
codesearchnet
|
def tags(pode, leaf=False):
fulltags = [tag for tag in pode[1]['tags']]
if not leaf:
return fulltags
retn = []
for size, tag in sorted([(len(t), t) for t in fulltags], reverse=True):
look = tag + '.'
if any([r.startswith(look) for r in retn]):
continue
retn.append(tag)
return retn
|
Get all the tags for a given node.
Args:
pode (tuple): A packed node.
leaf (bool): If True, only return the leaf (most specific) tags.
Returns:
list: A list of tag strings.
|
juraj-google-style
|
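A worked example of the leaf filter above; the packed node is a minimal stand-in for a real packed Synapse node.
pode = ('node', {'tags': {'foo': (None, None),
                          'foo.bar': (None, None),
                          'baz': (None, None)}})

tags(pode)             # ['foo', 'foo.bar', 'baz']
tags(pode, leaf=True)  # ['foo.bar', 'baz'] -- 'foo' is dropped as a prefix of 'foo.bar'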
def _GetEventData(
self, parser_mediator, record_index, evt_record, recovered=False):
event_data = WinEvtRecordEventData()
try:
event_data.record_number = evt_record.identifier
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read record identifier from event record: {0:d} '
'with error: {1!s}').format(record_index, exception))
try:
event_identifier = evt_record.event_identifier
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read event identifier from event record: {0:d} '
'with error: {1!s}').format(record_index, exception))
event_identifier = None
event_data.offset = evt_record.offset
event_data.recovered = recovered
if event_identifier is not None:
event_data.event_identifier = event_identifier & 0xffff
event_data.facility = (event_identifier >> 16) & 0x0fff
event_data.severity = event_identifier >> 30
event_data.message_identifier = event_identifier
event_data.event_type = evt_record.event_type
event_data.event_category = evt_record.event_category
event_data.source_name = evt_record.source_name
event_data.computer_name = evt_record.computer_name
event_data.user_sid = evt_record.user_security_identifier
event_data.strings = list(evt_record.strings)
return event_data
|
Retrieves event data from the Windows EventLog (EVT) record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record_index (int): event record index.
evt_record (pyevt.record): event record.
recovered (Optional[bool]): True if the record was recovered.
Returns:
WinEvtRecordEventData: event data.
|
juraj-google-style
|
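The bit layout applied above, shown on a concrete value; the identifier itself is hypothetical but the masks and shifts match the code.
event_identifier = 0xC0002719                  # hypothetical EVT event identifier

code = event_identifier & 0xffff               # 0x2719 -> 10009 (stored as event_identifier)
facility = (event_identifier >> 16) & 0x0fff   # 0x000  -> 0
severity = event_identifier >> 30              # 0b11   -> 3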
def get_all_pipelines(app=''):
url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)
pipelines = response.json()
LOG.debug('Pipelines:\n%s', pipelines)
return pipelines
|
Get a list of all the Pipelines in _app_.
Args:
app (str): Name of Spinnaker Application.
Returns:
list: Pipeline configurations for _app_ decoded from the Gate response.
|
codesearchnet
|
def coalescence_times(self, backward=True):
if (not isinstance(backward, bool)):
raise TypeError('backward must be a bool')
for dist in sorted((d for (n, d) in self.distances_from_root() if (len(n.children) > 1)), reverse=backward):
(yield dist)
|
Generator over the times of successive coalescence events
Args:
``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
|
codesearchnet
|
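A small sketch assuming the TreeSwift API this generator appears to belong to; the Newick string is a toy tree.
from treeswift import read_tree_newick

tree = read_tree_newick('((A:1,B:1):1,C:2);')
list(tree.coalescence_times())                # [1.0, 0] -- deepest coalescence first
list(tree.coalescence_times(backward=False))  # [0, 1.0] -- root event first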
def _GetGradSource(op_or_tensor):
name_tokens = op_or_tensor.name.split('/')
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith('gradients')]
if not grad_pos:
raise ValueError(f"Expected op/tensor name to start with gradients (excluding scope), got: {op_or_tensor.name}. This means that a tf.gradients op with this op in its dependency path has a custom name that does not start with 'gradients'. Please make sure all calls to tf.gradients that have non-empty `name` arguments use names that start with 'gradients'.")
return '/'.join(name_tokens[:grad_pos[-1] + 1])
|
Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counting breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray*Grad is being called in, by looking at the input gradient
tensor's name, and create or lookup an accumulator gradient TensorArray
associated with this specific call. This solves any confusion and ensures
different gradients from the same forward graph get their own accumulators.
This function creates the unique label associated with the tf.gradients call
that is used to create the gradient TensorArray.
Args:
op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
A python string, the unique label associated with this particular
gradients calculation.
Raises:
ValueError: If not called within a gradients calculation.
|
github-repos
|
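The name parsing above, replicated as a standalone sketch on a hypothetical tensor name.
name = 'optimizer/gradients_1/while/TensorArrayWrite/TensorArrayWriteV3_grad/x:0'

tokens = name.split('/')
grad_pos = [i for i, t in enumerate(tokens) if t.startswith('gradients')]
source = '/'.join(tokens[:grad_pos[-1] + 1])  # 'optimizer/gradients_1'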
def predict(self, a, b, sig=[-1, -1], maxpnt=500):
a = (a - np.mean(a)) / np.std(a)
b = (b - np.mean(b)) / np.std(b)
return FastHsicTestGamma(a, b, sig, maxpnt)
|
Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
sig (list): sig[0] (resp. sig[1]) is the kernel size for a (resp. b); set to the median distance if -1
maxpnt (int): maximum number of points used, for computational time
Returns:
float: test statistic
|
juraj-google-style
|
def default_num_choices(self) -> int:
return OnnxConfig.default_fixed_num_choices
|
The default number of choices to use if no other indication is given
Returns:
Integer > 0
|
github-repos
|