code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---|
def create_audit_event(self, code='AUDIT'):
event = self._meta.event_model(code=code, model=self.__class__.__name__)
if current_user:
event.created_by = current_user.get_id()
self.copy_foreign_keys(event)
self.populate_audit_fields(event)
return event
|
Creates a generic auditing Event logging the changes between saves
and the initial data in creates.
Kwargs:
code (str): The code to set the new Event to.
Returns:
Event: A new event with relevant info inserted into it
|
codesearchnet
|
def read(self, length=(- 1)):
if (0 <= length < len(self)):
newpos = (self.pos + length)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return data
data = self.buf[self.pos:]
self.clear()
return data
|
Reads from the FIFO.
Reads as much data as possible from the FIFO up to the specified
length. If the length argument is negative or omitted, all data
currently available in the FIFO will be read. If there is no data
available in the FIFO an empty string is returned.
Args:
length: The amount of data to read from the FIFO. Defaults to -1.
|
codesearchnet
|
def run_gpu_or_tpu(func: _F) -> _F:
if tf_inspect.isclass(func):
raise ValueError('`run_gpu_or_tpu` only supports test methods.')
def decorated(self: 'TensorFlowTestCase', *args, **kwargs):
if config.list_physical_devices('GPU'):
return func(self, 'GPU', *args, **kwargs)
if config.list_physical_devices('TPU'):
return func(self, 'TPU', *args, **kwargs)
self.skipTest('Test requires GPU or TPU')
return decorated
|
Execute the decorated test only if a physical GPU or TPU is available.
This function is intended to be applied to tests that require the presence
of a physical GPU or TPU. It complies with the following rules:
- If a GPU is available, the test will run on the GPU.
- If a GPU is absent and a TPU is available, the test will run on the TPU.
- If both GPU and TPU are absent, the test will be skipped.
Args:
func: function to be annotated.
Returns:
Returns a function that will conditionally skip the decorated test method.
|
github-repos
|
def FromDBInstance(db_token):
hash_ar = bytearray(binascii.unhexlify(db_token.ContractHash))
hash_ar.reverse()
hash = UInt160(data=hash_ar)
token = NEP5Token(script=None)
token.SetScriptHash(hash)
token.name = db_token.Name
token.symbol = db_token.Symbol
token.decimals = db_token.Decimals
return token
|
Get a NEP5Token instance from a database token.
Args:
db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):
Returns:
NEP5Token: self.
|
codesearchnet
|
def AddArguments(cls, argument_group):
storage_formats = sorted(definitions.STORAGE_FORMATS)
argument_group.add_argument(
'--storage_format', '--storage-format', action='store',
choices=storage_formats, dest='storage_format', type=str,
metavar='FORMAT', default=definitions.DEFAULT_STORAGE_FORMAT, help=(
'Format of the storage file, the default is: {0:s}. Supported '
'options: {1:s}'.format(
definitions.DEFAULT_STORAGE_FORMAT,
', '.join(storage_formats))))
|
Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
|
juraj-google-style
|
def map_across_full_axis(self, axis, map_func):
num_splits = self._compute_num_partitions()
preprocessed_map_func = self.preprocess_func(map_func)
partitions = self.column_partitions if not axis else self.row_partitions
result_blocks = np.array(
[
part.apply(preprocessed_map_func, num_splits=num_splits)
for part in partitions
]
)
return (
self.__constructor__(result_blocks.T)
if not axis
else self.__constructor__(result_blocks)
)
|
Applies `map_func` to every partition.
Note: This method should be used in the case that `map_func` relies on
some global information about the axis.
Args:
axis: The axis to perform the map across (0 - index, 1 - columns).
map_func: The function to apply.
Returns:
A new BaseFrameManager object, the type of object that called this.
|
juraj-google-style
|
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
|
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for the line as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
|
juraj-google-style
|
def sget_timestamp(self, cycle, step, dataset_number=None):
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
timestamp_header = self.headers_normal.test_time_txt
step_index_header = self.headers_normal.step_index_txt
test = self.datasets[dataset_number].dfdata
if isinstance(step, (list, tuple)):
warnings.warn(f"The varialbe step is a list."
f"Should be an integer."
f"{step}")
step = step[0]
c = test[(test[cycle_index_header] == cycle) &
(test[step_index_header] == step)]
if not self.is_empty(c):
t = c[timestamp_header]
return t
else:
return pd.Series()
|
Returns timestamp for cycle, step.
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][timestamp_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series
|
juraj-google-style
|
def condense(input_string):
try:
assert isinstance(input_string, basestring)
except AssertionError:
raise TypeError
removed_leading_whitespace = re.sub(r'>\s+', '>', input_string).strip()
removed_trailing_whitespace = re.sub(r'\s+<', '<', removed_leading_whitespace).strip()
return removed_trailing_whitespace
|
Trims leading and trailing whitespace between tags in an HTML document.
Args:
input_string: A (possibly unicode) string representing HTML.
Returns:
A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
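A minimal standalone illustration of the same whitespace-trimming idea (not part of the original module; the sample HTML string is made up):
import re

html = "<div>\n    <p>  hello  </p>\n  </div>"
out = re.sub(r'\s+<', '<', re.sub(r'>\s+', '>', html)).strip()
print(out)  # <div><p>hello</p></div>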
|
juraj-google-style
|
def DataRefreshRequired(self, path=None, last=None):
if (last is None):
if (path is None):
raise type_info.TypeValueError("Either 'path' or 'last' must be supplied as an argument.")
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)
stat_obj = fd.Get(fd.Schema.STAT)
if stat_obj:
last = stat_obj.age
else:
last = rdfvalue.RDFDatetime(0)
if (last is None):
return True
last = last.AsDatetime()
return ((datetime.datetime.utcnow() - last) > self.max_age_before_refresh)
|
True if we need to update this path from the client.
Args:
path: The path relative to the root to check freshness of.
last: An aff4:last attribute to check freshness of.
At least one of path or last must be supplied.
Returns:
True if the path hasn't been updated in the last
self.max_age_before_refresh seconds, else False.
Raises:
type_info.TypeValueError: If no arguments are supplied.
|
codesearchnet
|
def start(self, **kwargs):
return self.client.api.start(self.id, **kwargs)
|
Start this container. Similar to the ``docker start`` command, but
doesn't support attach options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def _make_query_from_terms(self, terms):
match_query = ''
expanded_terms = self._expand_terms(terms)
if expanded_terms['doc']:
match_query = self.backend._and_join(expanded_terms['doc'])
if expanded_terms['keywords']:
if match_query:
match_query = self.backend._and_join(
[match_query, self.backend._join_keywords(expanded_terms['keywords'])])
else:
match_query = self.backend._join_keywords(expanded_terms['keywords'])
if match_query:
query = text()
query_params = {
'match_query': match_query}
else:
query = text()
query_params = {}
return query, query_params
|
Creates a query for partition from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (str, dict): First element is str with FTS query, second is parameters of the query.
|
juraj-google-style
|
def get_continent(self, callsign, timestamp=timestamp_now):
return self.get_all(callsign, timestamp)[const.CONTINENT]
|
Returns the continent identifier of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: continent identifier
Raises:
KeyError: No Continent found for callsign
Note:
The following continent identifiers are used:
- EU: Europe
- NA: North America
- SA: South America
- AS: Asia
- AF: Africa
- OC: Oceania
- AN: Antarctica
|
codesearchnet
|
def parse(response_text: str, *, batch: bool, validate_against_schema: bool=True) -> Union[(JSONRPCResponse, List[JSONRPCResponse])]:
if (not response_text):
if batch:
return []
else:
return NotificationResponse()
deserialized = deserialize(response_text)
if validate_against_schema:
jsonschema.validate(deserialized, schema)
if isinstance(deserialized, list):
return [get_response(r) for r in deserialized if ('id' in r)]
return get_response(deserialized)
|
Parses response text, returning JSONRPCResponse objects.
Args:
response_text: JSON-RPC response string.
batch: If the response_text is an empty string, this determines how to parse.
validate_against_schema: Validate against the json-rpc schema.
Returns:
Either a JSONRPCResponse, or a list of them.
Raises:
json.JSONDecodeError: The response was not valid JSON.
jsonschema.ValidationError: The response was not a valid JSON-RPC response
object.
|
codesearchnet
|
def from_stream(credential_filename):
if credential_filename and os.path.isfile(credential_filename):
try:
return _get_application_default_credential_from_file(
credential_filename)
except (ApplicationDefaultCredentialsError, ValueError) as error:
extra_help = (' (provided as parameter to the '
'from_stream() method)')
_raise_exception_for_reading_json(credential_filename,
extra_help,
error)
else:
raise ApplicationDefaultCredentialsError(
'The parameter passed to the from_stream() '
'method should point to a file.')
|
Create a Credentials object by reading information from a file.
It returns an object of type GoogleCredentials.
Args:
credential_filename: the path to the file from where the
credentials are to be read
Raises:
ApplicationDefaultCredentialsError: raised when the credentials
fail to be retrieved.
|
juraj-google-style
|
def _to_json(self, strip, to_serialize=None):
curr_type = self.__class__
if (to_serialize is None):
to_serialize = copy.copy(self.__dict__)
else:
to_serialize = copy.copy(to_serialize)
for member in strip:
if (member in to_serialize):
del to_serialize[member]
to_serialize['token_expiry'] = _parse_expiry(to_serialize.get('token_expiry'))
to_serialize['_class'] = curr_type.__name__
to_serialize['_module'] = curr_type.__module__
for (key, val) in to_serialize.items():
if isinstance(val, bytes):
to_serialize[key] = val.decode('utf-8')
if isinstance(val, set):
to_serialize[key] = list(val)
return json.dumps(to_serialize)
|
Utility function that creates JSON repr. of a Credentials object.
Args:
strip: array, An array of names of members to exclude from the
JSON.
to_serialize: dict, (Optional) The properties for this object
that will be serialized. This allows callers to
modify before serializing.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
|
codesearchnet
|
def insert_tile(self, tile_info):
for (i, tile) in enumerate(self.registered_tiles):
if (tile.slot == tile_info.slot):
self.registered_tiles[i] = tile_info
return
self.registered_tiles.append(tile_info)
|
Add or replace an entry in the tile cache.
Args:
tile_info (TileInfo): The newly registered tile.
|
codesearchnet
|
def _create_or_get_tensor_history_values_cache(self, cache_name, graph, shape=None, dtype=dtypes.float32):
if graph is None:
raise ValueError('Invalid graph.')
if graph not in self._history_value_cache:
self._history_value_cache[graph] = {}
if cache_name not in self._history_value_cache[graph]:
if shape is None:
raise ValueError('shape must be provided at cache creation.')
if dtype.is_integer:
init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)
else:
init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE
with graph.as_default() as g, g.name_scope(None):
self._history_value_cache[graph][cache_name] = variable_scope.get_variable('tt_history' + '_' + self._escape_namescopes(cache_name), shape=shape, dtype=dtype, initializer=init_ops.constant_initializer(init_val), trainable=False, use_resource=True, collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
return self._history_value_cache[graph][cache_name]
|
Creates a variable as the cache to store historic intermediate tensor values.
Args:
cache_name: Name to be given to the cache (an instance of tf.variable).
graph: Tensorflow graph.
shape: A list of dimensions.
dtype: Data type of created cache.
Returns:
A ref to newly created or existing cache with the given dimensions.
Raises:
ValueError:
(1) If graph is None, or
(2) shape is None when a new cache needs to be created.
|
github-repos
|
def dotd(A, B):
A = asarray(A, float)
B = asarray(B, float)
if A.ndim == 1 and B.ndim == 1:
return dot(A, B)
out = empty((A.shape[0],), float)
out[:] = sum(A * B.T, axis=1)
return out
|
r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`.
If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in
:math:`O(pn)`.
Args:
A (array_like): Left matrix.
B (array_like): Right matrix.
Returns:
:class:`numpy.ndarray`: Resulting diagonal.
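A small NumPy check of the identity the helper exploits (a sketch, independent of the function above; array sizes are arbitrary):
import numpy as np

A = np.random.rand(4, 3)              # n x p
B = np.random.rand(3, 4)              # p x n
diag_fast = np.sum(A * B.T, axis=1)   # O(n*p), same trick as dotd
diag_full = np.diag(A @ B.T)          # O(n*n*p), forms the full product
assert np.allclose(diag_fast, diag_full)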
|
juraj-google-style
|
def parse_uri(self, uri=None):
if not uri:
return rdflib.term.URIRef(self.root)
elif type(uri) == str:
if type(uri) == str and not uri.startswith('http'):
return rdflib.term.URIRef("%s%s" % (self.root, uri))
else:
return rdflib.term.URIRef(uri)
elif type(uri) == rdflib.term.URIRef:
return uri
else:
raise TypeError('invalid URI input')
|
Parses and cleans up possible URI inputs, returning an instance of rdflib.term.URIRef.
Args:
uri (rdflib.term.URIRef,str): input URI
Returns:
rdflib.term.URIRef
|
juraj-google-style
|
def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):
should_use_cache = (cache and exists(name))
if (not should_use_cache):
with _files.tmpdir() as tmpdir:
if uri.startswith('s3://'):
dst = os.path.join(tmpdir, 'tar_file')
_files.s3_download(uri, dst)
module_path = os.path.join(tmpdir, 'module_dir')
os.makedirs(module_path)
with tarfile.open(name=dst, mode='r:gz') as t:
t.extractall(path=module_path)
else:
module_path = uri
prepare(module_path, name)
install(module_path)
|
Download, prepare and install a compressed tar file from S3 or local directory as a module.
The SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.
This function downloads this compressed file and, if provided, transforms it
into a module before installing it.
This method is the predecessor of :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`
and has been kept for backward-compatibility purposes.
Args:
name (str): name of the script or module.
uri (str): the location of the module.
cache (bool): defaults to True. It will not download and install the module again if it is already installed.
|
codesearchnet
|
def generate_sigproc_header(f):
header_string = b''
header_string += to_sigproc_keyword(b'HEADER_START')
for keyword in f.header.keys():
if keyword == b'src_raj':
header_string += to_sigproc_keyword(b'src_raj') + to_sigproc_angle(f.header[b'src_raj'])
elif keyword == b'src_dej':
header_string += to_sigproc_keyword(b'src_dej') + to_sigproc_angle(f.header[b'src_dej'])
elif keyword == b'az_start' or keyword == b'za_start':
header_string += to_sigproc_keyword(keyword) + np.float64(f.header[keyword]).tostring()
elif keyword not in header_keyword_types.keys():
pass
else:
header_string += to_sigproc_keyword(keyword, f.header[keyword])
header_string += to_sigproc_keyword(b'HEADER_END')
return header_string
|
Generate a serialized sigproc header which can be written to disk.
Args:
f (Filterbank object): Filterbank object for which to generate header
Returns:
header_str (str): Serialized string corresponding to header
|
juraj-google-style
|
def get_size(fileobj):
old_pos = fileobj.tell()
try:
fileobj.seek(0, 2)
return fileobj.tell()
finally:
fileobj.seek(old_pos, 0)
|
Returns the size of the file.
The position when passed in will be preserved if no error occurs.
Args:
fileobj (fileobj)
Returns:
int: The size of the file
Raises:
IOError
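A quick sanity check of the position-preserving behaviour, assuming get_size is in scope (the buffer contents are illustrative):
import io

buf = io.BytesIO(b'hello world')
buf.seek(3)
print(get_size(buf))  # 11 -- size of the whole buffer
print(buf.tell())     # 3  -- original position is restored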
|
codesearchnet
|
def get_max_recv_data_size(self, target):
fname = 'get_max_recv_data_size'
cname = ((self.__class__.__module__ + '.') + self.__class__.__name__)
raise NotImplementedError(('%s.%s() is required' % (cname, fname)))
|
Returns the maximum number of data bytes for receiving.
The maximum number of data bytes acceptable for receiving with
either :meth:`send_cmd_recv_rsp` or :meth:`send_rsp_recv_cmd`.
The value reflects the local device capabilities for receiving
in the mode determined by *target*. It does not relate to any
protocol capabilities and negotiations.
Arguments:
target (nfc.clf.Target): The current local or remote
communication target.
Returns:
int: Maximum number of data bytes supported for receiving.
|
codesearchnet
|
def scores2recos(self, scores, candidates, rev=False):
sorted_indices = np.argsort(scores)
if rev:
sorted_indices = sorted_indices[::(- 1)]
return (candidates[sorted_indices], scores[sorted_indices])
|
Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. Smaller score indicates a promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If True, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores).
|
codesearchnet
|
def convert_outlook_msg(msg_bytes):
if (not is_outlook_msg(msg_bytes)):
raise ValueError('The supplied bytes are not an Outlook MSG file')
orig_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
with open('sample.msg', 'wb') as msg_file:
msg_file.write(msg_bytes)
try:
subprocess.check_call(['msgconvert', 'sample.msg'], stdout=null_file, stderr=null_file)
eml_path = 'sample.eml'
with open(eml_path, 'rb') as eml_file:
rfc822 = eml_file.read()
except FileNotFoundError:
raise EmailParserError('Failed to convert Outlook MSG: msgconvert utility not found')
finally:
os.chdir(orig_dir)
shutil.rmtree(tmp_dir)
return rfc822
|
Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
standard RFC 822 format.
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
An RFC 822 string
|
codesearchnet
|
def check_for_wdiff():
cmd = ['which', CMD_WDIFF]
DEVNULL = open(os.devnull, 'wb')
proc = sub.Popen(cmd, stdout=DEVNULL)
proc.wait()
DEVNULL.close()
if (proc.returncode != 0):
msg = "the `{}` command can't be found".format(CMD_WDIFF)
raise WdiffNotFoundError(msg)
|
Checks if the `wdiff` command can be found.
Raises:
WdiffNotFoundError: if ``wdiff`` is not found.
|
codesearchnet
|
def start(self, extra_args='', tag=''):
if self.started:
return
utils.create_dir(self.log_path)
if tag:
tag = (tag + ',')
out_file_name = 'IPerfServer,{},{}{}.log'.format(self.port, tag, len(self.log_files))
full_out_path = os.path.join(self.log_path, out_file_name)
cmd = ('%s %s > %s' % (self.iperf_str, extra_args, full_out_path))
self.iperf_process = utils.start_standing_subprocess(cmd, shell=True)
self.log_files.append(full_out_path)
self.started = True
|
Starts iperf server on specified port.
Args:
extra_args: A string representing extra arguments to start iperf
server with.
tag: Appended to log file name to identify logs from different
iperf runs.
|
codesearchnet
|
def find_container_traits(cls_or_string):
if utils.is_str(cls_or_string):
if not templates.is_instantiation(cls_or_string):
return None
name = templates.name(cls_or_string)
if name.startswith('std::'):
name = name[len('std::'):]
if name.startswith('std::tr1::'):
name = name[len('std::tr1::'):]
for cls_traits in all_container_traits:
if cls_traits.name() == name:
return cls_traits
else:
if isinstance(cls_or_string, class_declaration.class_types):
if cls_or_string.cache.container_traits is not None:
return cls_or_string.cache.container_traits
for cls_traits in all_container_traits:
if cls_traits.is_my_case(cls_or_string):
if isinstance(cls_or_string, class_declaration.class_types):
cls_or_string.cache.container_traits = cls_traits
return cls_traits
|
Find the container traits type of a declaration.
Args:
cls_or_string (str | declarations.declaration_t): a string
Returns:
declarations.container_traits: a container traits
|
juraj-google-style
|
def self(self) -> 'EFBChat':
self.chat_name = 'You'
self.chat_alias = None
self.chat_uid = EFBChat.SELF_ID
self.chat_type = ChatType.User
return self
|
Set the chat as yourself.
In this context, "yourself" means the user behind the master channel.
Every channel should relate this to the corresponding target.
Returns:
EFBChat: This object.
|
codesearchnet
|
def _DisableNetworkManager(self, interfaces, logger):
for interface in interfaces:
interface_config = os.path.join(
self.network_path, 'ifcfg-%s' % interface)
if os.path.exists(interface_config):
self._ModifyInterface(
interface_config, 'DEVICE', interface, replace=False)
self._ModifyInterface(
interface_config, 'NM_CONTROLLED', 'no', replace=True)
else:
with open(interface_config, 'w') as interface_file:
interface_content = [
'# Added by Google.',
'BOOTPROTO=none',
'DEFROUTE=no',
'DEVICE=%s' % interface,
'IPV6INIT=no',
'NM_CONTROLLED=no',
'NOZEROCONF=yes',
'',
]
interface_file.write('\n'.join(interface_content))
logger.info('Created config file for interface %s.', interface)
|
Disable network manager management on a list of network interfaces.
Args:
interfaces: list of string, the network device names to disable NetworkManager management on.
logger: logger object, used to write to SysLog and serial port.
|
juraj-google-style
|
def all_near_zero_mod(a: Union[(float, complex, Iterable[float], np.ndarray)], period: float, *, atol: float=1e-08) -> bool:
b = (((np.asarray(a) + (period / 2)) % period) - (period / 2))
return np.all(np.less_equal(np.abs(b), atol))
|
Checks if the tensor's elements are all near multiples of the period.
Args:
a: Tensor of elements that could all be near multiples of the period.
period: The period, e.g. 2 pi when working in radians.
atol: Absolute tolerance.
|
codesearchnet
|
def send_state_event(self, event_type, content, state_key=""):
return self.client.api.send_state_event(
self.room_id,
event_type,
content,
state_key
)
|
Send a state event to the room.
Args:
event_type (str): The type of event that you are sending.
content (): An object with the content of the message.
state_key (str, optional): A unique key to identify the state.
|
juraj-google-style
|
def altcode(msg):
if df(msg) not in [0, 4, 16, 20]:
raise RuntimeError("Message must be Downlink Format 0, 4, 16, or 20.")
mbin = hex2bin(msg)
mbit = mbin[25]
qbit = mbin[27]
if mbit == '0':
if qbit == '1':
vbin = mbin[19:25] + mbin[26] + mbin[28:32]
alt = bin2int(vbin) * 25 - 1000
if qbit == '0':
C1 = mbin[19]
A1 = mbin[20]
C2 = mbin[21]
A2 = mbin[22]
C4 = mbin[23]
A4 = mbin[24]
B1 = mbin[26]
B2 = mbin[28]
D2 = mbin[29]
B4 = mbin[30]
D4 = mbin[31]
graystr = D2 + D4 + A1 + A2 + A4 + B1 + B2 + B4 + C1 + C2 + C4
alt = gray2alt(graystr)
if mbit == '1':
vbin = mbin[19:25] + mbin[26:31]
alt = int(bin2int(vbin) * 3.28084)
return alt
|
Computes the altitude from DF4 or DF20 message, bit 20-32.
credit: @fbyrkjeland
Args:
msg (str): 28-character hexadecimal message string
Returns:
int: altitude in ft
|
juraj-google-style
|
def to_dict(mapreduce_yaml):
all_configs = []
for config in mapreduce_yaml.mapreduce:
out = {
"name": config.name,
"mapper_input_reader": config.mapper.input_reader,
"mapper_handler": config.mapper.handler,
}
if config.mapper.params_validator:
out["mapper_params_validator"] = config.mapper.params_validator
if config.mapper.params:
param_defaults = {}
for param in config.mapper.params:
param_defaults[param.name] = param.default or param.value
out["mapper_params"] = param_defaults
if config.params:
param_defaults = {}
for param in config.params:
param_defaults[param.name] = param.default or param.value
out["params"] = param_defaults
if config.mapper.output_writer:
out["mapper_output_writer"] = config.mapper.output_writer
all_configs.append(out)
return all_configs
|
Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
user code (like param validation). The configurations are returned as a list.
Args:
mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
|
juraj-google-style
|
def _get_colors(n):
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex as r2h
from numpy import linspace
cols = linspace(0.05, 0.95, n)
cmap = plt.get_cmap('nipy_spectral')
return [r2h(cmap(i)) for i in cols]
|
Returns n unique and "evenly" spaced colors for the backgrounds
of the projects.
Args:
n (int): The number of unique colors wanted.
Returns:
colors (list of str): The colors in hex form.
|
codesearchnet
|
def format_unitary(mat, decimals=None):
num_basis = len(mat)
mat_complex = np.zeros((num_basis, num_basis), dtype=complex)
for i, vec in enumerate(mat):
mat_complex[i] = format_statevector(vec, decimals)
return mat_complex
|
Format unitary coming from the backend to present to the Qiskit user.
Args:
mat (list[list]): a list of list of [re, im] complex numbers
decimals (int): the number of decimals in the statevector.
If None, no rounding is done.
Returns:
list[list[complex]]: a matrix of complex numbers
|
juraj-google-style
|
def run(self):
return self._test_suite
|
Runs the dynamically generated test suite.
This method simply returns the test suite class created during
initialization. The test runner (e.g., unittest.main()) can then be used
to discover and run the tests within this suite.
Returns:
The dynamically created unittest.TestCase subclass.
|
github-repos
|
def _GetConfigValue(self, config_parser, section_name, value_name):
try:
return config_parser.get(section_name, value_name)
except configparser.NoOptionError:
return None
|
Retrieves a value from the config parser.
Args:
config_parser (ConfigParser): configuration parser.
section_name (str): name of the section that contains the value.
value_name (str): name of the value.
Returns:
object: configuration value or None if the value does not exists.
|
codesearchnet
|
def copy_pkg(self, filename, _):
basename = os.path.basename(filename)
self._copy(filename, os.path.join(self.connection['mount_point'], 'Packages', basename))
|
Copy a package to the repo's Package subdirectory.
Args:
filename: Path for file to copy.
_: Ignored. Used for compatibility with JDS repos.
|
codesearchnet
|
def run(data, base_logdir, session_id, group_id, hparams):
model = model_fn(hparams=hparams, seed=session_id)
logdir = os.path.join(base_logdir, session_id)
callback = tf.keras.callbacks.TensorBoard(
logdir,
update_freq=flags.FLAGS.summary_freq,
profile_batch=0,
)
hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id)
((x_train, y_train), (x_test, y_test)) = data
result = model.fit(
x=x_train,
y=y_train,
epochs=flags.FLAGS.num_epochs,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[callback, hparams_callback],
)
|
Run a training/validation session.
Flags must have been parsed for this function to behave.
Args:
data: The data as loaded by `prepare_data()`.
base_logdir: The top-level logdir to which to write summary data.
session_id: A unique string ID for this session.
group_id: The string ID of the session group that includes this
session.
hparams: A dict mapping hyperparameters in `HPARAMS` to values.
|
juraj-google-style
|
def predict_features(self, df_features, df_target, idx=0, C=.1, **kwargs):
lsvc = LinearSVR(C=C).fit(df_features.values, df_target.values)
return np.abs(lsvc.coef_)
|
For one variable, predict its neighbouring nodes.
Args:
df_features (pandas.DataFrame):
df_target (pandas.Series):
idx (int): (optional) for printing purposes
kwargs (dict): additional options for algorithms
C (float): Penalty parameter of the error term
Returns:
list: scores of each feature relatively to the target
|
juraj-google-style
|
def _add_step(self, step):
self._closed()
self.has_workflow_step = self.has_workflow_step or step.is_workflow
self.wf_steps[step.name_in_workflow] = step
|
Add a step to the workflow.
Args:
step (Step): a step from the steps library.
|
juraj-google-style
|
def ensure_app_data_dir(appname, *args):
from ubelt import util_path
dpath = get_app_data_dir(appname, *args)
util_path.ensuredir(dpath)
return dpath
|
Calls `get_app_data_dir` but ensures the directory exists.
Args:
appname (str): the name of the application
*args: any other subdirectories may be specified
SeeAlso:
get_app_data_dir
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_data_dir('ubelt')
>>> assert exists(dpath)
|
codesearchnet
|
def GetFailedTasks(self):
with self._lock:
return [task for task in self._tasks_abandoned.values() if (not task.has_retry)]
|
Retrieves all failed tasks.
Failed tasks are tasks that were abandoned and have no retry task once
the foreman is done processing.
Returns:
list[Task]: tasks.
|
codesearchnet
|
def decorate(fn):
if (not isfunction(fn)):
raise TypeError('paco: fn must be a callable object')
@functools.wraps(fn)
def decorator(*args, **kw):
for arg in args:
if iscoro_or_corofunc(arg):
return fn(*args, **kw)
if (len(args) and (args[0] is None)):
raise TypeError('paco: first argument cannot be empty')
def wrapper(coro, *_args, **_kw):
if (not iscoro_or_corofunc(coro)):
raise TypeError('paco: first argument must be a coroutine or coroutine function')
_args = ((coro,) + (args + _args))
kw.update(_kw)
return fn(*_args, **kw)
return wrapper
return decorator
|
Generic decorator for coroutines helper functions allowing
multiple variadic initialization arguments.
This function is intended to be used internally.
Arguments:
fn (function): target function to decorate.
Raises:
TypeError: if function or coroutine function is not provided.
Returns:
function: decorated function.
|
codesearchnet
|
def easeInOutCubic(n):
_checkRange(n)
n = (2 * n)
if (n < 1):
return (0.5 * (n ** 3))
else:
n = (n - 2)
return (0.5 * ((n ** 3) + 2))
|
A cubic tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
codesearchnet
|
def recipe_fred_series_to_bigquery(config, auth, fred_api_key, fred_series_id, fred_units, fred_frequency, fred_aggregation_method, project, dataset):
fred(config, {'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency, 'series': [{'series_id': fred_series_id, 'units': fred_units, 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {'project': project, 'dataset': dataset}}})
|
Download federal reserve series.
Args:
auth (authentication) - Credentials used for writing data.
fred_api_key (string) - 32 character alpha-numeric lowercase string.
fred_series_id (string) - Series ID to pull data from.
fred_units (choice) - A key that indicates a data value transformation.
fred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.
fred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.
project (string) - Existing BigQuery project.
dataset (string) - Existing BigQuery dataset.
|
github-repos
|
def show_constant(val: types.BaseValue) -> str:
def _ellipsis_printer(v):
if isinstance(v, types.PythonConstant):
return v.str_of_constant(_ellipsis_printer)
return '...'
return _ellipsis_printer(val)
|
Pretty-print a value if it is a constant.
Recurses into a constant, printing the underlying Python value for constants
and just using "..." for everything else (e.g., Variables). This is useful
for generating clear error messages that show the exact values related to an
error while preventing implementation details from leaking into the message.
Args:
val: an abstract value.
Returns:
A string of the pretty-printed constant.
|
github-repos
|
def get_player_stats(self, player_key, board_key):
player_stats_url = self.api_path + 'player/' + player_key + '/league/' + board_key + '/stats/'
response = self.get_response(player_stats_url)
return response
|
Calling the Player Stats API
Args:
player_key: Key of the player
board_key: key of the board
Return:
json data
|
juraj-google-style
|
class XLNetPoolerEndLogits(nn.Module):
def __init__(self, config: XLNetConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz)
start_states = hidden_states.gather(-2, start_positions)
start_states = start_states.expand(-1, slen, -1)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if p_mask.dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e+30 * p_mask
return x
|
Compute SQuAD end logits from sequence hidden states.
Args:
config ([`XLNetConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`
to use.
|
github-repos
|
def update_ref(profile, ref, sha):
resource = "/refs/" + ref
payload = {"sha": sha}
data = api.patch_request(profile, resource, payload)
return prepare(data)
|
Point a ref to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to update, e.g., ``heads/my-feature-branch``.
sha
The SHA of the commit to point the ref to.
Returns
A dict with data about the ref.
|
juraj-google-style
|
def _format_param_val(self, param_val):
if isinstance(param_val, list):
return ' '.join((str(x) for x in param_val))
else:
return str(param_val)
|
Internal method to format values in the packmol parameter dictionaries
Args:
param_val:
Some object to turn into String
Returns:
string representation of the object
|
codesearchnet
|
def Eq(left: str, right: str) -> BooleanTerm:
if left == right:
return TRUE
elif left > right:
return _Eq(left, right)
else:
return _Eq(right, left)
|
Create an equality or its simplified equivalent.
This will ensure that left > right. (For left == right, it'll just return
TRUE).
Args:
left: A string. Left side of the equality. This will get sorted, so it might
end up on the right.
right: A string. Right side of the equality. This will get sorted, so it
might end up on the left.
Returns:
A BooleanTerm.
|
github-repos
|
def get_attrs(obj: object) -> dict[str, object]:
attrs = {}
for k in dir(obj) + object.__dir__(obj):
if k in attrs:
continue
try:
v = getattr(obj, k)
except Exception as e:
v = ExceptionWrapper(e)
attrs[k] = v
return attrs
|
Parse all attributes from an object.
Limitation:
* Descriptor will be resolved, so all properties are executed (some can
have side effects, or take a lot of time to compute)
Args:
obj: Object to inspect
Returns:
Dict mapping attribute name to values.
|
github-repos
|
def grating_coupler_period(wavelength, n_eff, n_clad, incidence_angle_deg, diffration_order=1):
k0 = ((2.0 * np.pi) / wavelength)
beta = (n_eff.real * k0)
n_inc = n_clad
grating_period = (((2.0 * np.pi) * diffration_order) / (beta - ((k0 * n_inc) * np.sin(np.radians(incidence_angle_deg)))))
return grating_period
|
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
|
codesearchnet
|
def add_spectrum(self, label, spectrum, color=None):
self._spectra[label] = spectrum
self.colors.append((color or self.colors_cycle[(len(self._spectra) % len(self.colors_cycle))]))
|
Adds a Spectrum for plotting.
Args:
label (str): Label for the Spectrum. Must be unique.
spectrum: Spectrum object
color (str): This is passed on to matplotlib. E.g., "k--" indicates
a dashed black line. If None, a color will be chosen based on
the default color cycle.
|
codesearchnet
|
def files(self, request, id):
gist = self.send(request, id).json()
return gist['files']
|
Returns a list of files in the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A list of the files
|
juraj-google-style
|
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):
for artifact_definition in artifacts_reader.ReadDirectory(
path, extension=extension):
self.RegisterDefinition(artifact_definition)
|
Reads artifact definitions into the registry from files in a directory.
This function does not recurse sub directories.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
path (str): path of the directory to read from.
extension (Optional[str]): extension of the filenames to read.
Raises:
KeyError: if a duplicate artifact definition is encountered.
|
juraj-google-style
|
def install(self, updates):
if (updates.count() == 0):
ret = {'Success': False, 'Updates': 'Nothing to install'}
return ret
installer = self._session.CreateUpdateInstaller()
self._session.ClientApplicationID = 'Salt: Install Update'
with salt.utils.winapi.Com():
install_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
ret = {'Updates': {}}
for update in updates.updates:
uid = update.Identity.UpdateID
ret['Updates'][uid] = {}
ret['Updates'][uid]['Title'] = update.Title
ret['Updates'][uid]['AlreadyInstalled'] = bool(update.IsInstalled)
if (not salt.utils.data.is_true(update.IsInstalled)):
log.debug('To Be Installed: %s', uid)
log.debug('\tTitle: %s', update.Title)
install_list.Add(update)
if (install_list.Count == 0):
ret = {'Success': True, 'Updates': 'Nothing to install'}
return ret
installer.Updates = install_list
try:
log.debug('Installing Updates')
result = installer.Install()
except pywintypes.com_error as error:
(hr, msg, exc, arg) = error.args
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = 'Unknown Failure: {0}'.format(error)
log.error('Install Failed: %s', failure_code)
raise CommandExecutionError(failure_code)
result_code = {0: 'Installation Not Started', 1: 'Installation In Progress', 2: 'Installation Succeeded', 3: 'Installation Succeeded With Errors', 4: 'Installation Failed', 5: 'Installation Aborted'}
log.debug('Install Complete')
log.debug(result_code[result.ResultCode])
ret['Message'] = result_code[result.ResultCode]
if (result.ResultCode in [2, 3]):
ret['Success'] = True
ret['NeedsReboot'] = result.RebootRequired
log.debug('NeedsReboot: %s', result.RebootRequired)
else:
log.debug('Install Failed')
ret['Success'] = False
reboot = {0: 'Never Reboot', 1: 'Always Reboot', 2: 'Poss Reboot'}
for i in range(install_list.Count):
uid = install_list.Item(i).Identity.UpdateID
ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]
ret['Updates'][uid]['RebootBehavior'] = reboot[install_list.Item(i).InstallationBehavior.RebootBehavior]
return ret
|
Install the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions. If the
updates need to be downloaded, use the ``download`` function.
Args:
updates (Updates): An instance of the Updates class containing
the updates to be installed.
Returns:
dict: A dictionary containing the results of the installation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# install KB3195454
updates = wua.search('KB3195454')
results = wua.download(updates)
results = wua.install(updates)
|
codesearchnet
|
async def get(self, request):
ticket = await self.get_ticket(request)
if ticket is None:
return None
try:
now = time.time()
fields = self._ticket.validate(ticket, self._get_ip(request), now)
if (self._reissue_time is not None and
now >= (fields.valid_until - self._reissue_time)):
request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)
return fields.user_id
except TicketError as e:
return None
|
Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated.
|
juraj-google-style
|
def _encode_required_fields(self, builder: expressions.Builder) -> List[validation_pb2.SqlRequirement]:
if not isinstance(builder.return_type, _fhir_path_data_types.StructureDataType):
return []
if builder.return_type.element_type == 'Extension':
return []
encoded_requirements: List[validation_pb2.SqlRequirement] = []
for name, desc_message in builder.return_type.iter_all_descendants():
containing_type_builder = builder
child_builder = containing_type_builder
paths = name.split('.')
for path in paths:
if isinstance(child_builder.return_type, _fhir_path_data_types.StructureDataType):
containing_type_builder = child_builder
child_builder = self._get_new_child_builder(child_builder, path)
if not child_builder:
break
if not child_builder:
continue
name = paths[-1]
requirement = self._encode_required_field(name, containing_type_builder, child_builder, desc_message)
if requirement:
encoded_requirements.append(requirement)
return encoded_requirements
|
Returns `SqlRequirement`s for all required fields in `ElementDefinition`.
Args:
builder: The builder containing the element to encode required fields for.
Returns:
A list of `SqlRequirement`s representing requirements generated from
required fields on the element.
|
github-repos
|
def _get_userprofile_from_registry(user, sid):
profile_dir = __utils__['reg.read_value']('HKEY_LOCAL_MACHINE', 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\{0}'.format(sid), 'ProfileImagePath')['vdata']
log.debug('user %s with sid=%s profile is located at "%s"', user, sid, profile_dir)
return profile_dir
|
In case net user doesn't return the userprofile we can get it from the
registry
Args:
user (str): The user name, used in debug message
sid (str): The sid to lookup in the registry
Returns:
str: Profile directory
|
codesearchnet
|
def _code_search(query, github_user=None):
github_client = temple.utils.GithubClient()
headers = {'Accept': 'application/vnd.github.v3.text-match+json'}
resp = github_client.get('/search/code', params={'q': query, 'per_page': 100}, headers=headers)
if ((resp.status_code == requests.codes.unprocessable_entity) and github_user):
raise temple.exceptions.InvalidGithubUserError('Invalid Github user or org - "{}"'.format(github_user))
resp.raise_for_status()
resp_data = resp.json()
repositories = collections.defaultdict(dict)
while True:
repositories.update({'git@github.com:{}.git'.format(repo['repository']['full_name']): repo['repository'] for repo in resp_data['items']})
next_url = _parse_link_header(resp.headers).get('next')
if next_url:
resp = requests.get(next_url, headers=headers)
resp.raise_for_status()
resp_data = resp.json()
else:
break
return repositories
|
Performs a Github API code search
Args:
query (str): The query sent to Github's code search
github_user (str, optional): The Github user being searched in the query string
Returns:
dict: A dictionary of repository information keyed on the git SSH url
Raises:
`InvalidGithubUserError`: When ``github_user`` is invalid
|
codesearchnet
|
def _prefix_from_ip_int(self, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
self._max_prefixlen)
prefixlen = self._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = self._max_prefixlen
details = _int_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
|
Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
|
juraj-google-style
|
def __init__(self, connect_func, max_size=10):
self.connect_func = connect_func
self.limiter = threading.BoundedSemaphore(max_size)
self.idle_conns = []
self.closed = False
|
Creates a ConnectionPool.
Args:
connect_func: A closure which returns a new connection to the underlying
database, i.e. a MySQLdb.Connection. Should raise or block if the
database is unavailable.
max_size: The maximum number of simultaneous connections.
|
juraj-google-style
|
def top_kth_iterative(x, k):
def next_x(cur_x, _):
top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)
return cur_x * to_float(cur_x < top_x)
fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),
parallel_iterations=2, back_prop=False)
return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))
|
Compute the k-th top element of x on the last axis iteratively.
This assumes values in x are non-negative, rescale if needed.
It is often faster than tf.nn.top_k for small k, especially if k < 30.
Note: this does not support back-propagation, it stops gradients!
Args:
x: a Tensor of non-negative numbers of type float.
k: a python integer.
Returns:
a float tensor of the same shape as x but with 1 on the last axis
that contains the k-th largest number in x.
|
juraj-google-style
|
def bind(self, devices_to_bind):
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
url = self.base_url + "api/0.1.0/subscribe/bind"
headers = {"apikey": self.entity_api_key}
data = {
"exchange": "amq.topic",
"keys": devices_to_bind,
"queue": self.entity_id
}
with self.no_ssl_verification():
r = requests.post(url, json=data, headers=headers)
response = dict()
if "No API key" in str(r.content.decode("utf-8")):
response["status"] = "failure"
r = json.loads(r.content.decode("utf-8"))['message']
elif 'bind queue ok' in str(r.content.decode("utf-8")):
response["status"] = "success"
r = r.content.decode("utf-8")
else:
response["status"] = "failure"
r = r.content.decode("utf-8")
response["response"] = str(r)
return response
|
This function allows an entity to list the devices to subscribe for data. This function must be called
at least once, before doing a subscribe. Subscribe function will listen to devices that are bound here.
Args:
devices_to_bind (list): an array of devices to listen to.
Example bind(["test100","testDemo"])
|
juraj-google-style
|
def run(self, dag):
if self.layout is None:
if self.property_set["layout"]:
self.layout = self.property_set["layout"]
else:
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
self.property_set['is_swap_mapped'] = True
for gate in dag.twoQ_gates():
physical_q0 = self.layout[gate.qargs[0]]
physical_q1 = self.layout[gate.qargs[1]]
if self.coupling_map.distance(physical_q0, physical_q1) != 1:
self.property_set['is_swap_mapped'] = False
return
|
If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map.
|
juraj-google-style
|
def Normalize(self, fraction=1.0):
if self.log:
raise ValueError('Pmf is under a log transform')
total = self.Total()
if (total == 0.0):
raise ValueError('Normalize: total probability is zero.')
factor = (float(fraction) / total)
for x in self.d:
self.d[x] *= factor
return total
|
Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
|
codesearchnet
|
def _get_bucket_attribute(bucket, query_param, xml_response_tag, retry_params=None, _account_id=None):
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)
common.validate_bucket_path(bucket)
(status, headers, content) = api.get_bucket(('%s?%s' % (bucket, query_param)))
errors.check_status(status, [200], bucket, resp_headers=headers, body=content)
root = ET.fromstring(content)
if ((root.tag == xml_response_tag) and root.text):
return root.text
return None
|
Helper method to request a bucket parameter and parse the response.
Args:
bucket: A Google Cloud Storage bucket of form '/bucket'.
query_param: The query parameter to include in the get bucket request.
xml_response_tag: The expected tag in the xml response.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
The xml value as a string. None if the returned xml does not match expected
format.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if the bucket does not exist.
|
codesearchnet
|
def create_sequence_pretty_tensor(sequence_input, shape=None, save_state=True):
inputs = prettytensor.wrap_sequence(sequence_input.inputs, tensor_shape=shape)
targets = prettytensor.wrap_sequence(sequence_input.targets)
if save_state:
bookkeeper.set_recurrent_state_saver(sequence_input)
return inputs, targets
|
Creates a PrettyTensor object for the given sequence.
The first dimension is treated as a time-dimension * batch and a default is
set for `unroll` and `state_saver`.
TODO(eiderman): Remove shape.
Args:
sequence_input: A SequenceInput or StateSavingSequenceInput
shape: The shape of each item in the sequence (including batch).
save_state: If true, use the sequence_input's state and save_state methods.
Returns:
2 Layers: inputs, targets
|
juraj-google-style
|
def pow(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
)
|
Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
|
juraj-google-style
|
def read(self, key):
key = quote(key, safe='~')
url = '/internal/playbooks/keyValue/{}'.format(key)
r = self.tcex.session.get(url)
data = r.content
if ((data is not None) and (not isinstance(data, str))):
data = str(r.content, 'utf-8')
return data
|
Read data from remote KV store for the provided key.
Args:
key (string): The key to read in remote KV store.
Returns:
(any): The response data from the remote KV store.
|
codesearchnet
|
def __init__(self, prefs, g, kappa=2.0, omega=0.5, beta=1.0, mu=1.0,
freeparams=['kappa', 'omega', 'beta', 'mu']):
_checkParam('g', g, self.PARAMLIMITS, self.PARAMTYPES)
assert abs(1 - g.sum()) <= ALMOST_ZERO, "g doesn't sum to 1"
self.g = g.copy()
self.g /= self.g.sum()
super(ExpCM_empirical_phi, self).__init__(prefs, kappa=kappa,
omega=omega, beta=beta, mu=mu, freeparams=freeparams)
|
Initialize an `ExpCM_empirical_phi` object.
Args:
`prefs`, `kappa`, `omega`, `beta`, `mu`, `freeparams`
Same meaning as for an `ExpCM`
`g`
Has the meaning described in the main class doc string.
|
juraj-google-style
|
def get_section_header(self, section):
self._ensure_section_headers_loaded()
if (type(section) is int):
return self._section_headers_by_index[section]
else:
return self._section_headers_by_name[section]
|
Get a specific section header by index or name.
Args:
section(int or str): The index or name of the section header to return.
Returns:
:class:`~ELF.SectionHeader`: The section header.
Raises:
KeyError: The requested section header does not exist.
|
codesearchnet
|
def export_warnings(self, export_file):
warn_filepath = op.dirname(export_file)
warn_filename = op.splitext(op.basename(export_file))[0]
self._add_entry(templates.EXPORT_WARNINGS.format(warnings_export_path=warn_filepath, warnings_export_file=warn_filename))
|
Append an export warnings entry to the journal.
This instructs Revit to export warnings from the opened model.
Currently Revit will stop journal execution if the model does not
have any warnings and the export warnings UI button is disabled.
Args:
export_file (str): full path of the output html file
|
codesearchnet
|
def rank_dated_files(pattern, dir, descending=True):
files = glob.glob(op.join(dir, pattern))
return sorted(files, reverse=descending)
|
Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename.
|
codesearchnet
|
def nextindx(self):
indx = 0
with s_lmdbslab.Scan(self.slab, self.db) as curs:
last_key = curs.last_key()
if (last_key is not None):
indx = (s_common.int64un(last_key) + 1)
return indx
|
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
|
codesearchnet
|
def _preprocess_numpy_input(x, data_format, mode):
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == 'tf':
x /= 127.5
x -= 1.0
return x
elif mode == 'torch':
x /= 255.0
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
if len(x.shape) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
if data_format == 'channels_first':
if len(x.shape) == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
|
Preprocesses a NumPy array encoding a batch of images.
Args:
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed Numpy array.
|
github-repos
|
async def post(self):
logging.debug('\n\n[+] -- Account debugging. ')
if settings.SIGNATURE_VERIFICATION:
super().verify()
try:
data = json.loads(self.request.body)
except:
self.set_status(400)
self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'})
raise tornado.web.Finish
message = data['message']
new_account = (await self.account.createaccount(**data))
logging.debug('\n\n [+] -- New account debugging.')
logging.debug(new_account['id'])
if ('error' in new_account.keys()):
self.set_status(new_account['error'])
self.write(new_account)
raise tornado.web.Finish
wallets = (await self.account.balance.get_wallets(uid=new_account['id']))
if isinstance(wallets, dict):
if ('error' in wallets.keys()):
self.set_status(wallets['error'])
self.write(wallets)
raise tornado.web.Finish
new_account.update({'href': ((settings.ENDPOINTS['ams'] + '/') + new_account['public_key']), 'wallets': json.dumps(wallets['wallets'])})
if new_account.get('email'):
email_data = {'to': new_account['email'], 'subject': 'Robin8 Support', 'optional': (('Your account was created on %s' % settings.domain) + new_account['href'])}
(await self.account.mailer.sendmail(**email_data))
self.write(new_account)
|
Creates a new account
Accepts:
- message (signed dict):
- "device_id" - str
- "email" - str
- "phone" - str
- "public_key" - str
- "signature" - str
Returns:
dictionary with following fields:
- "device_id" - str
- "phone" - str
- "public_key" - str
- "count" - int ( wallets amount )
- "level" - int (2 by default)
- "news_count" - int (0 by default)
- "email" - str
- "href" - str
- "wallets" - list
Verified: True
|
codesearchnet
|
def set_membership(self, room_id, user_id, membership, reason="", profile=None,
timestamp=None):
if profile is None:
profile = {}
body = {
"membership": membership,
"reason": reason
}
if 'displayname' in profile:
body["displayname"] = profile["displayname"]
if 'avatar_url' in profile:
body["avatar_url"] = profile["avatar_url"]
return self.send_state_event(room_id, "m.room.member", body, state_key=user_id,
timestamp=timestamp)
|
Perform PUT /rooms/$room_id/state/m.room.member/$user_id
Args:
room_id (str): The room ID
user_id (str): The user ID
membership (str): New membership value
reason (str): The reason
timestamp (int): Set origin_server_ts (For application services only)
|
juraj-google-style
|
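A hedged usage sketch, assuming this method comes from the matrix_client SDK (the homeserver URL, token, and IDs are placeholders):

from matrix_client.api import MatrixHttpApi

api = MatrixHttpApi("https://matrix.example.org", token="ACCESS_TOKEN")
api.set_membership("!roomid:example.org", "@alice:example.org", "join",
                   reason="bot-managed join",
                   profile={"displayname": "Alice"})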
def __sid_to_username(sid):
if sid is None or sid == '':
return ''
try:
sid_bin = win32security.GetBinarySid(sid)
except pywintypes.error as exc:
raise ValueError(
'pkg: Software owned by {0} is not valid: [{1}] {2}'.format(sid, exc.winerror, exc.strerror)
)
try:
name, domain, _account_type = win32security.LookupAccountSid(None, sid_bin)
user_name = '{0}\\{1}'.format(domain, name)
except pywintypes.error as exc:
if exc.winerror == winerror.ERROR_NONE_MAPPED:
return sid
else:
raise ValueError(
'Failed looking up sid \'{0}\' username: [{1}] {2}'.format(sid, exc.winerror, exc.strerror)
)
try:
user_principal = win32security.TranslateName(
user_name,
win32api.NameSamCompatible,
win32api.NameUserPrincipal)
except pywintypes.error as exc:
if exc.winerror in (winerror.ERROR_NO_SUCH_DOMAIN,
winerror.ERROR_INVALID_DOMAINNAME,
winerror.ERROR_NONE_MAPPED):
return '{0}@{1}'.format(name.lower(), domain.lower())
else:
raise
return user_principal
|
Takes a valid Windows Security Identifier (SID) and returns a username.
Args:
sid (str): Security Identifier (SID).
Returns:
str: Username in the format of username@realm or username@computer.
|
juraj-google-style
|
def set_hasher(self, hash, rounds=None):
hash = hash.replace('-', '_')
if (hash not in VALID_HASHERS):
raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)
hasher = getattr(ph, hash)
utils.test_hasher(hasher)
default_rounds = getattr(hasher, 'default_rounds', 1)
min_rounds = getattr(hasher, 'min_rounds', 1)
max_rounds = getattr(hasher, 'max_rounds', float('inf'))
rounds = min(max((rounds or default_rounds), min_rounds), max_rounds)
op = {'schemes': (VALID_HASHERS + DEPRECATED_HASHERS), 'deprecated': DEPRECATED_HASHERS, 'default': hash, (hash + '__default_rounds'): rounds}
self.hasher = CryptContext(**op)
self.hash = hash.replace('_', '-')
self.rounds = rounds
|
Updates the hash algorithm and, optionally, the number of rounds
to use.
Raises:
`~WrongHashAlgorithm` if the new algorithm isn't one of the three
recommended options.
|
codesearchnet
|
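A minimal sketch of the passlib CryptContext this method builds internally (the scheme names and round count are illustrative; the real VALID_HASHERS list comes from the surrounding module):

from passlib.context import CryptContext

hasher = CryptContext(
    schemes=["pbkdf2_sha512", "bcrypt"],
    default="pbkdf2_sha512",
    pbkdf2_sha512__default_rounds=53000,
)
stored = hasher.hash("s3cret")
assert hasher.verify("s3cret", stored)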
def _open_script_interface(self, connection_id, callback):
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, "Could not find connection information")
return
success = HighSpeedChar in context['services'][TileBusService]
reason = None
if not success:
reason = 'Could not find high speed streaming characteristic'
callback(connection_id, self.id, success, reason)
|
Enable script streaming interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
juraj-google-style
|
def _get_values(self, data_blob, dtype_enum, shape_string):
buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
|
Obtains values for histogram data given blob and dtype enum.
Args:
data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
shape_string: A comma-separated string of numbers denoting shape.
Returns:
The histogram values as a list served to the frontend.
|
juraj-google-style
|
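The same decode round trip, sketched with plain NumPy (dtype and shape are arbitrary; the real method maps the stored dtype enum through tf.DType first):

import numpy as np

shape_string = '2,3'
blob = np.arange(6, dtype=np.float32).tobytes()
buf = np.frombuffer(blob, dtype=np.float32)
values = buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
# values == [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]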
def send_log_messages(self, messages: List[LogMessage]) -> None:
pass
|
Sends multiple log messages to be handled.
Args:
* messages: list of LogMessage dictionaries
Returns:
* None
|
github-repos
|
def resolve_backend_name(name, backends, deprecated, aliased):
available = [backend.name() for backend in backends]
resolved_name = deprecated.get(name, aliased.get(name, name))
if isinstance(resolved_name, list):
resolved_name = next((b for b in resolved_name if (b in available)), '')
if (resolved_name not in available):
raise LookupError("backend '{}' not found.".format(name))
if (name in deprecated):
logger.warning("WARNING: '%s' is deprecated. Use '%s'.", name, resolved_name)
return resolved_name
|
Resolve backend name from a deprecated name or an alias.
A group will be resolved in order of member priorities, depending on
availability.
Args:
name (str): name of backend to resolve
backends (list[BaseBackend]): list of available backends.
deprecated (dict[str: str]): dict of deprecated names.
aliased (dict[str: list[str]]): dict of aliased names.
Returns:
str: resolved name (name of an available backend)
Raises:
LookupError: if name cannot be resolved through regular available
names, nor deprecated, nor alias names.
|
codesearchnet
|
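A self-contained sketch with stub backends (the backend names, alias map, and deprecation map are made up; the function above and its module-level logger are assumed to be in scope):

class StubBackend:
    def __init__(self, name):
        self._name = name
    def name(self):
        return self._name

backends = [StubBackend('qasm_simulator'), StubBackend('statevector_simulator')]
deprecated = {'local_qasm_simulator': 'qasm_simulator'}
aliased = {'simulator': ['qasm_simulator', 'statevector_simulator']}

resolve_backend_name('simulator', backends, deprecated, aliased)             # -> 'qasm_simulator'
resolve_backend_name('local_qasm_simulator', backends, deprecated, aliased)  # -> 'qasm_simulator', with a deprecation warning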
def _reconstruct_sequence_inputs(op_def, inputs, attrs) -> list[Union[tensor_lib.Tensor, list[tensor_lib.Tensor]]]:
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
|
Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
|
github-repos
|
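The grouping logic itself is simple; a stripped-down sketch without the protobuf types (the input lengths are made up):

def group_flat_inputs(flat_inputs, lengths):
    # lengths[i] is None for a scalar input, or the sequence length otherwise.
    grouped, i = [], 0
    for length in lengths:
        if length is None:
            grouped.append(flat_inputs[i])
            i += 1
        else:
            grouped.append(flat_inputs[i:i + length])
            i += length
    assert i == len(flat_inputs)
    return grouped

group_flat_inputs(['a', 'b', 'c', 'd'], [None, 2, None])  # -> ['a', ['b', 'c'], 'd']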
def get_create_agent(agent_kwargs):
def create_agent(sess, environment, summary_writer=None):
'Creates a DQN agent.\n\n Simplified version of `dopamine.discrete_domains.train.create_agent`\n\n Args:\n sess: a session\n environment: an environment\n summary_writer: a summary writer.\n\n Returns:\n a DQN agent.\n '
return BatchDQNAgent(env_batch_size=environment.batch_size, sess=sess, num_actions=environment.action_space.n, summary_writer=summary_writer, tf_device='/gpu:*', **agent_kwargs)
return create_agent
|
Factory for dopamine agent initialization.
Args:
agent_kwargs: dict of BatchDQNAgent parameters
Returns:
Function(sess, environment, summary_writer) -> BatchDQNAgent instance.
|
codesearchnet
|
def with_attributes(name, checkpointable_objects=None, functions=None, copy_from=None):
checkpointable_objects = checkpointable_objects or []
functions = functions or []
if copy_from is not None:
for cls in copy_from:
checkpointable_objects.extend(cls.all_checkpointable_objects)
functions.extend(cls.all_functions)
classdict = {'all_checkpointable_objects': set(checkpointable_objects), 'all_functions': set(functions)}
return type(name, (SerializedAttributes,), classdict)
|
Creates a subclass with all attributes as specified in the arguments.
Args:
name: Name of subclass
checkpointable_objects: List of checkpointable objects to be serialized
in the SavedModel.
functions: List of functions to be serialized in the SavedModel.
copy_from: List of other SerializedAttributes subclasses. The returned
class will copy checkpoint objects/functions from each subclass.
Returns:
Child class with attributes as defined in the `checkpointable_objects`
and `functions` lists.
|
github-repos
|
def _ParseRecordLogline(self, parser_mediator, structure):
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
datetime_iso8601 = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(datetime_iso8601)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = GoogleDriveSyncLogEventData()
event_data.log_level = structure.log_level
event_data.pid = structure.pid
event_data.thread = structure.thread
event_data.source_code = structure.source_code
event_data.message = structure.message.replace('\n', ' ')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a logline record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
|
juraj-google-style
|
def _shard_num_args(self, constant_dict: Dict[(str, Any)]=None) -> List[Dict[(str, Any)]]:
args = []
for shard_num in range(self._num_shards):
append_dict = (dict(constant_dict) if constant_dict else {})
append_dict['shard_num'] = shard_num
append_dict['num_shards'] = self._num_shards
append_dict['num_shard_qubits'] = self._num_shard_qubits
append_dict.update(self._shared_mem_dict)
args.append(append_dict)
return args
|
Helper that returns a list of dicts, each including a 'shard_num' entry.
The dict for each entry also includes shared_mem_dict, the number of
shards, the number of shard qubits, and the supplied constant dict.
Args:
constant_dict: Dictionary that will be updated to every element of
the returned list of dictionaries.
Returns:
A list of dictionaries. Each dictionary is constant except for the
'shard_num' key which ranges from 0 to number of shards - 1.
Included keys are 'num_shards' and 'num_shard_qubits' along with
all the keys in constant_dict.
|
codesearchnet
|
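A stripped-down sketch of the per-shard argument construction (field names mirror the method above; the shard count and constants are made up):

def shard_num_args(num_shards, shared_mem_dict, constant_dict=None):
    args = []
    for shard_num in range(num_shards):
        d = dict(constant_dict) if constant_dict else {}
        d['shard_num'] = shard_num
        d['num_shards'] = num_shards
        d.update(shared_mem_dict)
        args.append(d)
    return args

shard_num_args(2, {'num_shard_qubits': 3}, {'seed': 42})
# -> [{'seed': 42, 'shard_num': 0, 'num_shards': 2, 'num_shard_qubits': 3},
#     {'seed': 42, 'shard_num': 1, 'num_shards': 2, 'num_shard_qubits': 3}]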
def _async_open(self, session_id, proto_version):
try:
(yield self.application_context.create_session_if_needed(session_id, self.request))
session = self.application_context.get_session(session_id)
protocol = Protocol(proto_version)
self.receiver = Receiver(protocol)
log.debug('Receiver created for %r', protocol)
self.handler = ProtocolHandler()
log.debug('ProtocolHandler created for %r', protocol)
self.connection = self.application.new_connection(protocol, self, self.application_context, session)
log.info('ServerConnection created')
except ProtocolError as e:
log.error('Could not create new server session, reason: %s', e)
self.close()
raise e
msg = self.connection.protocol.create('ACK')
(yield self.send_message(msg))
raise gen.Return(None)
|
Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and handler
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID for the session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None
|
codesearchnet
|
def _concatenate_inner(self, direction):
tmp_bucket = []
source_chunks = (self if direction else self[::(- 1)])
target_chunks = ChunkList()
for chunk in source_chunks:
if ((chunk.dependency == direction) or ((direction is False) and chunk.is_space())):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if (not direction):
tmp_bucket = tmp_bucket[::(- 1)]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label, dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if (not direction):
target_chunks = target_chunks[::(- 1)]
self.list = target_chunks
|
Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
|
codesearchnet
|
def get_strategy() -> 'StrategyBase':
return _get_per_thread_mode().strategy
|
Returns the current `tf.distribute.Strategy` object.
Typically only used in a cross-replica context:
```
if tf.distribute.in_cross_replica_context():
strategy = tf.distribute.get_strategy()
...
```
Returns:
A `tf.distribute.Strategy` object. Inside a `with strategy.scope()` block,
it returns `strategy`, otherwise it returns the default (single-replica)
`tf.distribute.Strategy` object.
|
github-repos
|
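A short illustration (requires TensorFlow; MirroredStrategy is just one concrete strategy):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    assert tf.distribute.get_strategy() is strategy  # inside the scope: the active strategy
default = tf.distribute.get_strategy()               # outside: the default (single-replica) strategy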
def _transpile_circuit(circuit_config_tuple):
(circuit, transpile_config) = circuit_config_tuple
if transpile_config.pass_manager:
pass_manager = transpile_config.pass_manager
elif transpile_config.coupling_map:
pass_manager = default_pass_manager(transpile_config.basis_gates, transpile_config.coupling_map, transpile_config.initial_layout, transpile_config.seed_transpiler)
else:
pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)
return pass_manager.run(circuit)
|
Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit
|
codesearchnet
|
def _avro_rows(block, avro_schema):
blockio = six.BytesIO(block.avro_rows.serialized_binary_rows)
while True:
try:
(yield fastavro.schemaless_reader(blockio, avro_schema))
except StopIteration:
break
|
Parse all rows in a stream block.
Args:
block ( \
~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
):
A block containing Avro bytes to parse into rows.
avro_schema (fastavro.schema):
A parsed Avro schema, used to deserialize the bytes in the
block.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
|
codesearchnet
|
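A hedged round-trip sketch with fastavro (the schema and record are made up; real blocks come from the BigQuery Storage API):

import io
import fastavro

schema = fastavro.parse_schema({
    'type': 'record', 'name': 'Row',
    'fields': [{'name': 'id', 'type': 'long'}, {'name': 'word', 'type': 'string'}],
})
buf = io.BytesIO()
fastavro.schemaless_writer(buf, schema, {'id': 1, 'word': 'hello'})
buf.seek(0)
row = fastavro.schemaless_reader(buf, schema)  # -> {'id': 1, 'word': 'hello'}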
def can_fetch(self, request: Request, file=None) -> bool:
try:
return self.can_fetch_pool(request)
except NotInPoolError:
pass
(yield from self.fetch_robots_txt(request, file=file))
return self.can_fetch_pool(request)
|
Return whether the request can be fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine.
|
codesearchnet
|
def append(
self,
moment_or_operation_tree: Union[ops.Moment, ops.OP_TREE],
strategy: InsertStrategy = InsertStrategy.EARLIEST):
self.insert(len(self._moments), moment_or_operation_tree, strategy)
|
Appends operations onto the end of the circuit.
Moments within the operation tree are appended intact.
Args:
moment_or_operation_tree: The moment or operation tree to append.
strategy: How to pick/create the moment to put operations into.
|
juraj-google-style
|
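A typical usage sketch, assuming the cirq library this method appears to come from (the gates chosen are arbitrary):

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit()
circuit.append([cirq.H(a), cirq.CNOT(a, b)])
circuit.append(cirq.measure(a, b), strategy=cirq.InsertStrategy.NEW_THEN_INLINE)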