content | sha1 | id
---|---|---|
def _set_default_construction_id(construction_id: int, subcategory_id: int) -> int:
"""Set the default construction ID for switches.
:param construction_id: the current construction ID.
:param subcategory_id: the subcategory ID of the switch with missing defaults.
    :return: the default construction ID.
:rtype: int
"""
if construction_id > 0:
return construction_id
try:
return {
1: 1,
2: 1,
}[subcategory_id]
except KeyError:
return 0
|
57e9544a3bca0c443fe89c38a9960fd9d4797ac2
| 232,644 |
import json
def index_by_page_id(s):
"""Pair a dict with a page_id key, pair the page_id with the dict.
Args:
s: a dict as a JSON document.
Returns:
a tuple of page_id, and parsed Python dict.
"""
d = json.loads(s)
return (d['page_id'], d)
|
a960a645d56864edec7c040795e2ae5f71a26bf0
| 583,902 |
def dependency_mapping(dep):
"""
+-----------+-----------------------------------+
| EGrid Tag | Dependency Tag |
+===========+===================================+
    | S         | nsubj, csubj, csubjpass, dsubjpass|
+-----------+-----------------------------------+
| O | iobj, obj, pobj, dobj |
+-----------+-----------------------------------+
| X | For any other dependency tag |
+-----------+-----------------------------------+
"""
S = ['nsubj', 'csubj', 'csubjpass', 'dsubjpass']
O = ['iobj', 'obj', 'pobj', 'dobj']
if S.count(dep) == 1:
return 'S'
if O.count(dep) == 1:
return 'O'
return 'X'
|
065ff113142a0014e1f7c033c0027ede73225ac0
| 125,282 |
def as_list(x):
"""Convert x to a list if it is an iterable; otherwise, wrap it in a list."""
try:
return list(x)
except TypeError:
return [x]
|
b44b22ae624badeae3ae1a3061f12e17f0cb66d6
| 432,830 |
def get_flattened_qconfig_dict(qconfig_dict):
""" flatten the global, object_type and module_name qconfig
to the same qconfig_dict so that it can be used by
propagate_qconfig_ function.
"module_name_regex" is ignored for now since it's not supported
in propagate_qconfig_, but it can be fixed later.
For example:
Input: {
"": qconfig,
"object_type": [
(torch.add, qconfig)
],
"module_name": [
("conv", qconfig)
]
}
Output: {
"": qconfig,
torch.add: qconfig,
"conv": qconfig
}
"""
flattened = dict()
if '' in qconfig_dict:
flattened[''] = qconfig_dict['']
def flatten_key(key):
if key in qconfig_dict:
for obj, qconfig in qconfig_dict[key]:
flattened[obj] = qconfig
flatten_key('object_type')
flatten_key('module_name')
return flattened
|
abcba255a69345a9fe1d2d09ddde0b51f2a6abcd
| 300,318 |
def apply_ants_transform_to_point(transform, point):
"""
Apply transform to a point
ANTsR function: `applyAntsrTransformToPoint`
Arguments
---------
    transform : ANTsTransform
        transform that will be applied to the point
    point : list/tuple
        point to which the transform will be applied
Returns
-------
tuple : transformed point
Example
-------
>>> import ants
>>> tx = ants.new_ants_transform()
>>> params = tx.parameters
>>> tx.set_parameters(params*2)
>>> pt2 = tx.apply_to_point((1,2,3)) # should be (2,4,6)
"""
return transform.apply_to_point(point)
|
87ebb8c562c3b3c8ed297efac652a452f708f3e3
| 673,095 |
import re
def clean_column(s):
""" utils function that clean a string to be a cleaner name of column
Parameter
---------
s : string
the string to clean
Return
------
cleaned string
"""
if s is None:
return None
r = s.strip().lower()
r = re.sub(r"[?\(\)/\[\]\\]", "", r)
r = re.sub(r"[:' \-\.\n]", "_", r)
r = re.sub("_+", "_", r)
r = r.replace("#", "number")
r = r.replace("%", "pct")
r = r.replace("$", "usd")
r = r.replace("&", "_and_")
r = r.replace("€", "eur")
r = r.replace("£", "gbp")
r = r.replace("¥", "jpy")
return r
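A quick illustrative call (the input string is made up), showing how the two regex passes and the symbol replacements combine:
# strip/lower, drop brackets and slashes, spaces -> "_", collapse "_", "$" -> "usd"
assert clean_column("  Price ($) / Unit ") == "price_usd_unit"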
|
dfb26fa641738729d4d4c3dc3d1d16e32f575fc3
| 672,098 |
def from_dict(x: dict) -> int:
""" Converts a dictionary containing the prime factors and powers of an integer into it's integer form."""
res = 1
for p, r in x.items(): res *= p**r
return res
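A short usage sketch (added for illustration): the factorisation {2: 3, 5: 1} maps back to 2**3 * 5**1.
assert from_dict({2: 3, 5: 1}) == 40
assert from_dict({}) == 1  # the empty factorisation is the multiplicative identity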
|
8fab9139694b5fb19cc2a3e3b3ca08e136c32e32
| 575,325 |
def scale(val, minx, maxx, minscale, maxscale):
""" Scales a value in one range to another. """
# https://stackoverflow.com/a/5295202
    return (maxscale - minscale) * (val - minx) / (maxx - minx) + minscale
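Two quick sanity checks of the linear mapping (illustrative values; they assume the parenthesised (maxx - minx) denominator used above):
assert scale(5, 0, 10, 0, 100) == 50.0
assert scale(0, 0, 10, -1, 1) == -1.0  # bottom of the input range maps to the bottom of the output range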
|
0768fa72421a185aacb2bf78c04326422fb4a470
| 642,942 |
def filename_to_image_name_tag(filename):
""" Get image name and tag using image filename"""
filename = filename.replace(".docker", "")
name_tag_list = filename.rsplit("-", 1)
name = name_tag_list[0].replace("-", "/", 1)
tag = name_tag_list[1]
name_and_tag = name + ":" + tag
return name_and_tag
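An illustrative call (the filename is made up), assuming names of the form '<org>-<image>-<tag>.docker':
assert filename_to_image_name_tag("library-nginx-1.21.docker") == "library/nginx:1.21"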
|
4c633ec81e5aa9ae1e7f0e35307bc556c1294e1d
| 226,563 |
def setup_element(format_element, element_count, max_element_count):
"""Interpolate element specific setup string."""
return format_element.format(
max_element_count=max_element_count, element_count=element_count
)
|
9b4e74abfa2361d83180a2ae6a83c7a5901efad3
| 599,049 |
def encode_public_key(key):
"""Encodes a public RSA key in ASN.1 format as defined by x.509"""
return key.publickey().exportKey(format="DER")
|
321bf44954b86bb4750116b1f93f36ae17902725
| 537,007 |
def _trace_dense(op): # pragma: no cover
"""Trace of a dense operator.
"""
x = 0.0
for i in range(op.shape[0]):
x += op[i, i]
return x
|
e309d74c5e39834eb3c4d7382172ba0fd71b7130
| 40,594 |
def get_hr (point):
"""
    Returns the heart rate at the GPX point. Heart rate is provided by the GPX equipment.
    If it does not exist, 0 is returned.
    Parameters
    ----------
    point: the GPX point from which to read the heart rate
"""
for extension in point.extensions:
if extension.tag != 'power':
for exten in extension:
if exten.tag == '{http://www.garmin.com/xmlschemas/TrackPointExtension/v1}hr':
return int(exten.text)
return 0
|
2932be6397b8cea41257ef0999b16e3b5d42053e
| 552,779 |
def _pysymbol_key(name):
"""Return a sortable key index for name.
Sorting is case-insensitive, with the first underscore counting as
worse than any character, but subsequent underscores do not. This
means that dunder symbols (like __init__) are sorted after symbols
that start with an alphabetic character, but before those that
start with only a single underscore.
"""
if name.startswith("_"):
name = "~" + name[1:]
return name.lower()
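A small illustrative sort using the key, matching the ordering described in the docstring:
names = ["_private", "__init__", "apple", "Banana"]
assert sorted(names, key=_pysymbol_key) == ["apple", "Banana", "__init__", "_private"]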
|
0eca4738a3702f79337852f13cc62b45d5348cf5
| 603,106 |
def format_run_status(run, default='-'):
"""common formatting success boolean field"""
if not run:
return default
return run.success
|
35f9c3cae32cf0a0082ff9490fb9cd4de19647b6
| 572,085 |
import logging
from datetime import datetime
def TimedCommand(functor, *args, **kwargs):
"""Wrapper for simple log timing of other python functions.
If you want to log info about how long it took to run an arbitrary command,
you would do something like:
TimedCommand(RunCommand, ['wget', 'http://foo'])
Args:
functor: The function to run.
args: The args to pass to the function.
kwargs: Optional args to pass to the function.
timed_log_level: The log level to use (defaults to logging.INFO).
timed_log_msg: The message to log after the command completes. It may have
keywords: "name" (the function name), "args" (the args passed to the
func), "kwargs" (the kwargs passed to the func), "ret" (the return value
from the func), and "delta" (the timing delta).
timed_log_callback: Function to call upon completion (instead of logging).
Will be passed (log_level, log_msg, result, datetime.timedelta).
"""
log_msg = kwargs.pop(
'timed_log_msg',
'%(name)s(*%(args)r, **%(kwargs)r)=%(ret)s took: %(delta)s')
log_level = kwargs.pop('timed_log_level', logging.INFO)
log_callback = kwargs.pop('timed_log_callback', None)
start = datetime.now()
ret = functor(*args, **kwargs)
delta = datetime.now() - start
log_msg %= {
'name': getattr(functor, '__name__', repr(functor)),
'args': args,
'kwargs': kwargs,
'ret': ret,
'delta': delta,
}
if log_callback is None:
logging.log(log_level, log_msg)
else:
log_callback(log_level, log_msg, ret, delta)
return ret
|
36e86cf15657b258c2a085afe2c75eba90d21232
| 83,519 |
def check_index(index: str, values: list, messages: list) -> bool:
"""Checks that min-max values are a two-items list.
Parameters
----------
index : str
Numeric indicator.
values : list
Metadata variables in criteria.
    messages : list
        Messages to append to in case of error.
    Returns
    -------
    boolean : bool
        True if the values do not form a valid two-item min-max list.
"""
boolean = False
if index == '2' and len(values) != 2:
        messages.append('For min-max subsetting, a two-item list is needed: no min (or no max) should be "None"')
boolean = True
return boolean
|
eb93bbbd681b4cc73f4c082170b76ec6b66bb72a
| 122,958 |
def clean_name(name: str) -> str:
"""
    Changes a word or a few words to lower case, separated by _ instead of spaces.
    Args:
        name (str): the name to convert.
    Returns:
        str: the lower-cased, underscore-separated name.
"""
return '_'.join(name.split(' ')).lower()
|
b3cbb412c9468512b7b1da4c42f994da57bc4147
| 349,149 |
import requests
import ast
def prepare_api_request(request):
"""Takes a dict of the form:
{"type": "POST",
"url": "www.xyz.com/api/v1.0/order",
"params: {"param_1": "start_time=1"},
"headers": {"header_1": "header"},
"data": {"data_1": "some_data"},
"json": {"json_data": {"json_stuff": "data"}}
}
Only working with GET or POST currently
"""
req = requests.Request(request["type"])
req.url = request["url"]
req.headers = request["headers"] if "headers" in request else {}
req.data = ast.literal_eval(request["data"]) if "data" in request else []
req.params = ast.literal_eval(request["params"]) if "params" in request else {}
req.json = ast.literal_eval(request["json"]) if "json" in request else {}
prepped = req.prepare()
return prepped
|
0c7aabced68e3ad8533ee673bf1768b6a80dfcef
| 78,171 |
from typing import OrderedDict
def value_label(label_value: str, label_text: str) -> OrderedDict:
"""Prepare a Stata XML value label (label) element."""
return OrderedDict([
('@value', str(int(label_value))),
('#text', label_text)
])
|
d62a99f4084620bcab5ce15e7c2b001d6dc652ec
| 560,709 |
def gl_get_projects(gl, config):
"""Return a list of all the projects.
This function looks at the config and returns all the projects of a user if
`user` is specified and all the projects of a group if `group` is specified
in the config.
:param gl: An instance of Gitlab's API.
:type gl: :class:`gitlab.Gitlab`
    :param config: Configuration for Gitlab from config file.
:type config: dict
"""
projects = []
if 'group' in config:
for group_name in config['group'].split():
group = gl.groups.get(group_name)
projects.extend(group.projects.list())
if 'user' in config:
for username in config['user'].split():
user = gl.users.list(username=username)[0]
projects.extend(user.projects.list())
return projects
|
1a54691a10a215235d35411e51fde0175b9eba9b
| 221,369 |
def convert_dms_to_dd(degrees, minutes, seconds):
"""Return a decimal degrees value by converting from degrees, minutes, seconds.
Cite: https://gist.github.com/tsemerad/5053378"""
return degrees + float(minutes) / 60 + float(seconds) / 3600
|
f77fd0f66504e6ca2942050eb64647d318d40c1b
| 266,015 |
def unmake_xml_name(attr_name):
""" Convert an XML attribute name to its pythonic equivalent by replacing
all '-' with '_'. CamelCase names are retained as such.
:param str attr_name: Name of the attribute
:returns: Attribute name in pythonic format.
"""
return attr_name.replace('-', '_')
|
51ad880821cdc3bea5b9064b81129506f949e0a7
| 250,074 |
def extend_schema_serializer(many=None):
"""
Decorator for the "serializer" kind. Intended for overriding default serializer behaviour that
cannot be influenced through `.extend_schema`.
:param many: override how serializer is initialized. Mainly used to coerce the list view detection
heuristic to acknowledge a non-list serializer.
"""
def decorator(klass):
if not hasattr(klass, '_spectacular_annotation'):
klass._spectacular_annotation = {}
if many is not None:
klass._spectacular_annotation['many'] = many
return klass
return decorator
|
732429ba047a446bc1023b3e80b8848791da0394
| 468,434 |
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
|
3a09c95a4b223142d6be4c1002000e98fe2d80b8
| 538,985 |
def vhdl_fixed_start(address):
"""
Generate the start of a line in the VHDL ROM.
:param address: address of the ROM line.
    :return: a string containing the start of the ROM line.
"""
rom_start = '\t\t%3d => "' % address
return rom_start
|
25759a923465826e8b2c1269cb3269ec8a1abaf8
| 299,720 |
def get_redshift_iam_role_arn(iam, iam_role_name):
"""
Retrieves IAM Role ARN
Args:
iam: IAM client.
iam_role_name: IAM Role name.
Returns:
IAM ROLE ARN
"""
return iam.get_role(RoleName=iam_role_name)['Role']['Arn']
|
ec226b6676a110136c83e83061620c2ea12e371b
| 306,458 |
def shorten_sha(sha):
    """
    return the short (7-character) version of a git sha
    """
    return sha[:7]
|
76db594b9bf3796a3227c363fe899acaf35dfa2d
| 565,844 |
def date_parser(dates):
"""
removes time from the date
eg input: dates[:3] == [
'2019-11-29 12:50:54',
'2019-11-29 12:46:53',
'2019-11-29 12:46:10']
output: ['2019-11-29', '2019-11-29', '2019-11-29']
"""
#Create a new list with a list comprehension that split time from the date
parsed_dates=[i.split()[0] for i in dates]
    return parsed_dates
|
364c0b8debe9610eafd04df9f1f4acdce02a5b40
| 420,415 |
def mark_backend_unsupported(func):
"""Mark a method as being not supported by a backend."""
func._oculy_backend_unsupported = True
return func
|
5d1053dfcfeaf7a078a0b8cb87321152d9ee1111
| 667,009 |
def _generate_name(name):
"""
Generates a user-friendly name out of
Command or Ability class names, by
inserting spaces in camel cased names
as well as truncating 'Command' and 'Ability'
in the names.
:param name: string, name of a class.
hint: Command or Ability subclass
:return: str, 'SetTimeCommand' -> 'Set Time'
"""
new_name = ""
for i in ("Ability", "feature", "Command", "command"):
name = name.replace(i, "")
for i, c in enumerate(name):
if i > 0 and c.isupper():
new_name += " "
new_name += c
return new_name
|
b6286371597eb67fc2e4f0ecb73db2d631385742
| 433,967 |
import secrets
def synthesize_secrets(record):
"""
For a single record, synthesize sensitive fields
- signature
- data
- chain
- serial_number
Returns updated version of the record (dict) with synthetic secrets
"""
secret = secrets.token_urlsafe(684)
record["identity_certificate"]["signature"] = secret
data = secrets.token_urlsafe(3328)
record["identity_certificate"]["data"] = data
chain = secrets.token_urlsafe(5920)
record["identity_certificate"]["chain"] = chain
serial = secrets.token_urlsafe(24)
record["identity_certificate"]["serial_number"] = serial
return record
|
2df586aa313ed58d2b53a2d3248d2cbe69e8deae
| 388,306 |
def string_to_list(coor_str):
"""
    Method that transforms a string representation of coordinate lists into a tuple
    of latitude and longitude value lists.
Params:
----------
    coor_str: String
- List of coordinate lists
Return:
----------
    (lat, lon): Tuple
        - Lists of latitude and longitude values.
"""
b = coor_str.replace("[", "")
b = b.replace("]", "")
coor_str = b.split(", ")
# print(coor_str)
lat = []
lon = []
for i in range(len(coor_str)):
if i % 2 == 0:
lat.append(float(coor_str[i]))
else:
lon.append(float(coor_str[i]))
# print("Lat: ", sum(lat) / len(lat))
# print("Lon: ", sum(lon) / len(lon))
return (lat, lon)
|
b16419c56da30808aaaa38d99d1704dd9ce3f51b
| 579,214 |
import re
def take_first_chain_from_list(chainList):
"""
Takes first chain from a chain list
returns the header: "chain 52633 chr..." and a list of lines of this chain
returns twice None if no chains are present
"""
headLine = None
chainContent = None
chainStart = None
chainEnd = None
for pos in range(0, len(chainList)):
line = chainList[pos]
# check if chain line
m = re.match(r"chain", line)
if m is not None:
headLine = line.strip("\n")
# process and store end position
pos += 1
line = chainList[pos]
chainStart = pos
            while re.match(r"^\d+", line) is not None:
pos += 1
line = chainList[pos]
chainEnd = pos # actually position after chain
# don't process lower scoring chains
break
# extract chain
if chainStart is not None:
chainContent = chainList[chainStart:chainEnd]
return (headLine, chainContent)
|
7ea7ebc0b1241453d3a66a884e54e753b4844f28
| 324,464 |
def make_pair(coll, lbracket='<', rbracket='>'):
"""
A context aware function for making a string representation of elements of relationships.
It takes into account the length of the element. If there is just one element, the brackets are
    left off, but when there are more, all the elements will be separated by a comma, and the
    brackets will be inserted as well.
:param coll: The collection that needs to be printed. Can be a generator, but cannot be
infinite.
:param str lbracket: The bracket that goes on the left.
:param str rbracket: The bracket that goes on the right.
:returns: A context aware string representation of the pair.
:rtype: str
"""
coll = list(coll)
if len(coll) == 1:
return str(coll[0])
    return lbracket + ', '.join(str(x) for x in coll) + rbracket
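A brief illustration of the context-aware behaviour (single element vs. several; the tags are made up):
assert make_pair(["nsubj"]) == "nsubj"
assert make_pair(["nsubj", "dobj"]) == "<nsubj, dobj>"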
|
2d3726adb7765a2a0eb2fc6fe238427b683f68e3
| 690,361 |
from typing import Optional
def _is_in_range(a_list: list,
min: Optional[float] = None,
max: Optional[float] = None) -> bool:
"""
Return True if `a_list` ontains values between `min` and `max`, False
otherwise
"""
for el in a_list:
if min is not None:
if el < min:
return False
if max is not None:
if el > max:
return False
return True
|
af50579279e98459dd4ed759c7fe36efd1caeff7
| 682,079 |
from typing import List
def remove_comments(lines: List[str]) -> List[str]:
"""Remove comments and empty lines"""
formatlines = []
for line in lines:
        # strip a trailing // comment, if any
        if "//" in line:
            line = line[:line.index("//")].strip()
        # ignore blank lines (including lines that were pure comments)
        if line.strip():
            formatlines.append(line)
return formatlines
|
6141f9c9e06481cbbca46b82806d0a48a9703d6c
| 477,729 |
def enable() -> dict:
"""Enables headless events for the target."""
return {"method": "HeadlessExperimental.enable", "params": {}}
|
7d1a227dd0788e1fe3bf211dc4b10895f3de3b53
| 326,805 |
def colored(fmt, fg=None, bg=None, style=None):
"""
Return colored string.
List of colours (for fg and bg):
- k: black
- r: red
- g: green
- y: yellow
- b: blue
- m: magenta
- c: cyan
- w: white
List of styles:
- b: bold
- i: italic
- u: underline
- s: strike through
- x: blinking
- r: reverse
- y: fast blinking
- f: faint
- h: hide
Args:
fmt (str): string to be colored
fg (str): foreground color
bg (str): background color
style (str): text style
"""
colcode = {
'k': 0, # black
'r': 1, # red
'g': 2, # green
'y': 3, # yellow
'b': 4, # blue
'm': 5, # magenta
'c': 6, # cyan
'w': 7 # white
}
fmtcode = {
'b': 1, # bold
'f': 2, # faint
'i': 3, # italic
'u': 4, # underline
'x': 5, # blinking
'y': 6, # fast blinking
'r': 7, # reverse
'h': 8, # hide
's': 9, # strike through
}
# properties
props = []
if isinstance(style, str):
props = [fmtcode[s] for s in style]
if isinstance(fg, str):
props.append(30 + colcode[fg])
if isinstance(bg, str):
props.append(40 + colcode[bg])
# display
props = ';'.join([str(x) for x in props])
if props:
return '\x1b[%sm%s\x1b[0m' % (props, fmt)
else:
return fmt
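An illustrative use (added here; the escape codes only render on ANSI-capable terminals):
print(colored("error:", fg='r', style='b'), colored("disk almost full", fg='y'))
print(colored("plain text"))  # no properties given, so the string is returned unchanged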
|
dd37301b36398d6d2359da8e5c0e54509b3838b0
| 238,401 |
import csv
def lodict2csv(listofdicts, out, fnames=None, header=True):
"""
Write a dictionary list with csv formatting to the stream out.
:param fnames: when provided as a list, it is used as the column selection,
        otherwise all keys occurring at least once are used.
:param header: should the header be written
"""
if fnames is None:
fnames = set([])
for dictionary in listofdicts:
fnames.update(list(dictionary.keys()))
fnames = sorted(fnames)
csv_writer = csv.DictWriter(out,
fieldnames=fnames,
restval='NA',
extrasaction='ignore')
if header:
csv_writer.writeheader()
csv_writer.writerows(listofdicts)
return len(listofdicts)
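A self-contained usage sketch with an in-memory stream (the rows are made up); columns are inferred from all keys and missing values are filled with 'NA':
import io
rows = [{"name": "a", "x": 1}, {"name": "b"}]
buf = io.StringIO()
lodict2csv(rows, buf)  # header "name,x", then rows "a,1" and "b,NA"
print(buf.getvalue())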
|
228936ae84b6977504573376895070f96c986a9a
| 116,169 |
def get_users(f_path):
"""
    Get user names and password hashes from a file
"""
out = {}
with open(f_path) as file:
for d in file:
h = d.split(':')[1]
if h != '*':
u = str(d.split(':')[0]).strip()
if len(u) > 0:
out[u] = h
return out
|
c82a1b2ae844f48695f58997fcebd5cf44c0e11a
| 676,566 |
def splitLine(text):
"""split a line of text on the first space character and return
two strings, the first word and the remaining string. This is
used for parsing the incoming messages from left to right since
the command and its arguments are all delimited by spaces and
the command may not contain spaces"""
sp = text.split(" ")
try:
a = sp[0]
b = " ".join(sp[1:])
except:
a = text
b = ""
return a, b
|
82ce7005f18c22de6af438fd810a59760471b3d9
| 65,506 |
def list_array_paths(path, array_dict):
"""
Given a dictionary containing each directory (experiment folder) as a
key and a list of array data files (analysis of array containing Log2Ratio
data) as its value (i.e., the output of 'find_arrays'), returns a list of
full paths to array files.
"""
array_path_list = []
for key_folder in array_dict:
for array_value in array_dict[key_folder]:
array_path_list.append(path + key_folder + "/" + array_value)
return array_path_list
|
79d5f58a97005fb915de290ae8ccc480fbacd3c0
| 703,724 |
import logging
def get_poefixer_logger(level=logging.INFO):
"""
Return a logger for this application.
Logging `level` is the only parameter and should be one of the logging
module's defined levels such as `logging.INFO`
"""
logger = logging.getLogger('poefixer')
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
|
c69bd5ef7b55d478105a8050bde91679dc0237c3
| 135,830 |
def all_transitions(transition_lists):
"""
Given a list of transition lists, combine them all into a list of unique transitions.
"""
transitions = set()
for trans_list in transition_lists:
for trans in trans_list:
transitions.add(trans)
return sorted(transitions)
|
f29b0d341f4c994f7bca90cd290bbc60093fc45d
| 614,972 |
def rescale(values, out_range = (0., 1.), in_range = None, clamp = False,
scale = None):
"""Rescales a list of numbers into a given range.
`out_range` gives the range of the output values; by default, the minimum
of the original numbers in the list will be mapped to the first element
in the output range and the maximum will be mapped to the second element.
Elements between the minimum and maximum values in the input list will be
interpolated linearly between the first and second values of the output
range.
`in_range` may be used to override which numbers are mapped to the first
and second values of the output range. This must also be a tuple, where
the first element will be mapped to the first element of the output range
and the second element to the second.
If `clamp` is ``True``, elements which are outside the given `out_range`
after rescaling are clamped to the output range to ensure that no number
will be outside `out_range` in the result.
If `scale` is not ``None``, it will be called for every element of `values`
and the rescaling will take place on the results instead. This can be used,
for instance, to transform the logarithm of the original values instead of
the actual values. A typical use-case is to map a range of values to color
identifiers on a logarithmic scale. Scaling also applies to the `in_range`
parameter if present.
Examples:
>>> rescale(list(range(5)), (0, 8))
[0.0, 2.0, 4.0, 6.0, 8.0]
>>> rescale(list(range(5)), (2, 10))
[2.0, 4.0, 6.0, 8.0, 10.0]
>>> rescale(list(range(5)), (0, 4), (1, 3))
[-2.0, 0.0, 2.0, 4.0, 6.0]
>>> rescale(list(range(5)), (0, 4), (1, 3), clamp=True)
[0.0, 0.0, 2.0, 4.0, 4.0]
>>> rescale([0]*5, (1, 3))
[2.0, 2.0, 2.0, 2.0, 2.0]
>>> from math import log10
>>> rescale([1, 10, 100, 1000, 10000], (0, 8), scale=log10)
[0.0, 2.0, 4.0, 6.0, 8.0]
>>> rescale([1, 10, 100, 1000, 10000], (0, 4), (10, 1000), scale=log10)
[-2.0, 0.0, 2.0, 4.0, 6.0]
"""
if scale is not None:
values = [scale(value) for value in values]
if in_range is None:
mi, ma = min(values), max(values)
else:
mi, ma = in_range
if scale is not None:
mi, ma = scale(mi), scale(ma)
ratio = float(ma - mi)
if not ratio:
return [(out_range[0] + out_range[1]) / 2.] * len(values)
min_out, max_out = list(map(float, out_range))
ratio = (max_out - min_out) / ratio
result = [(x - mi) * ratio + min_out for x in values]
if clamp:
return [max(min(x, max_out), min_out) for x in result]
else:
return result
|
a8dd7e52342f22d7bd6c3aac3399ac1bf419533a
| 158,617 |
def find_lowest_hole(data):
"""
    Finds the lowest missing value ("hole") in a list of non-negative integers
    >>> find_lowest_hole([0, 1, 3, 4])
2
"""
possible_values = set(range(len(data) + 1))
new_key = min(possible_values - set(data))
return new_key
|
34068e971716f3b43dbe7e4d7b605be8267fe889
| 441,553 |
def get_short_id_from_ec2_arn(arn: str) -> str:
"""
Return the short-form resource ID from an EC2 ARN.
For example, for "arn:aws:ec2:us-east-1:test_account:instance/i-1337", return 'i-1337'.
:param arn: The ARN
:return: The resource ID
"""
return arn.split('/')[-1]
|
77ea7fc01ffbdbb5d4fd4a636769cb9c0fb0f5ed
| 366,082 |
def separator(s):
"""
    From any string, return the list composed of each character that is in the string
:param s: String to be separated character by character like '121211'
:return: Separated list of characters where one can look like ['1', '2', '1', '2', '1', '1']
"""
    return list(s)
|
b174dd77d28f742d8091d77cd7e2d435bd98d04a
| 531,132 |
import platform
def get_pytest_mpl_threshold(threshold_dict):
"""Get pytest-mpl image comparison threshold based on ``platform.system()``
Parameters
----------
threshold_dict : dict,
Dictionary of thresholds with platform.system() as keys.
Returns
-------
threshold : int, float.
The threshold use for image comparisons in pytest.
"""
return threshold_dict[platform.system()]
|
650e2fb2186faf73aae55f270532a8edd2e3f5f4
| 324,376 |
def get_frequency(train):
"""
Get the mean frequency of a spike train. Assumes time is in ms.
"""
try:
length = len(train)
first = train[0]
last = train[-1]
return float(length-1)/float(last-first)*1000.
except: # On any error, just return 0
return 0
|
4f5f33949ba1897ee3fe56e4b302361c73d4345c
| 456,071 |
def normalize(dist):
"""Scale values in a dictionary or list such that they represent
a probability distribution. Each value lies in 0 <= value <= 1
and the sum of all values is 1.
:param dist: The distribution. May be numeric, or a dictionary of numeric
values. Note that dictionaries are modified, other iterables
have copies returned.
:return val: Returns a dictionary or list.
"""
if isinstance(dist, dict):
total = sum(dist.values())
for key in dist:
dist[key] = dist[key] / total
assert 0 <= dist[key] <= 1 # probabilities must be between 0 and 1
return dist
total = sum(dist)
return [(n / total) for n in dist]
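Two quick illustrative calls (note the dict is modified in place, while a new list is returned for other iterables):
assert normalize([1, 1, 2]) == [0.25, 0.25, 0.5]
assert normalize({"a": 1, "b": 3}) == {"a": 0.25, "b": 0.75}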
|
239bbd4d2791ef5dace012256f2752a2db1580ec
| 249,249 |
def _IndentString(source_string, indentation):
"""Indent string some number of characters."""
lines = [(indentation * ' ') + line
for line in source_string.splitlines(True)]
return ''.join(lines)
|
f85c2e18448497edcd764068ae9205ec4bbaec5d
| 30,335 |
def colorHEXrgba(HEX):
"""
Convert color in HEX into RGB
:param HEX: str (#RRGGBB)
:return: tuple (RRR, GGG, BBB)
"""
HEX = HEX.lstrip('#')
return tuple(int(HEX[i:i + 2], 16) for i in (0, 2, 4))
|
717bbf365fd8ca254fcd1299758ce6de55196161
| 162,366 |
def get_fastq_id(fastq_name):
"""Splits and returns the first part of the read name"""
return fastq_name.split(' ')[0]
|
ccf34f5e681d02a1638ee5dcb6f5920702a68d0e
| 170,460 |
def cut_suffix(name, suffix):
"""Cuts off the *suffix* from *name* string, if it ends with it
:param name: original name from which suffix will be cut off
:type name: string
:param suffix: string to be removed
:return: string without suffix
"""
if isinstance(name, str) and name.endswith(suffix):
name = name[:-len(suffix)]
return name
|
6c55b265a7b31fc97598ecaa6d7aa6e51d13440b
| 90,736 |
def filter_factors(labels, num_observed_factors, random_state):
"""Filter observed factor keeping only a random subset of them.
Args:
labels: Factors of variations. Numpy array of shape
(num_labelled_samples, num_factors) of Float32.
num_observed_factors: How many factors should be kept.
random_state: Random state used to sample the permutation.
Returns:
        A tuple of the filtered labels (only num_observed_factors columns are kept) and the indices of the kept factors.
"""
if num_observed_factors < 1:
raise ValueError("Cannot observe negative amount of factors.")
elif num_observed_factors > labels.shape[1]:
raise ValueError(
"Cannot observe more factors than the ones in the dataset.")
factors_to_keep = random_state.choice(labels.shape[1],
size=num_observed_factors,
replace=False)
return labels[:, factors_to_keep], factors_to_keep
|
7c6c424f9b7cb7f35af71142a81d58e02f7741e9
| 295,984 |
import re
def extract_subkeys(d, base):
"""Extract items from a dictionary with keys starting with a given string.
>>> d = {'xxx_yyy': 1, 'xyz': 'abc'}
>>> extract_subkeys(d, 'xxx_')
{'yyy': 1}
"""
subkeys = {}
for key, val in d.items():
        match = re.match(r'%s(?P<subkey>\w+)' % base, key)
if match:
subkeys[match.group('subkey')] = val
return subkeys
|
67091c6110cfa1c5f91ea645c48fa7554639ccd7
| 274,327 |
import inspect
from pathlib import Path
def versioned(func):
"""Automatically infer version string from calling filename.
When the caller's filename is formatted ``'v{version}.py'``, extract
``{version}`` from that filename and set the ``"version"`` key in the result.
For example, in the file ``v1.0.0.py``::
@versioned
def specialize():
return {}
data = specialize()
assert data['version'] == '1.0.0'
"""
# get the calling frame's filename; extract the version
frame = inspect.stack()[1]
file = frame[0].f_code.co_filename
version = Path(file).stem.lstrip('v')
def wrap(*arg, **kwarg):
data = func(*arg, **kwarg)
data['version'] = version
return data
return wrap
|
4a74bea706843b946c2766a0eb6a4d638f947668
| 516,992 |
def revcomp(seq: str) -> str:
"""
reverse complement a nucleotide sequence.
"""
rc_nuc = {
'A': 'T',
'C': 'G',
'T': 'A',
'G': 'C',
}
seq_rev = seq[::-1]
seq_rev_comp = ''.join([rc_nuc[n] for n in list(seq_rev)])
return seq_rev_comp
|
b2745b3b3cd29b339f9305414bdfdaf297fb53a9
| 63,674 |
import base64
def encode_byte_array(byte_arr):
""" Encodes the byte array as a base64-encoded string
:param byte_arr: A bytearray containing the bytes to convert
:return: A base64 encoded string
"""
    # b64encode returns bytes; decode so the documented return type (a string) holds
    enc_string = base64.b64encode(bytes(byte_arr)).decode('ascii')
    return enc_string
|
0134d159d869b3adb7f371848f6a60818660ee0d
| 626,206 |
import math
def outlierCleaner(predictions, ages, net_worths):
"""
Clean away the 10% of points that have the largest
residual errors (difference between the prediction
and the actual net worth).
Return a list of tuples named cleaned_data where
each tuple is of the form (age, net_worth, error).
"""
nb_cleaned = int(math.ceil(len(predictions) * 0.1))
cleaned_data = []
#Calculate all residual errors
for prediction, age, net_worth in zip(predictions, ages, net_worths):
error = (prediction - net_worth)**2
cleaned_data.append((age, net_worth, error))
#Sort with highest error first
cleaned_data.sort(key=lambda x: x[2], reverse=True)
#Remove highest errors:
cleaned_data = cleaned_data[nb_cleaned:]
return cleaned_data
|
af3809faed46f3cf5539f6451aaf92ffddf7878c
| 97,004 |
def find_highest(tem, h):
"""
    :param tem: int, the temperature that the user entered.
:param h:int, the highest temperature so far.
This function finds the highest temperature.
"""
    if tem > h:
        return tem
    return h
|
726660d2375c71b87e60997d993458b6f704cb68
| 501,592 |
def ease_out_quad(n: float) -> float:
"""A quadratic tween function that begins fast and then decelerates."""
return -n * (n - 2)
|
4e276d77ac5a03731ca34f50d1662423ad3e38f1
| 352,968 |
def read_flist(listpath, delimiter='|'):
"""read file list from file"""
flists_flat = []
    with open(listpath, 'r') as f:
        lines = f.readlines()
for line in lines:
entry = line.rstrip().split(delimiter)
flists_flat.append(entry)
return flists_flat
|
02f7a617a625841db03336a439bbe4278fd25eb9
| 58,763 |
import logging
def mongo_filter_errors(obj, converter):
"""Call converter on obj and ignore failed objects.
Returns result of converter. If converter raises Exception, None will be
returned.
If obj is list, converter is called on each element. Those which raise
exceptions are skipped.
"""
    if obj is None:
        return None
    # As documented above: for a list, convert each element and skip the ones
    # whose conversion raises.
    if isinstance(obj, list):
        results = []
        for item in obj:
            try:
                results.append(converter(item))
            except Exception as error:
                logging.error("During parsing db data (list): %s", error)
                logging.error("Method %s", converter)
                logging.error("Value %s", item)
        return results
    try:
        return converter(obj)
    except Exception as error:
        logging.error("During parsing db data (single): %s", error)
        logging.error("Method %s", converter)
        logging.error("Value %s", obj)
        return None
|
32ac18e94788813d0b0873d39bb30a5e43eb95d6
| 389,325 |
def osm_id_comparator(el):
"""This function is used as key for sorting lists of
OSM-originated objects
"""
return (el['osm_type'], el['osm_id'])
|
40b483bd222187cef614e46d349aaabc93980c68
| 200,161 |
import functools
def _hook_function(func, pre_func_hook=None, post_func_hook=None):
"""Wrap a function with pre- and post-processing hooks.
Return a new function that calls `pre_func_hook()`, `func()`,
`post_func_hook()`.
All functions receive the same arguments as the wrapper was called with.
The return value of the wrapper is the return value of `func()`.
Both hooks are optional and can be omitted.
"""
if not pre_func_hook and not post_func_hook:
return func
@functools.wraps(func)
def f(*args, **kwargs):
if pre_func_hook:
pre_func_hook(*args, **kwargs)
result = func(*args, **kwargs)
if post_func_hook:
post_func_hook(*args, **kwargs)
return result
return f
|
ac20e47d3c75e764e922d7be46200319439f9b54
| 526,104 |
def clipMinMax(size, minSize, maxSize):
"""
Clip the size so it is bigger then minSize but smaller than maxSize.
"""
return size.expandedTo(minSize).boundedTo(maxSize)
|
5d812694f14337797d0423564314546369b3957b
| 84,182 |
def _Dij(A, i, j):
"""Sum of lower-left and upper-right blocks of contingency table."""
# See [2] bottom of page 309
return A[i+1:, :j].sum() + A[:i, j+1:].sum()
|
b57910ea124940d133e23321b0e970f30183b514
| 647,830 |
def setBit(num, n):
"""
Return num with the nth bit set to 1.
"""
# Make a mask of all 0s with the nth bit set to 1.
mask = 1 << n
return num | mask
|
42ec57a67b37171171ef929c5355e3de368ec63c
| 625,462 |
import itertools
def peek_sample_name(variants_iter):
"""Gets the call_set_name from the first Variant of variants_iter.
Args:
variants_iter: iterable[nucleus.protos.Variant]. Our source of variants.
Returns:
tuple of (str, iterable[Variant]). The first element is the call_set_name of
the first variant of variants_iter, or 'UNKNOWN' if the iterable is empty.
The second is a new iterable that yields the same elements of variant_iter,
in the same order, which is necessary to return as we need to peek into
the original iterator.
"""
try:
first = next(variants_iter)
return first.calls[0].call_set_name, itertools.chain([first], variants_iter)
except StopIteration:
# No variants, just return a dummy value.
return 'UNKNOWN', iter([])
|
75db40929079c861985e3cff1c790c175ef0921a
| 239,894 |
import torch
def deprocess_lab(L_chan, a_chan, b_chan):
"""Converts LAB color channels to original data range"""
return torch.stack([(L_chan + 1) / 2.0 * 100.0, a_chan * 110.0, b_chan * 110.0], dim=2)
|
3894cf8ffb150cda0969e8dd1ef0cbdb0bc13d6e
| 278,696 |
def _create_tool_url(fqdn, http_port):
"""Create an HTTP URL for a tool.
Args:
fqdn: A tool FQDN (already processed by fqdn_rewrite).
http_port: The HTTP port number part of the URL.
Returns:
A constructed tool HTTP URL with protocol and http port.
"""
# Use str() to ensure we return a non-unicode string
return str('http://%s:%s' % (fqdn, http_port))
|
cb7592c0979f2c7af93536206c19b020d30e1a6d
| 391,528 |
def hex2int(s: str):
"""Convert a hex-octets (a sequence of octets) to an integer"""
return int(s, 16)
|
ecdb3152f8c661c944edd2811d016fce225c3d51
| 707,345 |
def shiny_gold_in(bag: str, rules: dict) -> bool:
"""Recursively check for shiny gold bags."""
if "shiny gold" in rules[bag].keys():
return True
elif not rules[bag]: # bag holds no others
return False
else:
for inner in rules[bag]:
if shiny_gold_in(inner, rules):
return True
return False
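A small illustrative rules dict in the expected shape, bag name mapped to a dict of {inner bag name: count} (the bag names are made up in the style of the puzzle input):
rules = {
    "light red": {"bright white": 1},
    "bright white": {"shiny gold": 2},
    "dotted black": {},
}
assert shiny_gold_in("light red", rules) is True
assert shiny_gold_in("dotted black", rules) is False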
|
1859f499b72a938a58a78af5ca1a78e9f7221731
| 14,148 |
import random
import string
def generate_id(size=6):
"""Generate a random ID, hopefully unique to be used as a unique identifier
for each uploaded file.
Args:
size (int): Number of characters to generate
Returns:
str: The random ID
"""
return ''.join(
random.SystemRandom().choice(string.ascii_letters) for _ in range(size)
)
|
0fe893eb1d07e41ce20508d07916a775f1333a40
| 543,536 |
from typing import List
from typing import Optional
from typing import Any
def navigation(options: List, **kwargs) -> Optional[Any]:
"""
Displays A List of Options and Handles User Input in Selecting One of the Provided Options
:param options: List of Options to get a User to Choose From
:param kwargs: Additional Arguments to Modify how the Options are Displayed
:keyword attribute: The Object Attribute Name for Each Option to Be Displayed and Sorted by (If Sorted is True)
:keyword title: A title to Describe the Individual Options Provided
:keyword input_text: Modify the Selection Text for the Options
:keyword sort: Sort the Options by their Attribute Name (If attribute is also Provided)
:return: The Selected Option from the Options or None
"""
# Handle kwargs
attribute = kwargs.get('attribute', None)
title = kwargs.get('title', '')
input_text = kwargs.get('input', "Enter {} Number to Select ['c' to Exit]: ".format(title))
sort = kwargs.get('sort', False)
if sort and attribute is not None:
options = sorted(options, key=lambda element: getattr(element, attribute))
if not options:
        return None  # No Options to Choose From
while True:
for i in range(len(options)):
print("[{}] {}".format(i + 1, getattr(options[i], attribute) if attribute is not None else str(options[i])))
choice = input(input_text + "\n")
if choice.isalpha():
return None # No Choice Made
try:
choice = int(choice)
if choice > len(options) or choice <= 0:
raise ValueError
return options[choice - 1] # maybe just return the index
except (TypeError, ValueError):
print("Invalid Selection")
|
2e5f4c34060a571d4f0c0a2e2b79437c2e173cee
| 166,543 |
def gs_to_public_url(gs_url):
"""Converts a gs:// URI to a HTTP URL."""
assert gs_url.startswith('gs://')
return gs_url.replace('gs://', 'https://storage.googleapis.com/', 1)
|
ff00fa1876f35be65b4c9b3314120ffd5cb0498a
| 68,247 |
def update_single_tunnel_exception(
self,
exception_id: int,
appliance_1_ne_pk: str,
appliance_1_label: str,
appliance_2_ne_pk: str,
appliance_2_label: str,
description: str = "",
) -> bool:
"""Update a tunnel exception between two appliances.
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - exception
- PUT
- /exception/tunnel/{id}
:param exception_id: ID of the existing tunnel exception to update
:type exception_id: int
:param appliance_1_ne_pk: Network Primary Key (nePk) of first
appliance, e.g. ``3.NE``
:type appliance_1_ne_pk: str
:param appliance_1_label: Interface label id of first appliance for
tunnel exception. Can use the value of ``any`` for all interface
labels, e.g. ``2`` or ``any``
:type appliance_1_label: str
    :param appliance_2_ne_pk: Network Primary Key (nePk) of second
appliance, e.g. ``5.NE``
:type appliance_2_ne_pk: str
:param appliance_2_label: Interface label id of second appliance for
tunnel exception. Can use the value of ``any`` for all interface
labels, e.g. ``2`` or ``any``
:type appliance_2_label: str
:param description: Description/comment to include with exception
:type description: str, optional
:return: Returns True/False based on successful call
:rtype: bool
"""
data = {
"appliance_id_1": appliance_1_ne_pk,
"interface_label_1": appliance_1_label,
"appliance_id_2": appliance_2_ne_pk,
"interface_label_2": appliance_2_label,
"description": description,
}
return self._put(
"/exception/tunnel/{}".format(exception_id),
data=data,
expected_status=[204],
return_type="bool",
)
|
af5b747307ab8ac93a46e071d638422e07e715d7
| 366,948 |
def read_line(line):
"""
    :param line: CSV line built of:
timestamp, some string, value
example:
"0.384060000000000,Async Serial,0xFF"
:return: pair of timestamp, value
"""
x = line.rstrip().split(',')
timestamp = int(float(x[0])*1000)
try:
value = int(x[2], 16)
except ValueError:
value = int(x[2][:4], 16)
print(x[2][4:], 'at: ', timestamp)
return timestamp, value
|
9e298efd174df28eb28dacdc35385333a255d5a8
| 545,589 |
import re
def FindSplitIndices(lines):
"""Given some lines representing a markdown file with multiple entries in it,
find each split point."""
def CodeLine(line):
if line == '\n':
return 'N'
        elif re.match(r'\w', line):
            return 'T'
        elif re.match(r'^===+$', line):
return 'D'
else:
return '?'
# Code lines: T if any text, N if new line, D if divider.
coded_lines = [CodeLine(line) for line in lines]
coded = ''.join(coded_lines)
#warnings.warn(coded)
    # Look for patterns of NTD in the coded lines string. If such a pattern is
# found, output the index.
return [m.start() for m in re.finditer('NTD', coded)]
|
5cabca9768d94dcce2cea75e4fc834a8d3640186
| 277,544 |
def is_string_type(thetype):
"""Returns true if the type is one of: STRING, TIMESTAMP, DATE, or
TIME."""
return thetype in [
'STRING', 'TIMESTAMP', 'DATE', 'TIME', 'QINTEGER', 'QFLOAT', 'QBOOLEAN'
]
|
b6f010d0b12bf5452635be81d11b89fde074af39
| 668,430 |
def _parse_styles(style_text, specificity):
"""Parse a style string into a dictionary {style_key: (style_value, specificity)}."""
return dict([(k.strip(), (v.strip(), specificity))
for k, v in [x.strip().split(':', 1)
for x in style_text.split(';') if x.strip()]])
|
e49984adf19ddd6c44f9584715dab45e45f5b052
| 407,134 |
import six
def check(pkt, pktType, keyMatches=None, **valMatches):
"""This function takes an object that was expected to come from a packet (after it has been JSONized)
and compares it against the arg requirements so you don't have to have 10 if() statements to look for keys in a dict, etc..
Args:
@pkt : object to look at
@pktType : object type expected (dict, list, etc..)
@keyMatches : a list of minimum keys found in parent level of dict, expected to be an array
@valMatches : a dict of key:value pairs expected to be found in the parent level of dict
the value can be data (like 5) OR a type (like this value must be a @list@).
Returns:
None if everything matches, otherwise it returns a string as to why it failed."""
# First check that the pkt type is equal to the input type
if(type(pkt) is not pktType):
return 'expected %s' % str(pktType)
if(keyMatches):
# Convert the keys to a set
keyMatches = set(keyMatches)
# The keyMatches is expected to be an array of the minimum keys we want to see in the pkt if the type is dict
if(type(pkt) is dict):
if(not keyMatches.issubset(pkt.keys())):
return 'missing, "%s"' % ', '.join(list(keyMatches - set(pkt.keys())))
else:
return None
# Finally for anything in the valMatches find those values
if(valMatches):
# Pull out the dict object from the "valMatches" key
if('valMatches' in valMatches.keys()):
matchObj = valMatches['valMatches']
else:
matchObj = valMatches
for k, v in six.iteritems(matchObj):
# Check for the key
if(k not in pkt.keys()):
return 'key missing "%s"' % k
# See how we should be comparing it:
if(type(v) is type):
if(type(pkt[k]) is not v):
return 'key "%s", bad value type, "%s", expected "%s"' % (k, type(pkt[k]), v)
else:
# If key exists check value
if(v != pkt[k]):
return 'key "%s", bad value data, "%s", expected "%s"' % (k, pkt[k], v)
return None
|
a2e9819cf25ed2d919da74e2ff3141537303cb0e
| 48,127 |
def solution(A):
"""
    Given a non-empty array A containing an odd number (N) of integer elements, where each element can be paired with another element of the same value except for one element that is left unpaired, this function returns the value of the unpaired element.
For example, given array A such that:
A[0] = 9 A[1] = 3 A[2] = 9
A[3] = 3 A[4] = 9 A[5] = 7
A[6] = 9
the function should return 7
"""
# Define a dictionary to keep value-count pairs
value_dict = {}
# Define a variable to store the value with an odd count
odd_value = None
# Loop through A taking note of the current element's count
for el in A:
if el in value_dict:
value_dict[el] += 1
else:
value_dict[el] = 1
odd_value = el if value_dict[el] % 2 == 1 else odd_value
return odd_value
|
54fbef0954b5f08fae7494eedbfec49539d40253
| 60,453 |
from datetime import datetime
def datetime_to_ms(dt: datetime) -> int:
"""Convert a datetime to milliseconds."""
epoch = datetime.fromtimestamp(0, dt.tzinfo)
return int((dt - epoch).total_seconds() * 1000.0)
|
c14128887dfa7201d68b055c2af5be56f5f8ab37
| 697,148 |
def _value_error_on_false(ok, *args):
"""Returns None / args[0] / args if ok."""
if not isinstance(ok, bool):
raise TypeError("first argument should be a bool")
if not ok:
raise ValueError("call failed")
if args:
return args if len(args) > 1 else args[0]
return None
|
261b026bfb564ebb62b5f85aa2e19d8958dbeb71
| 617,534 |
import re
def title_case(s):
"""Convert a string to title case, with some grammatical exceptions.
Args:
s (str, utf-8): original string to be Title Cased.
Returns:
A Title Cased string.
"""
articles = ['a', 'an', 'of', 'the', 'is']
word_list = re.split(' ', s)
final = [word_list[0].capitalize()]
for word in word_list[1:]:
final.append(word if word in articles else word.capitalize())
return " ".join(final)
|
90b0b42473e291f544dd94e4f4f3af2a5ea6816c
| 569,515 |
def _substitute_window(seq, idx_freq_win, order=2):
"""
This function carries out substitution of the most frequent window with an
integer. Each window substitution reduces the length of the sequence by
(order-1) elements. The substituted integer is one greater than the largest
element in the sequence.
The substitution uses indices/locations of the most frequent window and
is carried out in 3 steps, for a single window:
1. Replace 1st element of seq at window index by integer (max(seq)+1)
2. Replace the next order - 1 elements with False
3. Filter list to eliminate all False elements
Iteration of first 2 steps happens together over indices in idx_freq_win
Parameters
----------
seq : list
Sequence of integers.
idx_freq_win : compress iterator or tuple or list
Indices that correspond to the occurence of the most frequent window.
The index of the window is the same as the index of the first element
of that window in seq.
order : int, optional
Number of elements in window for substitution.
The default is 2 for pairs.
Returns
-------
reduced_seq : list
Sequence of integers with most frequent window substituted.
len(reduced_seq) < len(seq)
"""
# Integer value to use for substitution
a = max(seq) + 1
# Iterate over indices/locations of most frequent window
for index in idx_freq_win:
# Substitute the first element of window, in seq, by integer
seq[index] = a
# Substitute the remaining elements of window, in seq, with False
for n in range(1, order):
seq[index + n] = False
# Filter out all False elements and return what's left
reduced_seq = [element for element in seq if element]
return reduced_seq
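A worked miniature example (illustrative values): the most frequent pair (1, 2) occurs at indices 0 and 2, so it is replaced by max(seq) + 1 = 4 and the sequence shrinks by one element per occurrence:
seq = [1, 2, 1, 2, 3]
assert _substitute_window(seq, [0, 2], order=2) == [4, 4, 3]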
|
ba224621816eed206d885bc3636f61701335a49a
| 122,582 |
import re
def find_max_part(row_groups):
"""
    Return one more than the highest integer matching "**part.*.parquet" in the
    referenced paths, or 0 if there are no matches.
"""
paths = [c.file_path or "" for rg in row_groups for c in rg.columns]
s = re.compile(r'.*part.(?P<i>[\d]+).parquet$')
matches = [s.match(path) for path in paths]
nums = [int(match.groupdict()['i']) for match in matches if match]
if nums:
return max(nums) + 1
else:
return 0
|
58152dfa5440f42568122d7cf4365c51ceefabe7
| 314,371 |
def get_patient_timestamp_entries(patient):
""" Get timestamp information from patient database
This function indexes the patient database dictionary and pulls each
timestamp for each heart rate entry. The entries are then saved in the
"all_timestampes" variable.
:param patient: A dictionary containing all of a patient's information
:returns: a list of all of the indicated patient's heart rate entry
timestamps
"""
all_timestamps = patient["timestamp"]
return all_timestamps
|
37d0959b66a801ab36ce2f051396c9d49a11ec52
| 329,945 |
def _flip_top_bottom_boundingbox(img, boxes):
"""Flip top bottom only bounding box.
Args:
img: np array image.
boxes(np.ndarray): bounding boxes. shape is [num_boxes, 5(x, y, w, h, class_id)]
"""
height = img.shape[0]
if len(boxes) > 0:
boxes[:, 1] = height - boxes[:, 1] - boxes[:, 3]
return boxes
|
ad196f59f85d5a6027e0a17ce6d543303c102357
| 37,565 |
from typing import Sequence
def pool_keep_high(pool: Sequence[int], keep: int) -> tuple[int, ...]:
"""Keep a number of the highest dice."""
pool = list(pool)
remove = len(pool) - keep
for _ in range(remove):
low_value = max(pool)
low_index = 0
for i, n in enumerate(pool):
if n < low_value:
low_value = n
low_index = i
pool.pop(low_index)
return tuple(pool)
|
ad775509af8b4be7558f287f2aaede6f9a42c3df
| 453,912 |
def get_host_info(node_info, host):
"""
Simple callback that takes the node info from `/_cluster/nodes` and a
parsed connection information and return the connection information. If
`None` is returned this node will be skipped.
Useful for filtering nodes (by proximity for example) or if additional
information needs to be provided for the :class:`~elasticsearch.Connection`
class. By default master only nodes are filtered out since they shouldn't
typically be used for API operations.
:arg node_info: node information from `/_cluster/nodes`
:arg host: connection information (host, port) extracted from the node info
"""
# ignore master only nodes
if node_info.get("roles", []) == ["master"]:
return None
return host
|
a4060d4f38752cfaf6f97ed9ccf3cc7c2c8d8040
| 303,062 |
import json
def json_description(shape, **metadata):
"""Return JSON image description from data shape and other meta data.
Return UTF-8 encoded JSON.
>>> json_description((256, 256, 3), axes='YXS') # doctest: +SKIP
b'{"shape": [256, 256, 3], "axes": "YXS"}'
"""
metadata.update(shape=shape)
return json.dumps(metadata)
|
1d10ed8927891315e88b37ee0cf5a050516abff5
| 117,130 |
def _fidpairs_to_gatename_fidpair_list(fidpairs, num_qubits):
"""
The inverse of :function:`_gatename_fidpair_list_to_fidpairs`.
Converts a list of `(prep,meas)` pairs of fiducial circuits (containing
only single-qubit gates!) to the "gatename fiducial pair list" format,
consisting of per-qubit lists of gate names (see docstring for
    :function:`_gatename_fidpair_list_to_fidpairs` for more details).
Parameters
----------
fidpairs : list
A list of `(prep_fiducial, meas_fiducial)` pairs, where `prep_fiducial`
and `meas_fiducial` are :class:`Circuit` objects.
num_qubits : int
The number of qubits. Qubit labels within `fidpairs` are assumed to
be the integers from 0 to `num_qubits-1`.
Returns
-------
gatename_fidpair_list : list
        Each element corresponds to an element of `fidpairs`, and is a list of
        `(prep_names, meas_names)` tuples, one per qubit. `prep_names` and
`meas_names` are tuples of single-qubit gate *names* (strings).
"""
gatename_fidpair_list = []
for fidpair in fidpairs:
gatenames_per_qubit = [(list(), list()) for i in range(num_qubits)] # prepnames, measnames for each qubit
prepStr, measStr = fidpair
for lbl in prepStr:
assert(len(lbl.sslbls) == 1), "Can only convert strings with solely 1Q gates"
gatename = lbl.name
iQubit = lbl.sslbls[0]
gatenames_per_qubit[iQubit][0].append(gatename)
for lbl in measStr:
assert(len(lbl.sslbls) == 1), "Can only convert strings with solely 1Q gates"
gatename = lbl.name
iQubit = lbl.sslbls[0]
gatenames_per_qubit[iQubit][1].append(gatename)
#Convert lists -> tuples
gatenames_per_qubit = tuple([(tuple(x[0]), tuple(x[1])) for x in gatenames_per_qubit])
gatename_fidpair_list.append(gatenames_per_qubit)
return gatename_fidpair_list
|
c5952f3e12b5bf629fdfd4063adc3400e931dac2
| 380,735 |
def max_bitrate_ext(val):
"""
Given ESM value, return extended maximum bit rate (Kbps).
Please refer to 10.5.6.5, TS24.008 for more details.
:param val: the value encoded in the ESM NAS message
"""
if val <= 74:
return 8600 + val * 100
elif val <= 186:
return 16000 + (val - 74) * 1000
elif val <= 250:
return 128000 + (val - 186) * 2000
else:
return None
|
ee8c139ad28d836d01b43f1b24d795550844f695
| 672,106 |
import configparser
def get_auth(f):
"""
Get authentication token from config file
:param f: configuration file with credentials
:type f: file
:returns: parsed GitHub token, False if something went wrong
:rtype: string, bool
"""
config = configparser.ConfigParser()
config.read_file(f)
    if not config.has_section('github'):
return False
opts = config.options('github')
ret = ''
for o in opts:
if o == 'token':
ret = config.get('github', o)
if ret == '':
return False
return ret
|
8773c35b3fdf263665478f93b782ce7be5f0ea9a
| 316,017 |