content (stringlengths 39-14.9k) | sha1 (stringlengths 40) | id (int64 0-710k)
---|---|---|
def filter_probes_by_nan_and_sd(data_df, probe_frac_cutoff, probe_sd_cutoff):
""" Filter out probes whose fraction of samples measured is less than probe_frac_cutoff.
Also remove probes with standard deviation higher than probe_sd_cutoff.
Args:
data_df (pandas df)
        probe_frac_cutoff (float between 0 and 1)
probe_sd_cutoff (float)
Returns:
out_df (pandas df): potentially smaller than original df
"""
# Number of NaNs per probe
num_nans = data_df.isnull().sum(axis=1)
# Number of samples
num_samples = data_df.shape[1]
# Fraction non-NaN per probe
frac_non_nans_per_probe = 1 - num_nans/num_samples
# Probe standard deviations
probe_sds = data_df.std(axis=1)
# Only return probes with more non-NaN data than probe_frac_cutoff
# and lower sd than the cutoff
probes_to_keep = ((frac_non_nans_per_probe > probe_frac_cutoff) &
(probe_sds < probe_sd_cutoff))
out_df = data_df.loc[probes_to_keep, :]
assert not out_df.empty, (
"All probes were filtered out. Try reducing the NaN threshold and/or SD cutoff.")
return out_df
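# Usage sketch (hypothetical data; assumes pandas and numpy are installed):
# probes are rows and samples are columns, so filtering drops rows that are
# too sparse (probe_b) or too variable (probe_c).
import numpy as np
import pandas as pd
df = pd.DataFrame(
    [[1.0, 1.1, 0.9], [np.nan, np.nan, 0.5], [1.0, 50.0, -40.0]],
    index=["probe_a", "probe_b", "probe_c"],
)
filtered = filter_probes_by_nan_and_sd(df, probe_frac_cutoff=0.5, probe_sd_cutoff=10.0)
print(filtered.index.tolist())  # ['probe_a']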
|
1268244d4975be7bcf114b94d33e567fb7cff1b5
| 32,741 |
import importlib
def get_model(params):
"""Return the model class by its name."""
module_name, class_name = params.model.name.rsplit('.', 1)
i = importlib.import_module(module_name)
return getattr(i, class_name)
|
55e360f57488eeeb35d8c1479293d10ffa1fac78
| 32,750 |
def create_model_identifier(name, version):
"""Get a compatible string as a combination of name and version"""
new_name = "%s-%s" % (name, str(version).replace('.', '-'))
return new_name
|
3aec457ff6836b93293b1adcb9e26a5429cfff09
| 32,751 |
def emoji_boolean(condition: bool) -> str:
"""Returns emoji depending on `condition`
Args:
condition (bool): subject condition
Returns:
str: emoji
"""
return "🟢" if condition else "🔴"
|
c2ca508ded5919da4811267893afd32b142cced3
| 32,752 |
def _quantile(sorted_values, q):
"""
For a sorted (in increasing order) 1-d array, return the
value corresponding to the quantile ``q``.
"""
    assert 0 <= q <= 1
if q == 1:
return sorted_values[-1]
else:
return sorted_values[int(q*len(sorted_values))]
|
e3356c5911b031c153a4914dcd81052743005eae
| 32,754 |
import random
import math
def rGeom(p):
"""Generate a geometrically distributed random number. p is the success
probability. The numbers are in the range {0, 1, 2,...}"""
# CDF = 1-(1-p)^(k+1) (CDF of geometric distribution)
# (1-p)^(k+1) = 1-CDF (... solve for k ...)
# k+1 = log(1-CDF)/log(1-p)
# k = (log(1-CDF)/log(1-p)) - 1
# insert a uniform random number in [0;1] for CDF to
# obtain geometrically distributed numbers
u = random.random()
    if p == 1:
        return 0
    return math.ceil(math.log(1 - u, 1 - p) - 1)
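# Sanity-check sketch (hypothetical): a geometric distribution on {0, 1, 2, ...}
# has mean (1 - p) / p, so samples at p = 0.25 should average about 3.
random.seed(0)
samples = [rGeom(0.25) for _ in range(100_000)]
print(sum(samples) / len(samples))  # ~3.0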
|
7eb1d3bbac0c79e1341abb2fa9528ea8c6c88e84
| 32,755 |
def recursive_dict_to_list(dict_data):
""" Returns a list containing all values from
a dictionary and any child contained dictionary.
"""
list_data = []
    for v in dict_data.values():
        if isinstance(v, dict):
            list_data.extend(recursive_dict_to_list(v))
        else:
            list_data.append(v)
return list_data
|
40f1bd7649d0462ff12a51958f028f098aeead56
| 32,760 |
def cal_avg_distance(points, a, b, d, threshold_inlier):
"""
return average distance of points to the line model.
Parameter
---------
points : array like
[[x1,y1],[x2,y2],...]
a : float
b : float
d : float
thereshold_inlier : float
the threshold of discriminating whether the point is the inlier
Return
------
avg_dis : float
average distance
inlier_rate : float
inliers rate
"""
dis_sum = 0
inlier_num = 0
point_num = len(points)
for point in points:
dis = (a * point[0] + b * point[1] - d) ** 2
dis_sum += dis
if dis < threshold_inlier:
inlier_num += 1
avg_dis = dis_sum / point_num
inlier_rate = inlier_num / point_num
return avg_dis, inlier_rate
|
172d5e266d28810e002af35e868262f5213141a9
| 32,764 |
def full_name(app):
"""Builds App full_name, prepending the App with the name
of the parent App.
:param app: App as returned by queries.get_apps()
:type app: dict
:return: full name of the app
:rtype: string
"""
name = app['name']
if app.get('parentApp'):
parent_app = app['parentApp']['name']
name = f"{parent_app}/{name}"
return name
|
c0b46e63ab662f9c3e33050e9a5f034400a45c58
| 32,771 |
def default_introspection_processing_hook(introspection_response, client, id_token):
"""
Hook to customise the returned data from the token introspection endpoint
:param introspection_response:
:param client:
:param id_token:
:return:
"""
return introspection_response
|
a3c051dd8ec1c11075d169365be6839806b9a9da
| 32,772 |
import random
def get_random_crop_coords(height, width, crop_height, crop_width):
"""
get coordinates for cropping
:param height: image height, int
:param width: image width, int
:param crop_height: crop height, int
:param crop_width: crop width, int
:return: xy coordinates
"""
y1 = random.randint(0, height - crop_height)
y2 = y1 + crop_height
x1 = random.randint(0, width - crop_width)
x2 = x1 + crop_width
return x1, y1, x2, y2
|
3da397b33bba76df2312611731536d4b5ce5e674
| 32,774 |
def get_src(node):
"""
Returns src module of node, None if attr not defined
"""
    # falls back to None when the attribute is missing or falsy
    return getattr(node, "srcmodule", None) or None
|
28d37a39df61353eec31248da47572df5a5f2c75
| 32,781 |
def min_med_max(x):
"""Compute min, median, max of tensor x."""
return [x.min().item(), x.median().item(), x.max().item()]
|
b894c11e4bf6828d3627a6d79beec8c070c6657f
| 32,783 |
def get_matching_firstlevel_children_from_node(node, child_tag):
"""Takes a xml file as input and returns a list of first
child elements to the root that matches the provided tag """
child_list = node.findall(child_tag)
return child_list
|
7793a27a954c5159f037efbc5ae6902062750ff5
| 32,785 |
import re
def extract_re_group(pattern):
"""
Extract the first captured group using the given pattern.
"""
def callable_(key, data, errors, context):
value = data.get(key) or ''
match = re.match(pattern, value)
if not match:
return
try:
data[key] = match.group(1)
except IndexError:
return
return callable_
|
832c2ce3eb6e182bd22c8ee28e287dd1c207162f
| 32,789 |
def get_object_api_names(api_name, list_objs):
"""
Return a list of object api_names from list_objs
"""
return [o.get(api_name) for o in list_objs]
|
4ab4a1c5375e052f42511c2dbc438e1f157e5073
| 32,791 |
def get_missed_cleavages(sequences:list, n_missed_cleavages:int) -> list:
"""
Combine cleaved sequences to get sequences with missed cleavages
    Args:
        sequences (list of str): the list of cleaved sequences (no missed cleavages).
        n_missed_cleavages (int): the number of missed cleavage sites.
    Returns:
        list (of str): the sequences with missed cleavages.
    """
    missed = []
    for k in range(len(sequences) - n_missed_cleavages):
        missed.append(''.join(sequences[k:k + n_missed_cleavages + 1]))
    return missed
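# Illustration (hypothetical peptides): with one allowed missed cleavage,
# adjacent cleavage products are concatenated pairwise.
cleaved = ['PEPK', 'TIDER', 'AFTK']
print(get_missed_cleavages(cleaved, 1))  # ['PEPKTIDER', 'TIDERAFTK']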
|
7b8f3f2c11eb22d0311cefcfd59b9b5d5f1a4c78
| 32,793 |
def _set_square(mul, square):
"""
Set square if square is None
(for the initialization of methods for powering)
"""
    if square is None:
return lambda x : mul(x, x)
else:
return square
|
67689dc3fd5c282fd63e6cda9eb6b97f7d6b92ce
| 32,795 |
def urljoin(*pieces):
"""Join componenet of url into a relative url
Use to prevent double slash when joining subpath
"""
striped = [s.strip('/') for s in pieces]
return '/'.join(s for s in striped if s)
|
6f41e5ae515ae6cee3e19e36a94f45444c71b0ba
| 32,796 |
def get_objects_name(objects):
""" Retrieves the names of objects.
Parameters:
objects (list): Objects to get names.
Returns:
list: Object names.
"""
    names = []
    for obj in objects:
        if obj.name[-5:-3] == '}.':
            names.append(obj.name[:-4])
        else:
            names.append(obj.name)
    return names
|
f0ff4abb68c54c536338aa48eca0a3f6d57c0ae5
| 32,798 |
import math
def degrees(angle):
"""Convert angle from radians to degrees."""
return 180 * angle / math.pi
|
3cdf03bb5fd34cce80a53f90ed39a2aacb2b6bda
| 32,801 |
def savedata(fullpathfilename, data):
"""
Save data to a file: csv from a pandas dataframe
:param fullpathfilename: Full path to the file
:param data: pandas dataframe to save
:return: True if successful, False if not
"""
try:
data.to_csv(fullpathfilename,
header=True
)
return True
except Exception as e:
print('Exception occurred: {} '.format(e))
return False
|
1a2ab34c04144c95764cebed7c25e54b1f326ec9
| 32,803 |
def translate_keypoints(keypoints, translation):
"""Translate keypoints.
# Arguments
        keypoints: Numpy array of shape ``(num_keypoints, 2)``.
translation: A list of length two indicating the x,y translation values
# Returns
Numpy array
"""
return keypoints + translation
|
02acd5ca99e86712e103be81f3b7c2362996446b
| 32,804 |
def relu(x):
"""
ReLU element-wise activation.
Args:
x: array-like, any shape,
array to compute activations for.
Returns:
x_relu: same type and shape as x,
element wise application of ReLU to x.
"""
return x * (x > 0)
|
dd01c5cea3e2c77bd41c9f92dab37f5cf7890b10
| 32,807 |
def can_open_file(gcode_filepath):
"""
check whether a given filepath can be opened. If the filepath throws an error, False is returned. If the file can
be opened, True is returned.
"""
if isinstance(gcode_filepath, str): # if gcode_filepath is a string
try: # try opening file
with open(gcode_filepath, 'r'):
pass
except IOError: # if an error is thrown, return False
return False
else: # if an error isn't thrown, return true
return True
else: # gcode_filepath is not a string, return false
return False
|
6ad51e2d67b89886edb7ca65fa25ca1f7bdfe536
| 32,808 |
def net_to_linkdict(net):
"""
Convert Net object from parse_net_file to dict { (from,to) : Link }
mapping a link indexed by (from,to) tuple to Link data
Parameters:
net - Net object as returned by parse_net_file()
Return value:
dict {(from,to):Link} as described above
"""
return dict( [ ((link.init_node, link.term_node), link )
for link in net.links ] )
|
96f8ad92486078e48ed6d914b9d21cc3ebb96141
| 32,818 |
def get_all_descendants(db, parent):
"""Return all (non-retired) descendants of the parent.
Parameters
----------
db : MongoDatabase
The Mongo database from which request document data can be
retrieved.
parent : str
The parent for which all descendants are desired.
Returns
-------
list(str): The descendants of the parent.
Throws
------
ValueError: If there is no request document corresponding to the
specified parent.
"""
current_request = db.requests.find_one({"_id": parent})
if not current_request:
raise ValueError(parent + " has no request document")
descendants = []
if current_request.get("children"):
for child in current_request["children"]:
if not db.requests.find_one({"_id": child}).get("retired"):
descendants.append(child)
descendants += get_all_descendants(db, child)
# Remove duplicates
return list(set(descendants))
|
a94f6eed1f316cc5aa25ef8e7b1db148aaee05d3
| 32,830 |
def parseWR(s):
    """
    Parses the wavelength range.
    """
    wr = s.strip('wr=[ ]"')
return [float(i.strip(' ')) for i in wr.split(',')]
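# Example (hypothetical header value):
print(parseWR('wr=[3000.0, 5500.0]'))  # [3000.0, 5500.0]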
|
72ce70fb8a1f012b440e93fbaba7a30088b1b306
| 32,832 |
def three_one(three):
"""
Converts three-letter amino acid codes to one-letter.
Arguments:
three (str): Three letter amino acid code; AMBER and CHARMM
nomenclature for alternative protonation states is supported,
but lost by conversion.
Returns:
str: Corresponding one-letter amino acid code
"""
return {"ALA": "A", "ARG": "R", "ASN": "N", "ASP": "D", "ASH": "D",
"CYS": "C", "CYM": "C", "CYX": "C", "GLN": "Q", "GLU": "E", "GLH": "E",
"GLY": "G", "HIS": "H", "HID": "H", "HIE": "H", "HIP": "H", "HSD": "H",
"HSE": "H", "HSP": "H", "ILE": "I", "LEU": "L", "LYS": "K", "LYN": "K",
"MET": "M", "PHE": "F", "PRO": "P", "SER": "S", "THR": "T", "TRP": "W",
"TYR": "Y", "VAL": "V"}[three.upper()]
|
9de2a582fa57ce1f3dd2e26b91f1a86a2c1f11cb
| 32,834 |
def x_ian(x, word):
"""
Given a string x, returns True if all the letters in x are
contained in word in the same order as they appear in x.
>>> x_ian('srini', 'histrionic')
True
>>> x_ian('john', 'mahjong')
False
>>> x_ian('dina', 'dinosaur')
True
>>> x_ian('pangus', 'angus')
False
x: a string
word: a string
returns: True if word is x_ian, False otherwise
"""
if len(x) == 0:
return True
elif len(word) == 0:
return False
elif x[0] == word[0]:
return x_ian(x[1:], word[1:])
else:
return x_ian(x, word[1:])
|
def96c5dc36df5ae8a17bde26878eefcc0874f74
| 32,838 |
def unquote(s):
"""Adds quotes to a string."""
return '"' + s + '"'
|
be5e94d16c96da61f7302f52bfdee2dc5376102e
| 32,839 |
import torch
def ClassificationAccuracy(output, target):
"""
ClassificationAccuracy on a given batch
Args:
output(:obj:`torch.Tensor`) - predicted segmentation mask
of shape BATCHES x SCORES FOR DIFFERENT CLASSES
target(:obj:`torch.Tensor`) - expected segmentation mask
of shape BATCHES x SCORES FOR DIFFERENT CLASSES
Returns:
Classification Accuracy averaged over the batch of images
"""
    predictions = torch.argmax(output.data, 1)  # indices of the predicted classes
correct = (predictions == target).sum().item()
total = output.size(0)
return correct / total
|
024efd8715492e7c5a2984b1846840c297edfe27
| 32,848 |
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
    amountOfPages = len(corpus.keys())
    linkedPages = corpus[page]
    # A page with no outgoing links is treated as linking to every page
    if len(linkedPages) == 0:
        linkedPages = corpus.keys()
    output = {p: 0 for p in corpus.keys()}
    for p in linkedPages:
        output[p] = damping_factor / len(linkedPages)
    for p in output.keys():
        output[p] += (1 - damping_factor) / amountOfPages
    return output
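# Quick check on a three-page corpus (hypothetical); the returned
# distribution always sums to 1.
corpus = {"1.html": {"2.html", "3.html"}, "2.html": {"3.html"}, "3.html": {"2.html"}}
dist = transition_model(corpus, "1.html", 0.85)
print(dist)                # {'1.html': 0.05, '2.html': 0.475, '3.html': 0.475}
print(sum(dist.values()))  # 1.0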
|
7b7c92bf2738b5ad1ad9ab78466ab7051470f1cc
| 32,849 |
def be(entry: object) -> str:
""" Return a stringified version of object replacing Nones with empty strings """
return str(entry).strip() if entry else ''
|
1c54dff6c3137bdeb511e149f177fe189234a70c
| 32,850 |
import requests
def _get_url(url, type_=None, cookies=None):
"""Get content on given HTTP(S) url using Wget user agent.
This method uses :mod:`requests` to make the request. The
`type_` that is passed determines some behavior. Passing
`rss` will set the `Accept` header to request
`'application/rss+xml,application/rdf+xml,application/atom+xml,text/xml'`.
:type url: str
:param url: URL to fetch from
:type type_: str
:param type_: A string indicating the type of resource.
:type cookies: dict
:param cookies: Cookies to send with the request
:returns: Response object
:rtype: requests.Response
"""
head = {
'User-Agent': 'Wget/1.13.4 (linux-gnu)',
'Connection': 'Close',
'Proxy-Connection': 'Keep-Alive'
}
if type_ == 'rss':
head['Accept'] = 'application/rss+xml,application/rdf+xml,application/atom+xml,text/xml'
return requests.get(url, headers=head, cookies=cookies)
|
8eccfdb20bd8783091d8f79cbb200e0f579fa348
| 32,851 |
def count_args(x):
"""Counts number of unique arguments (org and response combined)."""
return x[['org', 'response']].stack().nunique()
|
756db73b2065681ae5f53dfb1e9f6eedf7b8bdeb
| 32,861 |
def bitLeftShift(binIn, n):
"""
Input:
- binIn: a binary number stored as a string. The most significant bit is stored as the first character in the string and so forth.
- n: the number of bits to be shifted and n >= 0.
Output: bin(binIn << n)
"""
pos = 0
allZero = True
for digit in binIn:
# break from loop if finding "1"
if digit == "1":
allZero = False
break
pos += 1
    # take care of the case of all 0
    if allZero:
        return "0"
    else:
        return binIn[pos:] + n * "0"
|
3d21e667d9e983c479c2ad1c0c7937978a9396c8
| 32,863 |
def avarage(num1, num2):
""" (number, number) -> number
    Return the average of num1 and num2.
>>> avarage(10,20)
15.0
>>> avarage(2.5, 3.0)
2.75
"""
return (num1 + num2) / 2
|
275f7808a650f2c139a0f121d23e8044c59cf69b
| 32,865 |
import struct
def _decode_int(fp):
"""Decode an int tag
:type fp: A binary `file object`
:rtype: int
"""
return struct.unpack('>i', fp.read(4))[0]
|
9badc80814a1ce4e7bb6894b1625ca44d75ba433
| 32,867 |
import re
def sort_alphanumeric(it):
"""
Sorts the given iterable in the way that is expected.
E.g. test.txt, test_1.txt, test_2.txt, test_11.txt
:param iterable it: Iterable to be sorted
:return iterable: Sorted iterable
"""
def _convert(text):
if text.isdigit():
return int(text)
else:
return text
return sorted(it, key=lambda key: [_convert(c) for c in re.split('([0-9]+)',
key)])
|
f7685d4e54c92002864a1c9e4384a97a4187bad3
| 32,869 |
def MultipleSameInput(Input,alphabet_guess_already):
"""if Input is in alphabet_guess_already, return True"""
    return Input in alphabet_guess_already
|
a17ef3fc95582936212d23d03337e742b7b89abe
| 32,870 |
def get_xml_tag_dict(xml_tree, tag, attribute):
"""Searches an XML tree for a tag. Under this tag it get all elements and returns them as a dict
with "attribute" as the key, and the text for the element as the value
Args:
xml_tree (xml.etree.ElementTree): XML-tree to search through
tag (str): An XML tag
attribute (str): An XML attribute
Returns:
dict: Key,Value = tag attribute content, element text. Eg: {"da": "danish"...}
"""
    tag_dict = {}
    parent = xml_tree.find(tag)
    for child in parent:
        tag_value = child.text
        tag_key = child.attrib.get(attribute)
        tag_dict[tag_key] = tag_value
    return tag_dict
|
4d4bc983a282ac962fe55f917446f757cbd89c55
| 32,874 |
def mknj2i(item):
"""
Transforms "mknj" notation into tensor index order for the ERI.
Args:
item (str): an arbitrary transpose of "mknj" letters;
Returns:
4 indexes.
"""
notation = "mknj"
notation = dict(zip(notation, range(len(notation))))
return tuple(notation[i] for i in item)
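# Example: the identity order maps to (0, 1, 2, 3); swapping letters
# reorders the indices accordingly.
print(mknj2i("mknj"))  # (0, 1, 2, 3)
print(mknj2i("kmjn"))  # (1, 0, 3, 2)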
|
190e15bec44503e012cf04c5ec784d2b3d744aac
| 32,875 |
import math
import string
def alphabet_enumeration(length):
"""
Return list of letters : A, B, ... Z, AA, AB, ...
See mapentity/leaflet.enumeration.js
"""
if length == 0:
return []
if length == 1:
return ["A"]
width = int(math.ceil(math.log(length, 26)))
enums = []
alphabet = string.ascii_uppercase
for i in range(length):
enum = ""
for j in range(width):
enum = alphabet[i % 26] + enum
i = i // 26
enums.append(enum)
return enums
|
57a0e980c15b480a6f62018d5898c93f278dca93
| 32,876 |
def msec_to_units(time_ms, resolution):
"""Convert milliseconds to BLE specific time units."""
units = time_ms * 1000 / resolution
return int(units)
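# Example: with the 0.625 ms BLE time unit (the arithmetic implies the
# resolution is given in microseconds), 100 ms becomes 160 units.
UNIT_0_625_MS = 625  # 0.625 ms expressed in microseconds
print(msec_to_units(100, UNIT_0_625_MS))  # 160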
|
7654d0ddda09514fedb4ff0f8d67194b0f4c52ae
| 32,877 |
import pickle
async def load_users():
"""
Loads users from 'users.pickle' file
:return: dictionary {id:User}
"""
with open("users.pickle", "rb") as file:
users_info_loaded = pickle.load(file)
print("Users loaded")
return users_info_loaded
|
d0f4e0c745f4dac362373a40cb8ea7684d068f64
| 32,879 |
def big_l_array(p, lp):
"""
Generate L array using pattern and L' array, Theorem 0.2.2, see proof
:param p: the pattern
:param lp: the L' array
:return: the L array
"""
l = [0] * len(p)
l[1] = lp[1]
for i in range(2, len(p)):
l[i] = max(l[i - 1], lp[i])
return l
|
b4159189965e9dd8db451c3f9aa637e8306cc0ba
| 32,883 |
import torch
def make_pad_mask(lengths: torch.Tensor, le: bool = True) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.
    See description of make_non_pad_mask.
    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
        le (bool): If True, return the complementary (non-pad) mask with
            1 for valid positions; if False, return the pad mask with
            1 for padded positions.
    Returns:
        torch.Tensor: Mask tensor containing indices of padded part.
    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths, le=False)
        # masks = [[0, 0, 0, 0, 0],
        #          [0, 0, 0, 1, 1],
        #          [0, 0, 1, 1, 1]]
        >>> make_pad_mask(lengths)  # le=True (default)
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
batch_size = int(lengths.size(0))
max_len = int(lengths.max().item())
seq_range = torch.arange(0,
max_len,
dtype=torch.int64,
device=lengths.device)
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_length_expand = lengths.unsqueeze(-1)
# mask = seq_range_expand >= seq_length_expand
# fix: torch.float32 -> torch.int32
if le:
mask = (seq_range_expand < seq_length_expand).type(torch.int32)
else:
mask = (seq_range_expand >= seq_length_expand).type(torch.int32)
# print(mask)
return mask
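# Usage sketch: note the input must be a torch tensor of lengths, not a
# plain list as in the docstring example.
lengths = torch.tensor([5, 3, 2])
print(make_pad_mask(lengths))            # 1 marks valid positions
print(make_pad_mask(lengths, le=False))  # 1 marks padded positions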
|
43b32a4dc7b1053ad80a8d6c47ea39d1835d5a71
| 32,886 |
def between(bound_min, bound_max, value):
"""Checks whether the value is between the min and max values, inclusive."""
    return bound_min <= value <= bound_max
|
2233fd7670c27a5e75b96822cf047842efb4add0
| 32,889 |
def read_item_dict(item_file):
"""Reads an source/rir dict from a tab separated source/rir file."""
item_dict = {}
with open(item_file, 'r') as f:
for line in f:
line = line.rstrip('\n')
fields = line.split('\t')
item_key = fields[0]
items = fields[1:]
subfolder = items[0].split('/')[0]
if subfolder not in item_dict:
item_dict[subfolder] = {}
item_dict[subfolder][item_key] = items
return item_dict
|
a98acd115f9724838108c6ee62bba20cd5c27e6d
| 32,893 |
def _swift_module_search_path_map_fn(module):
"""Returns the path to the directory containing a `.swiftmodule` file.
This function is intended to be used as a mapping function for modules
passed into `Args.add_all`.
Args:
module: The module structure (as returned by
`swift_common.create_module`) extracted from the transitive
modules of a `SwiftInfo` provider.
Returns:
The dirname of the module's `.swiftmodule` file.
"""
if module.swift:
return module.swift.swiftmodule.dirname
else:
return None
|
f2b568879711a2caf12f615e3c9972044cff9e13
| 32,894 |
import gzip
def _get_open_function_from_extension(filename, kind="yaml"):
"""Returns the function open is the extension is ``kind`` or
'gzip.open' if it is ``kind``.gz'; otherwise, raises ValueError
"""
if filename.endswith(".{}.gz".format(kind)):
return gzip.open
elif filename.endswith(".{}".format(kind)):
return open
else:
raise ValueError("Invalid filename. Should be .{} or .{}.gz".format(kind, kind))
|
61229ec708b78632533e1adc4e491a5f5f169380
| 32,895 |
def normalize_framework(framework: str) -> str:
"""Normalize framework strings by lowering case."""
return framework.lower()
|
64d6bfc623d5e56d37c7ab64fd8d7165b93a7aef
| 32,896 |
import re
def __modify_name_remove(file_name, string, position):
"""
Core method to remove a string from the base name of a file.
"""
file_newname = ""
if position == "any":
file_newname = file_name.replace(string, "")
elif position == "prefix":
file_newname = re.sub("^" + string, "", file_name)
elif position == "suffix":
file_newname = re.sub(string + "$", "", file_name)
return file_newname
|
9dfd5236d34450409f01974cd27f490eed125b3f
| 32,897 |
def lazy_property(func):
"""惰性属性修饰器。
如我们可以定义下面一个 `Circle` 的类,定义其中
计算面积的属性为惰性属性
.. code-block:: python
@dataclass
class Circle:
x: float
y: float
r: float
@lazy_property
def area(self):
print("computing")
return 3.14 * r * r
调用时结果如下,可以发现仅第一次发生了计算:
>>> cir = Circle(0, 1, 1)
>>> cir.area
computing
3.14
>>> cir.area
3.14
"""
attr_name = "_lazy_" + func.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, func(self))
return getattr(self, attr_name)
return _lazy_property
|
a82d07465b91c1334760b581f2e4b9e605b1790f
| 32,899 |
from typing import Sequence
def joiner(values: Sequence, join_str: str = ", ", last_str: str = ", "):
"""Joins values together with an additional `last_str` to format how
the final value is joined to the rest of the list
Args:
values (Sequence): Values to join
join_str (str, optional): What to join values 0 - penultimate value with. Defaults to ", ".
        last_str (str, optional): What to use to join the last
            value to the rest. Defaults to ", ".
"""
if len(values) == 1:
return values[0]
return join_str.join(str(v) for v in values[:-1]) + last_str + str(values[-1])
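# Example: building a human-readable list (hypothetical values).
print(joiner(["red", "green", "blue"], ", ", " and "))  # red, green and blue
print(joiner(["solo"]))                                 # solo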
|
c657cf4fcb93226e5d8b726519ba47d2a268017a
| 32,903 |
def shift_tokens_left(input_ids, pad_token_id):
"""Shift input ids one token to the left"""
prev_output_tokens = input_ids.clone()
prev_output_tokens[:, :-1] = input_ids[:, 1:]
prev_output_tokens[:, -1] = pad_token_id
return prev_output_tokens
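# Demonstration (assumes a 2-D tensor of token ids and a pad id of 0):
import torch
input_ids = torch.tensor([[101, 7592, 2088, 102]])
print(shift_tokens_left(input_ids, pad_token_id=0))
# tensor([[7592, 2088,  102,    0]])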
|
e139bb3a573e66e49994854742945466a4a5aa39
| 32,909 |
def create_layer_set(width=7000, height=7000):
"""Generate layer set data for xml file"""
# I think this sets up the canvas?
layer_set = f"""
<t2_layer_set
oid="3"
width="{width:.1f}"
height="{height:.1f}"
transform="matrix(1.0,0.0,0.0,1.0,0.0,0.0)"
title="Top Level"
links=""
layer_width="{width:.1f}"
layer_height="{height:.1f}"
rot_x="0.0"
rot_y="0.0"
rot_z="0.0"
snapshots_quality="true"
snapshots_mode="Full"
color_cues="true"
area_color_cues="true"
avoid_color_cue_colors="false"
n_layers_color_cue="0"
paint_arrows="true"
paint_tags="true"
paint_edge_confidence_boxes="true"
prepaint="false"
preload_ahead="0"
>"""
return layer_set
|
542090a46c7165f31b237ece4c006abfecdef759
| 32,910 |
def defocus_to_image_displacement(W020, fno, wavelength=None):
"""Compute image displacment from wavefront defocus expressed in waves 0-P to.
Parameters
----------
W020 : `float` or `numpy.ndarray`
wavefront defocus, units of waves if wavelength != None, else units of length
fno : `float`
f/# of the lens or system
wavelength : `float`, optional
wavelength of light, if None W020 takes units of length
Returns
-------
`float`
image displacement. Motion of image in um caused by defocus OPD
"""
if wavelength is not None:
return 8 * fno**2 * wavelength * W020
else:
return 8 * fno**2 * W020
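# Example: a quarter wave of defocus at f/4 with a 0.5 um wavelength moves
# the image by 8 * 4**2 * 0.5 * 0.25 = 16 um (units follow the wavelength).
print(defocus_to_image_displacement(0.25, fno=4, wavelength=0.5))  # 16.0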
|
3e4099d76dc4e4ae005397eb55ce1f3451808df9
| 32,912 |
def getWeightsFromIds(weights, kernelIds):
"""Get the weights corresponding to the provided indices.
.. note:: The kernel indices are assumed to start with 1 instead of 0.
This is because we use sparse matrices to represent kernel ids,
which would remove zero-based ids. This method subtracts 1 from the
kernel indices for proper indexing of the weight matrix.
:param np.ndarray weights: Array of weight values.
:param np.ndarray kernelIds: Array of indices.
:return: Array of weights.
:rtype: np.ndarray
"""
return weights[kernelIds - 1]
|
e350b88a95988978d6c3827b26db39c594561317
| 32,914 |
import pytest

@pytest.fixture  # assumed decorator: tmpdir_factory only exists inside a pytest fixture
def config_file(tmpdir_factory):
"""Creates a sample looker.ini file and returns its path"""
filename = tmpdir_factory.mktemp("settings").join("looker.ini")
filename.write(
"""
[Looker]
# Base URL for API. Do not include /api/* in the url
base_url=https://host1.looker.com:19999
# API 3 client id
client_id=your_API3_client_id
# API 3 client secret
client_secret=your_API3_client_secret
# Set to false if testing locally against self-signed certs. Otherwise leave True
verify_ssl=True
[OLD_API]
base_url=https://host2.looker.com:19999
client_id=your_API3_client_id
client_secret=your_API3_client_secret
verify_ssl=
[BARE_MINIMUM]
base_url=https://host3.looker.com:19999/
[BARE]
# Empty section
[BARE_MIN_NO_VALUES]
base_url=""
[QUOTED_CONFIG_VARS]
base_url="https://host4.looker.com:19999"
verify_ssl='false'
"""
)
return filename
|
978488a8371435b8b2ca48115e32df6d0e389bda
| 32,921 |
import ast
def get_names(expression):
"""get a list of names in expression"""
names = set()
for node in ast.walk(expression):
if type(node) is ast.Name:
names.add(node.id)
return names
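# Example: walking an expression parsed in 'eval' mode.
expr = ast.parse("a * x + b", mode="eval")
print(get_names(expr))  # {'a', 'x', 'b'} (a set, so order varies)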
|
66f64a944b825a5be4a23c72260914df02d8180b
| 32,926 |
import re
def parse_original(original_file):
"""Parse original wikitable content into a dictionary, keyed by atoms"""
words = {}
key = None
pending = []
# <nowiki> tags are optional. Meant to extract the text inside the
# template.
pat = re.compile(r'\| \{\{ALB\|(?:<nowiki>)?([^<>]*)(?:</nowiki>)?\}\}')
pat2 = re.compile(r'\| ?(.*)')
for line in original_file:
line = line.rstrip()
if line == '|-':
if key is not None:
if key in words:
pending = words[key] + ["*duplicate*"] + pending
words[key] = pending
key = None
pending = []
continue
if key is None:
match = pat.fullmatch(line)
if not match:
raise ValueError("Couldn't match " + line)
key = match[1]
else:
match = pat2.fullmatch(line)
if not match:
raise ValueError("Couldn't match " + line)
pending.append(match[1])
if key is not None:
if key in words:
pending = words[key] + ["*duplicate*"] + pending
words[key] = pending
return words
|
04e46f5e945042065ca7be82110650b162c99084
| 32,931 |
import turtle
def create_turtle(c, s):
"""
Creates a turtle
:param c: turtle's color
:param s: turtle's size
:return: returns the turtle object fully created
"""
t = turtle.Turtle()
t.pencolor(c)
t.pensize(s)
return t
|
3d9d35133a0c8a9c29f9a6a0fa6ff8b101930a08
| 32,937 |
def clean_response(inp):
"""
Remove arbitrary characters from the response string
"""
    # Remove all alert (BEL, '\a') characters.
    inp = inp.replace('\a', '')
return inp
|
c5c3b8ab7c4ce3470eba9ec1f3d8ad0765065041
| 32,938 |
def doble_queso(pizza):
"""
    (list of str) -> list of str
    Adds cheese ('queso') at the start and end of the pizza if it is not already there
    >>> doble_queso(['queso', "jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon", 'queso'])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(['queso', "jamon", 'queso'])
    ['queso', 'jamon', 'queso']
    :param pizza: list of str, the pizza to top
    :return: the pizza with cheese at both ends
"""
nueva_pizza = pizza.copy()
    if nueva_pizza[0] != 'queso':
        nueva_pizza.insert(0, 'queso')
    if nueva_pizza[-1] != 'queso':
        nueva_pizza.append('queso')
return nueva_pizza
|
a8ea9d6b63989e616f00fd9684053fcdffdb1d9d
| 32,939 |
from typing import Iterable
import hashlib
def hash_sample_ids(sample_names: Iterable[str]) -> str:
"""
Return a unique hash string from a set of strings
:param sample_names: set of strings
:return: a string hash
"""
for sn in sample_names:
assert ' ' not in sn, sn
return hashlib.sha256(' '.join(sorted(sample_names)).encode()).hexdigest()[:32]
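# The hash is order-independent because names are sorted before joining:
print(hash_sample_ids({'s2', 's1'}) == hash_sample_ids(['s1', 's2']))  # True
print(len(hash_sample_ids({'s1', 's2'})))  # 32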
|
33709efe686c86f71da7230da1b93bfeaa1b7024
| 32,942 |
def parse_substring(allele, pred, max_len=None):
"""
Extract substring of letters for which predicate is True
"""
result = ""
pos = 0
if max_len is None:
max_len = len(allele)
else:
max_len = min(max_len, len(allele))
while pos < max_len and pred(allele[pos]):
result += allele[pos]
pos += 1
return result, allele[pos:]
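# Example: peeling the leading alphabetic prefix off an allele name
# (hypothetical input).
prefix, rest = parse_substring("HLA-A*02:01", str.isalpha)
print(prefix, rest)  # HLA -A*02:01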
|
2d3cd3a26f397e28660e5cd116943c3b619e50a8
| 32,945 |
def preprocess(words):
"""Returns a string of words stripped of punctuation"""
punct_str = '!"#$%&\'()*+,-./:;<=>/?@[\\]^_`{|}~«» '
return ' '.join([w.strip(punct_str).lower() for w in words.split()])
|
24827adac7438189d78f3451a34e61a42cecb81f
| 32,947 |
import math
def distance(start, end):
"""Calculate the distance between two points."""
x0, y0 = start
x1, y1 = end
dx = x1 - x0
dy = y1 - y0
start_to_end = math.sqrt(dx*dx + dy*dy)
return start_to_end
|
4b9ef2b58686259a8de29b77cdba9b27753f2332
| 32,950 |
def strpbrk(cpu_context, func_name, func_args):
"""
Locate characters in string.
Returns a pointer to the first occurrence of str1 of any of the characters
that are part of str2, or a null pointer if there are no matches.
"""
str1_ptr, str2_ptr = func_args
str1 = cpu_context.memory.read_data(str1_ptr)
str2 = cpu_context.memory.read_data(str2_ptr)
for offset, ch in enumerate(str1):
if ch in str2:
return str1_ptr + offset
return 0
|
c069193d22c9d1e94f45aa77c46c9046a1239a96
| 32,951 |
def _ray_remote(function, params):
"""This is a ray remote function (see ray documentation). It runs the `function` on each ray worker.
:param function: function to be executed remotely.
:type function: callable
:param params: Parameters of the run.
:type params: dict
:return: ray object
"""
r = function(params)
return r
|
0aae675af23be189b8e15504ccde41e095f4b4d6
| 32,952 |
def get_clean_paragraph(count):
"""
Creates a clean paragraph dict
Returns
--------
dict
        A dict with title, content and the count
"""
return {
"title": "",
"content": "",
"count": count
}
|
e6b16c6d9065e428547d14116db1451feaca3668
| 32,954 |
def loss_fn(model, batch):
""" loss_fn as required by do_train """
return model(batch['image'], batch['annotations'])
|
ac5f99a6cea610594dffd5fdf56c4c685acf5241
| 32,955 |
import torch
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + torch.erf(input_tensor / (2.0 ** 0.5)))
return input_tensor * cdf
|
9f4bb523d287314ff4fc68385ea2778f4726deb2
| 32,957 |
def _query_for_log(query: bytes) -> str:
"""
Takes a query that ran returned by psycopg2 and converts it into nicely loggable format
with no newlines, extra spaces, and converted to string
:param query: Query ran by psycopg2
:return: Cleaned up string representing the query
"""
return ' '.join(query.decode().replace('\n', ' ').split())
|
047480bb6ad41621fb7f92ffa6fedfb03cfa4f6b
| 32,962 |
def commit_to_url(full_commit_hash: str) -> str:
"""Provides a remote URL (eg. for GitHub) for a given commit ID
Args:
full_commit_hash (str): commit ID
Returns:
str: URL
"""
return f"https://github.com/hacksoc/hacksoc.org/commit/{full_commit_hash}"
|
f621ac8f3db23d3ce8fa780b0e355be55a4897a7
| 32,971 |
def source_shorthand(source):
"""Expands source aliases (e.g. 4u ==> 4u1820)
"""
if source == '4u':
return '4u1820'
elif source == 'gs':
return 'gs1826'
else:
return source
|
11d372edd97dbf03181473812ced177722c14487
| 32,975 |
def update(input_ch, answer, dashed):
"""
    This function updates the progress when users make a right guess (string manipulation)
:param input_ch: str, an alphabet users enter in this round
:param answer: str, the final answer
:param dashed: str, the dashed answer
:return decrypted: str, the updated progress
"""
# Create an empty string
decrypted = ''
for i in range(len(answer)):
# Find the position of character users guess right and update it
if answer[i] == input_ch:
decrypted += input_ch
else:
            # Keep the part users have already solved, and fill the unsolved part with dashes
if dashed[i] != '-':
decrypted += dashed[i]
else:
decrypted += '-'
return decrypted
|
5e2528fff524d6a4082b04998c9cda68380531b3
| 32,976 |
def read_DDTpos(inhdr):
"""
Read reference wavelength and DDT-estimated position from DDTLREF and
DDT[X|Y]P keywords. Will raise KeyError if keywords are not available.
"""
try:
lddt = inhdr['DDTLREF'] # Ref. wavelength [A]
xddt = inhdr['DDTXP'] # Predicted position [spx]
yddt = inhdr['DDTYP']
except KeyError as err:
raise KeyError("File has no DDT-related keywords (%s)" % err)
# Some sanity check
if not (abs(xddt) < 7 and abs(yddt) < 7):
raise KeyError(
"Invalid DDT position: %.2f x %.2f is outside FoV" % (xddt, yddt))
return lddt, xddt, yddt
|
39cce130305440f14db8e5378ae4e66b8e124b20
| 32,979 |
from pathlib import Path
def get_project_root() -> Path:
"""Return the path to project (i.e. the top level dat_analysis folder which contains src etc"""
return Path(__file__).parent.parent.parent
|
546015b3555ff7ace6109e064dd94ae7db9491de
| 32,980 |
def AR_6(dul):
"""Association release AR-6.
On receiving P-DATA-TF during attempted association release request
send P-DATA indication
State-event triggers: Sta7 + Evt10
Parameters
----------
dul : dul.DULServiceProvider
The DICOM Upper Layer Service instance for the local AE
Returns
-------
str
``'Sta7'``, the next state of the state machine
"""
# Issue P-DATA indication
dul.to_user_queue.put(dul.primitive)
return 'Sta7'
|
03f8464e6483421bddd5d570829506362f14092c
| 32,981 |
def get_votes(conn, track_id):
"""
Get votes for track_id from VoteData table
:param conn:
:param track_id:
:return votes:
"""
sql = """
SELECT votes FROM votedata WHERE track_id = ?;
"""
cur = conn.cursor()
cur.execute(sql, (track_id,))
return cur.fetchone()
|
4f6e572f329c3226811d4073fbe22ce56a62633b
| 32,987 |
from pathlib import Path
def mtimes(file_list):
"""Return a list of modified times for the given list of files"""
files = {}
for f in file_list:
path = Path(f)
files[f] = path.stat().st_mtime
return files
|
d8f99989bb09a048582f1816d6a4a59604e5af68
| 32,989 |
import re
def get_map_seed(instructions):
"""Extract map seed from instructions."""
match = re.search(rb'\x00.*? (\-?[0-9]+)\x00.*?\.rms', instructions)
seed = None
if match:
seed = int(match.group(1))
return seed
|
22d8ecf25ee50b2ee1e3cc358d1441c277ebe569
| 32,992 |
import inspect
def function_path(fn):
"""
Returns the name of the function along with the module containing it:
module.submodule.name
"""
module = inspect.getmodule(fn)
if module is not None:
return '{}.{}'.format(module.__name__, fn.__name__)
else:
return fn.__name__
|
bc83c45e01c4f7b6789b80d727a06c4f31522fce
| 32,996 |
def m1m2_to_nu(m1,m2):
"""Symmetric mass ratio from m1, m2"""
return m1*m2/(m1+m2)**2
|
a9b42f6c5360d190403c6c680cbf4406f2df7150
| 32,997 |
def select_trajectory(mdsys):
""" Extract the trajectory from the simulation data
"""
mdt = mdsys.trajectory
return mdt
|
8c493883389f8479a8ff7d3c9e4c857713d5627b
| 33,000 |
import requests
def get_repo_public_key(username: str, repo_name: str, token: str) -> dict:
"""
Get the public key for a repository via the Github API. At least for private repos, a personal access token (PAT) with the repo scope is required.
:param username: The users github username
:param repo_name: The repositories name
:param token: The PAT of the user with repo scope
:return: A dict containing the public key and its ID
"""
query_url = f'https://api.github.com/repos/{username}/{repo_name}/actions/secrets/public-key'
headers = {'Authorization': f'token {token}'}
r = requests.get(query_url, headers=headers)
return r.json()
|
e237df2ae549a39cb86551fbd9fafb52bd491aad
| 33,002 |
from typing import List
def flatten_once(lst: List) -> List:
"""
Flattens the list lst one level.
"""
return [item for sublist in lst for item in sublist]
|
5b094b6c313cae15f2ce0a56d0783d4f723164ab
| 33,003 |
def reconcile(current, desired):
"""Return sets (to_add, to_remove) indicating elements that should be
added to or removed from ``current`` to produce ``desired``.
"""
to_remove = current - desired
to_add = desired - current
return to_add, to_remove
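# Example: the delta needed to turn one membership set into another.
to_add, to_remove = reconcile(current={1, 2, 3}, desired={2, 3, 4})
print(to_add, to_remove)  # {4} {1}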
|
0fc5ad379b26543162457573a22c357d49f1b757
| 33,010 |
def facade_versions(name, versions):
"""
facade_versions returns a new object that correctly returns a object in
format expected by the connection facades inspection.
:param name: name of the facade
:param versions: versions to support by the facade
"""
if name.endswith('Facade'):
name = name[:-len('Facade')]
return {
name: {'versions': versions},
}
|
4378df3da64453ee8bd6f278cee260e877fc2cdd
| 33,014 |
def _from_rgb(rgb):
"""translates an rgb tuple of int to a tkinter friendly color code
"""
return "#%02x%02x%02x" % rgb
|
464cab5007935824b8b7af28e97cc882ec0653ee
| 33,016 |
def is_enrollment_row(tag):
"""
True if the tag is an enrollment row
:param tag: the tag to check
:return: true if the tag is an enrollment row
"""
is_tr = tag.name == 'tr'
cells = tag.find_all('td')
has_2_cells = len(cells) == 2
has_enrollment_title = cells[0].get_text() == 'Enroll' \
if has_2_cells else False
return is_tr and has_2_cells and has_enrollment_title
|
d110c63e9ff2556e455c188fa7cc6754f3d6bc60
| 33,017 |
def to_spine_case(original: str) -> str:
"""
Convert this_thing to this-thing.
"""
return original.lower().replace("_", "-")
|
baf37fca4790018a4cbff77ed263da9cd44b6641
| 33,020 |
def uniq_list(in_list):
"""
Takes a list of elements and removes duplicates and returns this list
:param in_list: Input list
:return: List containing unique items.
"""
seen = set()
result = []
for item in in_list:
if item in seen:
continue
seen.add(item)
result.append(item)
return result
|
a3d69b3d76ae4f62ae0ea3faa6a6ff59d1dcde63
| 33,021 |
def to_set(labels_list):
"""given a list of labels from annotations, return the set of (unique) labels
Parameters
----------
labels_list : list
of lists, i.e. labels from annotations
Returns
-------
labelset
Examples
--------
>>> labels_list = [voc.annot.labels for voc in vds.voc_list]
>>> labelset = to_set(labels_list)
"""
all_labels = [lbl for labels in labels_list for lbl in labels]
labelset = set(all_labels)
return labelset
|
f63e7ad1d3a88c25478a590c13399c04fd763c03
| 33,023 |
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
storylist = list()
for story in stories:
for T in triggerlist:
if T.evaluate(story):
storylist.append(story)
return storylist
|
a3c1a810ba4a6e67fb8d8a11299150da5d891c20
| 33,028 |
def decode_imsi(imsi64):
"""
Convert from the compacted uint back to a string, using the second two bits
to determine the padding
Args:
imsi64 - compacted representation of imsi with padding at end
Returns:
imsi string in the form IMSI00101...
"""
prefix_len = (imsi64 >> 1) & 0x3
return 'IMSI' + '0' * prefix_len + str(imsi64 >> 3)
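# Round-trip sketch; the encoding is inferred from the decode logic
# (digits shifted left 3 bits, zero-padding length stored in bits 1-2).
imsi64 = (101 << 3) | (2 << 1)  # hand-encode 'IMSI00101': digits 101, two leading zeros
print(decode_imsi(imsi64))  # IMSI00101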
|
8652b678aee5c43a68e8d3fc17590e1cac77863a
| 33,030 |
def check_peptide(peptide:str, AAs:set)->bool:
"""
Check if the peptide contains non-AA letters.
Args:
peptide (str): peptide sequence.
AAs (set): the set of legal amino acids. See alphapept.constants.AAs
Returns:
        bool: True if all letters in the peptide are a subset of AAs, otherwise False
    """
    return set(_ for _ in peptide if _.isupper()).issubset(AAs)
|
20509525ab8d5a8cc25c305a6e55475236785ddf
| 33,034 |