content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---|
from collections import OrderedDict
def custom_sort_exps(exps):
"""
Sort the experiments according to JIRA LCLSECSD-210
Active first, OPS next and then descending run period/alphabetical within that run period
"""
# Python's sort is stable; so we sort multiple times lowest attr first
exps = OrderedDict(sorted(exps.items(), key=lambda x : x[1]["name"]))
# print( [ v["name"] for k, v in exps.items() ] )
exps = OrderedDict(sorted(exps.items(), key=lambda x : x[1]["name"][-2:], reverse=True))
exps = OrderedDict(sorted(exps.items(), key=lambda x : not x[1].get("instrument", "") == "OPS"))
exps = OrderedDict(sorted(exps.items(), key=lambda x : not x[1].get("is_active", False)))
return exps
|
dcd76be3ac6a0c9b8b60b87b1c48b0a2823a363e
| 674,639 |
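A minimal sketch of how the layered stable sorts behave, using made-up experiment records (the field names follow the function above):

exps = {
    1: {"name": "xpptut15", "instrument": "XPP", "is_active": False},
    2: {"name": "diadaq21", "instrument": "OPS", "is_active": False},
    3: {"name": "mfxls4916", "instrument": "MFX", "is_active": True},
}
print([v["name"] for v in custom_sort_exps(exps).values()])
# -> ['mfxls4916', 'diadaq21', 'xpptut15'] (active first, then OPS, then the rest)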
def find_node(node_name, g):
"""Go through the attributes and find the node with the given name"""
for n, d in g.nodes_iter(data=True):
if d["label"] == node_name:
return n
|
d2415d65d25a4b3706ead5f4a6f450bafb24d40f
| 503,701 |
def already_cached(finder):
"""
Checks to see if the finder class has already been modified.
@param finder - Class that should contain a find_metrics method.
"""
return hasattr(finder, '__cached_find_metrics__')
|
a8604f166b1c7bdd8a67ad8837b7f565f0eb9b77
| 233,102 |
def _split(da, date, period="Y"):
"""
args:
da: xarray data array
the dataarray containing the values with a time index
date: pandas datetime
date of the breakpoint
period: datetime frequency code
shift till after the breakpoint, based on temporal frequency of data
Returns
        y1: Array like y-values for data preceding the breakpoint
        y2: Array like y-values for data occurring after the breakpoint
        x1: Array like x-values for data preceding the breakpoint
        x2: Array like x-values for data occurring after the breakpoint
"""
# ========== Split the time series ==========
y1 = da.sel(time=slice(None, date)).values
    y2 = da.sel(time=slice(date, None)).values
x1 = da.sel(time=slice(None, date)).time.values.astype('datetime64[%s]'% period).astype(float)
x2 = da.sel(time=slice(date,None)).time.values.astype('datetime64[%s]'% period).astype(float)
return y1, y2, x1, x2
|
35f273318dbbdce9006db30cdffe87ea5c7f98d9
| 211,279 |
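A small usage sketch of _split, assuming xarray and pandas are available; it builds a synthetic monthly series and splits it at a hypothetical breakpoint:

import pandas as pd
import xarray as xr

times = pd.date_range("2000-01-01", periods=24, freq="MS")
da = xr.DataArray(range(24), coords={"time": times}, dims="time")
y1, y2, x1, x2 = _split(da, pd.Timestamp("2001-01-01"), period="M")
# y1/x1 cover 2000 up to the breakpoint, y2/x2 cover 2001 onward;
# x-values are months since the numpy datetime64 epoch, as floats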
def bound_value(value: int, minimum: int=0, maximum: int=9) -> int:
"""
Bounds a value between an upper and lower bound
:param value: the value to bound
:param minimum: the minimal allowed value
:param maximum: the maximal allowed value
:return: the bounded value
"""
return minimum if value < minimum else maximum if value > maximum else value
|
9e5ec5551276d05c0056f97da2666e87569c0f28
| 596,956 |
def get_filename_pref(file_name):
"""Splits the filename apart from the path
and the extension. This is used as part of
the identifier for individual file uploads."""
    # keep only the basename, whichever separator style is used
    file_name = file_name.split('/')[-1]
    file_name = file_name.split('\\')[-1]
file_name_pref = file_name.split('.')[0]
return file_name_pref
|
2ee9998f57c607cadaeacae46960e93f44ce355f
| 211,651 |
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
"""
textlines = text.split('\n')
credits = None
for i in (0, -1):
if textlines and 'lyrics' in textlines[i].lower():
credits = textlines.pop(i)
if credits:
text = '\n'.join(textlines)
return text
|
8373116a50d54e8b983b342b431ff41d95dbdd62
| 170,343 |
import logging
import json
def read_json_file_into_memory(json_file):
"""
Purpose:
Read properly formatted JSON file into memory.
Args:
json_file (String): Filename for JSON file to load (including path)
Returns:
        json_object (Dictionary): Dictionary representation of the JSON object
Examples:
>>> json_file = 'some/path/to/file.json'
>>> json_object = read_json_file_into_memory(json_file)
>>> print(json_object)
>>> {
>>> 'key': 'value'
>>> }
"""
logging.info(f"Reading JSON File Into Memory: {json_file}")
try:
with open(json_file, "r") as json_file_obj:
return json.load(json_file_obj)
except Exception as err:
logging.exception(f"Cannot Read json into memory ({json_file}): {err}")
raise err
|
70c2e6ab6180700ce77469b8afa0d0df8e0eee95
| 7,555 |
from typing import Mapping
def map_nested(func, node, path=[]):
"""Walk on a dictionary as a tree and call `func` on
each leaf.
Args:
func: callable taking two arguments: the leaf and the path to it
(path as list)
node (collections.Mapping): the tree
path: (for recursive use only)
Returns:
        nested dict whose leaves are of the type returned by func.
Source:
https://stackoverflow.com/questions/32935232/python-apply-function-to-values-in-nested-dictionary
"""
new_path = path.copy()
    if isinstance(node, Mapping):
        # recurse into every key; an empty mapping maps to an empty dict
        return {
            k: map_nested(func, v, new_path + [k])
            for k, v in node.items()
        }
else:
return func(node, new_path)
|
f43137276c6c1c896723553bba1191a41c2a9403
| 246,143 |
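A quick usage example: func receives each leaf together with the path of keys leading to it.

tree = {"a": {"b": 1, "c": 2}, "d": 3}
print(map_nested(lambda leaf, path: (leaf * 10, "/".join(path)), tree))
# -> {'a': {'b': (10, 'a/b'), 'c': (20, 'a/c')}, 'd': (30, 'd')}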
def get_individual(individual, ensembl_json):
"""Return a list with the genotypes of the individual."""
genotypes = []
for individual_genotype in ensembl_json["genotypes"]:
if individual in individual_genotype["sample"]:
genotypes.append(individual_genotype)
return genotypes
|
a3024c8f7ec15b37ceb7d83874a662354a14ee57
| 692,931 |
def solve(ar):
"""
Given an array of 5 integers, return the minimal and maximal sum of 4 out of
5 of the integers.
"""
# Just sort the list of integers in place and take the sum of the first 4
# then the last 4.
ar.sort()
    minSum = sum(ar[0:4])
    maxSum = sum(ar[1:5])
return (minSum, maxSum)
|
68d650c51cbe611c51c0b5754c61b541cb1838f8
| 8,946 |
def _int_to_alph(n: int) -> str:
"""
Integer to a..z.
:param n: Number
:return: Number in AABB..
"""
string = ''
while n > 0:
n, remainder = divmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
|
5d86a4663cad178bc7027cc7404812c6eec25121
| 509,499 |
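A few sample conversions showing the spreadsheet-style column naming:

for n in (1, 26, 27, 52, 703):
    print(n, _int_to_alph(n))
# 1 A, 26 Z, 27 AA, 52 AZ, 703 AAA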
def gradient_summand(weights, lp):
"""Calculates the gradient summand for a given weight and `LabeledPoint`.
Note:
        `DenseVector` behaves similarly to a `numpy.ndarray` and they can be used interchangeably
within this function. For example, they both implement the `dot` method.
Args:
weights (DenseVector): An array of model weights (betas).
lp (LabeledPoint): The `LabeledPoint` for a single observation.
Returns:
DenseVector: An array of values the same length as `weights`. The gradient summand.
"""
return (weights.dot(lp.features) - lp.label) * lp.features
|
b34de095fb762aa933570ae58744772205ded5de
| 38,325 |
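A plain numpy sketch of the same computation, with a namedtuple standing in for Spark's LabeledPoint (an assumption for illustration); summing this summand over all observations gives the full least-squares gradient:

import numpy as np
from collections import namedtuple

LabeledPoint = namedtuple("LabeledPoint", ["label", "features"])  # stand-in
w = np.array([1.0, 2.0])
lp = LabeledPoint(label=5.0, features=np.array([3.0, 4.0]))
print((w.dot(lp.features) - lp.label) * lp.features)
# (1*3 + 2*4 - 5) * [3, 4] = [18., 24.]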
def max_modified_date(obj, modified):
"""
Return the largest modified date
If the object does not have a date_modified the argument is returned
    :param obj: record that may contain a 'date_modified' entry
    :param modified: candidate modified date to compare against
    :return: the larger of the two modified dates
"""
if 'date_modified' not in obj or int(obj['date_modified']) < int(modified):
return modified
else:
return obj['date_modified']
|
a40c06fd25ad4e095f79779b3390ab21c04180d2
| 423,042 |
def is_2d(fs):
"""Tests wether a function space is 2D or 3D"""
return fs.mesh().geometric_dimension() == 2
|
24dadb7ff6f6810a0ee9cfbc496bf41050270425
| 70,879 |
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
        A list of two integers of the form [height, width]. `height` and `width` are
        set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
|
87eb97bb3609098f1921671629450fa24529d72e
| 62,415 |
def extract_data(spark, schema):
"""Load data from csv file format. In production, data will be read from SQL db
:param spark: Spark session object.
:return: Spark DataFrame.
"""
df = (
spark.read.format('csv')
.option('sep', ',')
.option('header', 'true')
.schema(schema)
.load('./data/raw_data/rawdata.csv')
)
return df
|
0f604b35d5fbd77a12c690903b80fabf6986de2e
| 564,902 |
def vectorial_product(u, v):
"""
    Vectorial product u x v. Vectors u, v should be tuples of
    floats or tuples of arrays, (ux, uy, uz) and (vx, vy, vz)
"""
return (u[1]*v[2] - u[2]*v[1],
u[2]*v[0] - u[0]*v[2],
u[0]*v[1] - u[1]*v[0])
|
a35ec59dd8ae631340a1efd57fd958116846c8b0
| 78,793 |
def key_matches_x509_crt(key, crt):
"""
    Verify that the public key derived from the given private key matches the
    public key in the given X.509 certificate.
:param object key: A private key object created using load_privkey()
:param object crt: An X.509 certificate object created using load_x509()
:rtype bool: True, iff the key matches the certificate
"""
return crt.public_key().public_numbers() == key.public_key().public_numbers()
|
f3a9cd3cbfc9df9d0095c0562c3251174a98c141
| 696,155 |
import random
def _get_random_number(interval):
"""
Get a random number in the defined interval.
"""
return random.randrange(2**(interval-1), 2**interval - 1)
|
f586330e3eba08e5ccc22e31cf8a8a04b20a9aa6
| 72,838 |
def extractArgs(args):
""" Convert from args to a dict of parsed arguments of form {'EXOSIMS_RUN_SAVE_PATH':'/home/user/Doc...'}
Args:
args (parser.parse_args()) - the output from parser.parse_args
Returns:
EXOSIMS_QUEUE_FILE_PATH (string) - full file path to the queue file
numCoresString (string) - string of the number of cores to run ipcluster with
qFargs (dict) - dictionary of paths from parsed runQueue arguments of form {'EXOSIMS_RUN_SAVE_PATH':'/home/user/Doc...'}
"""
    myArgs = [arg for arg in args.__dict__.keys() if 'EXOSIMS' in arg and args.__getattribute__(arg) is not None]
paths = dict()
for arg in myArgs:
paths[arg] = args.__dict__[arg][0]
EXOSIMS_QUEUE_FILE_PATH = paths['EXOSIMS_QUEUE_FILE_PATH']
    if args.numCores is None:
        args.numCores = ['1']
numCoresString = str(int(args.numCores[0]))
return EXOSIMS_QUEUE_FILE_PATH, numCoresString, paths
|
8e25cef87c58d1e3f2ba60fcdaf99c1a6eef2eb7
| 163,473 |
import requests
def get_docket(auth_token, docket_number, court_name, client_matter="", cached=True, normalize=True):
"""
Takes in an authentication token as a string,
a docket number as a string, and the court name as a string,
and returns the docket data as a dictionary.
    Optional parameters are client_matter, cached, and normalize.
    cached is True by default. If set to False, the most recent docket data will be fetched directly;
    this can result in extra charges. Set cached to False if you are having trouble retrieving data for a docket.
    normalize normalizes the names of parties when possible.
"""
# We specify the endpoint we will be making the api call to.
endpoint = "https://www.docketalarm.com/api/v1/getdocket/"
# We set the parameters we will send to the endpoint in a dictionary.
# These values come from the arguments that are passed to this function.
params = {
'login_token':auth_token,
'client_matter':client_matter,
'court':court_name,
'docket':docket_number,
'cached':cached,
'normalize':normalize,
}
# we make the api call and store the result in a variable.
# We use the .json() method to convert the result to a python dictionary.
result = requests.get(endpoint, params, timeout=60).json()
# We return the results of the API call as a dictionary.
return result
|
4f38c1bd791f76e0a0bc6e732dc89f36178745dc
| 658,389 |
def get_attribute_name(attribute):
"""
Return the attribute name from the attribute definition
:param attribute: The attribute definition
:return: The attribute's name
"""
    return attribute.split(' ', 1)[0]
|
5850b62783525d5ebb80ef9f5213f9a191749228
| 604,006 |
def _silent_idx(x, y):
"""
Given a list x and an object y, returns the index of y in x. Otherwise
return a None without raising any exceptions.
>>> _silent_idx([1,2,3,4], 2)
1
>>> _silent_idx([1,2,3,4], 5)
"""
if y in x:
return x.index(y)
else:
return None
|
b4ac3ca8082068c5135665a258ab3ecf789536c5
| 158,729 |
import re
def find_problem_name(contents):
"""
Find the name of the problem by reading the first comments if it exists.
If no comments are found 'gdproblem' is used by default.
"""
pattern = re.compile(r"(?<=#).*?\n", re.DOTALL)
match = pattern.search(contents)
if match:
return match.group().strip()
return 'gdproblem'
|
665eefc3265f6ef5b4547c3fb2e2dbbd2ab40fdc
| 89,321 |
def get_artists(tracks):
"""
Returns a dict where:
key: artist_id
value: list of track ids
"""
artists = {}
for _,row in tracks.iterrows():
artist_id = row['artist_id']
if artist_id in artists:
artists[artist_id].append(row['track_id'])
else:
artists[artist_id] = [row['track_id']]
return artists
|
9b7136e5c6e3838d11d8defe8d038917224423ce
| 692,569 |
def line_of_point(point, gdf_lines):
"""Gets index of geometry of a GeoDataFrame, a point is located next to,
with a distance lower than 1e-8.
Parameters
----------
point : shapely.geometry.Point
gdf_lines : geopandas.GeoDataFrame
Returns
-------
int, float or str : Index of GeoDataFrame or Warning, if no geometry found.
"""
ind = None
for k, l in gdf_lines.iterrows():
if l['geometry'].distance(point) < 1e-8:
ind = k
if ind is None:
return Warning('No line found which has point on it!')
return ind
|
621e9fc99edcdcf41047cf05e5d3184117ac0754
| 278,934 |
from typing import List
from typing import Dict
def group_by_author(commits: List[dict]) -> Dict[str, List[dict]]:
"""Group GitHub commit objects by their author."""
grouped: Dict[str, List[dict]] = {}
for commit in commits:
name = commit["author"]["login"]
if name not in grouped:
grouped[name] = []
grouped[name].append(commit)
return grouped
|
239c523317dc8876017d4b61bc2ad8887444085e
| 705,491 |
from pathlib import Path
def shorten_path(file_path, length):
"""
Split the path into separate parts, select the last 'length' elements and join them
"""
return str(Path(*Path(file_path).parts[-length:]))
|
e5583b04e77b3caa87219b232dd330d48f8612d4
| 94,142 |
import re
def snake_casify(name):
"""Turns the given name to follow snake_case convention."""
    # replace non-word characters with spaces so split() can find the words
    name = re.sub(r'\W+', ' ', name).split()
name = [s.lower() for s in name]
return '_'.join(name)
|
27f1333e1eede40735d0ad8c03e30ad242ac5394
| 583,274 |
def count_if(my_list, key, value):
"""
return number of records in my_list where key==value pair matches
"""
counter = (1 for item in my_list if item.get(key) == value)
return sum(counter)
|
dfd403f652751c000699c67b450ef8b74b1b9ac5
| 372,004 |
def partition(l, size):
"""
Partition the provided list into a list of sub-lists of the provided size. The last sub-list may be smaller if the
length of the originally provided list is not evenly divisible by `size`.
:param l: the list to partition
:param size: the size of each sub-list
:return: a list of sub-lists
"""
return [l[i:i + size] for i in range(0, len(l), size)]
|
6d24bdf1b8e46450b7070c2819180cf40fd418b3
| 694,556 |
def cl_velocity(r, v0, vinf, rstar, beta):
"""
Calculate the velocity for a 1D spherical wind using the velocity law as
given in Castor & Lamers 1979.
Parameters
----------
r: float
The distance to calculate the velocity at
v0: float
The initial velocity at the bottom of the spherical wind
vinf: float
The terminal velocity at the edge of the wind
rstar: float
The radius of the central source (star)
beta: float
        The acceleration exponent. Controls how rapidly the velocity
is accelerated to the terminal velocity vinf.
Returns
-------
v: float
The velocity at a point r for a 1D spherical wind
"""
return v0 + (vinf - v0) * (1 - rstar / r) ** beta
|
eb6591cb6fb316bd7f9acfc66dace7b06fe984a6
| 338,021 |
def UnixToDHMS(duration):
"""
Convert duration (in unix_timestamp seconds) to days, hours, minutes and
seconds.
Args
duration: <type 'int'> OR <type 'str'> Duration in seconds.
If given as a string, convert to integer.
Returns
d: days [0+]
h: hours [0-23]
m: minute [0-59]
s: seconds [0-59]
"""
duration = int(duration)
    d = duration // (24 * 60 * 60)
    h = duration // (60 * 60) % 24
    m = duration // 60 % 60
s = duration % 60
return d, h, m, s
|
dacd1a7430b1c2adbb4fed6ac2b2a1781cb23bcc
| 554,311 |
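For example, a duration of one day, one hour, one minute and one second (90061 seconds):

print(UnixToDHMS(90061))  # (1, 1, 1, 1)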
def register_new_user(access, username, password):
""" Register a new user & handle duplicate detection """
if access.user_data(username) is not None:
raise ValueError("User '%s' already exists!" % username)
if username in access.pending_users():
raise ValueError("User '%s' has already registered!" % username)
access.register(username, password)
if access.need_admin():
access.approve_user(username)
access.set_user_admin(username, True)
return True
return False
|
25b98c4def9da81d71176aed196f61b2e71d64c5
| 10,306 |
def get_repository(gh, data):
"""Gets the repository from hook event data."""
return gh.repository(
data['repository']['owner']['login'], data['repository']['name'])
|
ebeb2b99f5daeb774bd762e6648319f667ddb12e
| 456,854 |
import math
def embedding_dim(vocab_size: int):
"""Calculate a reasonable embedding size for a vocabulary.
Rule of thumb is ``6 * sqrt(sqrt(vocab_size))``.
Args:
vocab_size: Size of the input vocabulary.
Returns:
The embedding size to use.
Raises:
ValueError: if ``vocab_size`` is invalid.
"""
if not vocab_size or (vocab_size <= 0):
raise ValueError("Invalid vocab_size %g." % vocab_size)
return int(round(6.0 * math.sqrt(math.sqrt(vocab_size))))
|
b3b25215655ddb6970ec3d1e9ba6555cddb2dead
| 504,202 |
def _getWordCount(start, length, bits):
"""
Get the number of words that the requested
bits would occupy. We have to take into account
how many bits are in a word and the fact that the
    number of requested bits can span multiple words.
"""
newStart = start % bits
newEnd = newStart + length
    totalWords = (newEnd - 1) // bits
return totalWords + 1
|
a182c1fa89b3b95f1a654aeac8570eae42b88fa1
| 104,891 |
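A worked example, assuming 16-bit words: reading 20 bits starting at bit 10 touches bits 10..29, which span two words.

print(_getWordCount(10, 20, 16))  # 2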
def SkipIfAnyInputIsNone(func):
"""Decorator for check objects' _Check() to skip upon any missing input.
Mark that the check should be skipped if any of its input fields are not
present in the flight log.
Args:
func: A Check's _Check() member function to apply this decorator.
Returns:
The decorator's wrapper function.
"""
def Wrapper(self, *attributes):
for attrib in attributes:
if attrib is None:
return
return func(self, *attributes)
return Wrapper
|
dbb58e52dc965d34285716133917f0bada7ab738
| 400,859 |
def _instance2sub(instance_number, total_perturbations):
"""
Converts an instance number (ii) to initial condition index (ci) and
perturbation index (pi) subscripts
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
perturbation_index = (instance_number - 1) % total_perturbations
initial_condition = (instance_number - 1 - perturbation_index) // total_perturbations
return initial_condition, perturbation_index
|
08acc4ab70ac62d6b801da284633c892dd798f52
| 511,065 |
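A round-trip check of the subscript math, with a hypothetical perturbation count standing in for len(PERTURBATIONS):

TOTAL_PERTURBATIONS = 4  # hypothetical
for ii in range(1, 9):
    ci, pi = _instance2sub(ii, TOTAL_PERTURBATIONS)
    assert ii == ci * TOTAL_PERTURBATIONS + pi + 1
# e.g. instance 1 -> (0, 0), instance 5 -> (1, 0), instance 8 -> (1, 3)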
def MDD(series):
"""Maximum Drawdown (MDD) is an indicator of downside
risk over a specified time period.
MDD = (TroughValue – PeakValue) ÷ PeakValue
"""
trough = min(series)
peak = max(series)
mdd = (peak - trough) / peak
return round(mdd, 3)
|
71ef94e58345b24c3a64311f3652b24c8f1eeafc
| 679,692 |
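For example, on a hypothetical price series:

print(MDD([100, 120, 90, 110]))  # (120 - 90) / 120 = 0.25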
async def check_health() -> str:
"""Returns ok if there is a connection to the API endpoint"""
return "Ok"
|
9830c9f6626aa2fca007bb82ca79ea2b3c0fc6ad
| 356,687 |
from typing import Union
from pathlib import Path
import logging
def prepare_file_handler(filepath: Union[str, Path]) -> logging.FileHandler:
"""Returns a :class:`~logging.FileHandler` and creates the necessary
directories on the fly, if needed.
Args:
filepath: Absolute path to the targeted logfile.
"""
logpath = Path(filepath)
if not logpath.is_absolute():
raise ValueError("Value of filepath must be an absolute path.")
if logpath.is_dir():
raise ValueError("Value of filepath must point to a file.")
logdir = logpath.parent
    logdir.mkdir(parents=True, exist_ok=True)
return logging.FileHandler(str(logpath))
|
9f01091921bea1c26adcd3e87a4bc8759e680f53
| 240,403 |
def AC_constraint_check(csp, x, Xi, Xj):
""" Check if all AC constraints are satisfied
Args:
csp: CSP class object with current state of the sudoku puzzle board
x: Current value evaluated at field Xi
Xi: Current field id (row + column combination)
Xj: Selected neighbor's field id (row + column combination)
    Returns: Boolean whether all AC constraints are satisfied (satisfied = True; constraint violation = False)
"""
for neighbor in csp.values[Xj]:
if Xj in csp.neighbors[Xi] and neighbor != x:
return False
return True
|
1a933af5cbaab6309ddcdc92d3c5216dbe39d88c
| 201,942 |
def client(application, api_client):
"""
Client configured not to retry requests.
By default, the failed requests are retried by the api_client.
As 404 is the desired outcome of one of the tests, the client is
configured not to retry requests to avoid long time execution.
"""
# this will ensure all is up
assert application.test_request().status_code == 200
return api_client(disable_retry_status_list={404})
|
b616319c2bdaf60740795c847cc55928fd5cd55f
| 383,866 |
def get_site_len_list(len_in):
"""
Read in site lengths (one length per row from file len_in).
Return list of lengths.
"""
site_len_list = []
with open(len_in) as f:
for line in f:
site_len = line.strip()
site_len_list.append(int(site_len))
assert site_len_list, "site_len_list empty (no lengths read in from %s)" %(len_in)
return site_len_list
|
cca7f381b30b58de675c6d5ff2094f9c2b6c23d3
| 567,721 |
def mnormalize(matrix):
"""Scale a matrix according to the value in the center."""
width = len(matrix[0])
height = len(matrix)
factor = 1.0 / matrix[int(height / 2)][int(width / 2)]
if 1.0 == factor:
return matrix
for i in range(height):
for j in range(width):
matrix[i][j] *= factor
return matrix
|
287195485dd393e366c31df30dd2b6db5457088f
| 435,099 |
def convert_dt_to_utc_epoch(dt):
"""Convert datetime to UTC epoch seconds.
Note that the timestamp method assumes that an offset-naive
datetime instance is in the local timezone and converts its
offset to UTC before making it a floating point number.
"""
return dt.timestamp()
|
7ad595858948913f5ee4020222e4c7db2d7533b6
| 299,107 |
def get_function_at(bv, addr):
"""Gets the function that contains a given address, even if that address
isn't the start of the function"""
functions = bv.get_functions_containing(addr)
return functions[0] if (functions is not None and len(functions) > 0) else None
|
e12eaa0d504fb80193cbb8e2394afa0ae6a6cf38
| 503,860 |
def set_to_list(setstring, delimiter="|", affix="|"):
"""Turn a set string into a list."""
if setstring == affix:
return []
setstring = setstring.strip(affix)
return setstring.split(delimiter)
|
d7ede5607107a3e63ba9a13cb7011e49bde12933
| 40,791 |
def is_subdomain(x, y):
""" Returns True if x is a subdomain of y, otherwise return False.
Example:
is_subdomain('pajlada.se', 'pajlada.se') = True
is_subdomain('test.pajlada.se', 'pajlada.se') = True
is_subdomain('test.pajlada.se', 'pajlada.com') = False
"""
if y.startswith('www.'):
y = y[4:]
return x.endswith('.' + y) or x == y
|
57254825a6bebb860066cff38e3c7fce38eed2b7
| 491,893 |
def tiempo_a_segundos(dias: int, horas: int, mins: int, seg: int) -> int:
""" Unidades de tiempo a segundos
Parámetros:
dias (int): Número de dias del periodo de tiempo
horas (int): Número de horas del periodo de tiempo
mins (int): Número de minutos del periodo de tiempo
seg (int): Número de segundos del periodo de tiempo
Retorno:
int: Número de segundos al que equivale el periodo de tiempo dado como parámetro
"""
dias_a_horas = dias * 24
horas_a_minutos = (dias_a_horas + horas) * 60
minutos_a_segundos = (horas_a_minutos + mins) * 60
segundos_totales = minutos_a_segundos + seg
return int(segundos_totales)
|
bba94ec8128638e6aae96e932003a019b8ce0b5f
| 123,756 |
def tidy_split(df, column, sep='|', keep=False):
"""
Split the values of a column and expand so the new DataFrame has one split
value per row. Filters rows where the column is missing.
Params
------
df : pandas.DataFrame
dataframe with the column to split and expand
column : str
the column to split and expand
sep : str
the string used to split the column's values
keep : bool
        whether to retain the presplit value as its own row
Returns
-------
pandas.DataFrame
Returns a dataframe with the same columns as `df`.
"""
indexes = list()
new_values = list()
df = df.dropna(subset=[column])
for i, presplit in enumerate(df[column].astype(str)):
values = presplit.split(sep)
if keep and len(values) > 1:
indexes.append(i)
new_values.append(presplit)
for value in values:
indexes.append(i)
new_values.append(value)
new_df = df.iloc[indexes, :].copy()
new_df[column] = new_values
return new_df
|
5a3d4651d9feb99ed9b240cb0d53d8217c3c8021
| 589,292 |
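A quick usage example, assuming pandas; the row with 'a|b' expands into two rows sharing the same index:

import pandas as pd

df = pd.DataFrame({"id": [1, 2], "tags": ["a|b", "c"]})
print(tidy_split(df, "tags"))
#    id tags
# 0   1    a
# 0   1    b
# 1   2    c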
import typing
import pathlib
def is_path_obj(obj: typing.Any) -> bool:
"""Is given object 'input' a pathlib.Path object?
:param obj: a pathlib.Path object or something
:return: True if 'obj' is a pathlib.Path object
>>> obj = pathlib.Path(__file__)
>>> assert is_path_obj(obj)
>>> assert not is_path_obj(__file__)
"""
return isinstance(obj, pathlib.Path)
|
178342dc5301d2440b78411a2737f889b49c73f9
| 587,761 |
def import_coldcard_wif(filename):
"""Reads a exported coldcard Wif text file and returns the WIF and used path"""
next_var = ""
import_password = ""
path = ""
with open(filename) as fp:
for line in fp:
if line.strip() == "":
continue
if line.strip() == "WIF (privkey):":
next_var = "wif"
continue
elif "Path Used" in line.strip():
next_var = "path"
continue
if next_var == "wif":
import_password = line.strip()
elif next_var == "path":
path = line
next_var = ""
return import_password, path.lstrip().replace("\n", "")
|
1ab017a3ee2053821bef4b0ddc6f1710e47e5450
| 555,434 |
def string_to_boolean(string):
"""Converts input string of True or False to a boolean True or False"""
string = string.lower().strip()
true_strings = ["true", "t"]
false_strings = ["false", "f"]
if string in true_strings: return True
elif string in false_strings: return False
raise ValueError("Bad Boolean Value: " + string)
|
6e9aacfe7a4fbd36875c0b047b830ebc359a8a95
| 393,276 |
def number2binary(v, dynamic_padding=False, padded_length=None):
""" Convert an integer value to the equivalent string of 1 and 0 characters. """
s = ""
while v:
s = [ "0", "1" ][v & 1] + s
v >>= 1
if dynamic_padding:
w = 4
while w < len(s):
w <<= 1
else:
w = len(s) if padded_length is None else padded_length
return "0"*(w-len(s)) + s
|
2bf7a334f5c38be6c827254d7a3c3875b28c8a9f
| 687,305 |
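A few sample calls showing the padding modes:

print(number2binary(5))                        # '101'
print(number2binary(5, dynamic_padding=True))  # '0101' (padded to a power-of-two width)
print(number2binary(5, padded_length=8))       # '00000101'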
def ordered_pair(a, b):
"""Returns (a, b) if a < b and (b, a) otherwise
"""
if a > b:
return b, a
return a, b
|
fb80c4dd2b8c2321610384ee153b5833db81eaa0
| 351,885 |
def determine_format(request, serializer, default_format='application/json'):
"""
    This overrides the default tastypie determine_format.
This is done because we want to default to "application/json"
even though most browsers will send along "application/xml" in the
Accept header if no "format" is provided.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param serializer: The serializer being used.
:type serializer: :class:`crits.core.api.CRITsSerializer`
:param default_format: The format to respond in.
:type default_format: str
:returns: str
"""
# First, check if they forced the format.
if request.GET.get('format'):
if request.GET['format'] in serializer.formats:
return serializer.get_mime_for_format(request.GET['format'])
if request.GET.get('file'):
default_format = 'application/octet-stream'
# No valid 'Accept' header/formats. Sane default.
return default_format
|
39225c3c60d8b787a953f6c0ab6ddf1e1e7bbdc8
| 248,878 |
def _GetEnvironmentVars(benchmark_spec):
"""Return a string containing HPCG-related environment variables.
Args:
benchmark_spec: benchmark spec
Returns:
string of environment variables
"""
return ' '.join([
'NUM_GPUS=%s' % benchmark_spec.total_gpus,
'OMP_NUM_THREADS=%s' % benchmark_spec.cpus_per_rank
])
|
60d968b924376f738f0615638e50c71b0c66c09d
| 575,679 |
import yaml
def dict_to_yaml(dict_data):
"""Return YAML from dictionary.
:param dict_data: Dictionary data
:type dict_data: dict
:returns: YAML dump
:rtype: string
"""
return yaml.dump(dict_data, default_flow_style=False)
|
eba7896f63d499ef6c55b057320933dc0ff86333
| 10,536 |
from datetime import datetime
def sqltime(timestamp = None):
"""
Convert the Unix-Timestamp to a SQL datetime
If no timestamp given, returns the current datetime
"""
if timestamp:
return datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
else:
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
de23002d7e75434444857cdb278082866932bdb4
| 212,233 |
def split(versionstring):
"""Split the version string 'X.Y.Z' and return tuple (int(X), int(Y), int(Z))"""
assert versionstring.count('.') == 2, "Version string must be of the form str('X.Y.Z')"
return tuple([int(x) for x in versionstring.split('.')])
|
10b37f025b2ed06e16ee37e6ef165887cf2abef0
| 533,176 |
def dict_extremum(data: dict, type=0) -> tuple:
"""
找到dict中value的极值,并返回key,若极值有多个,则返回多个key
Args:
data: dict数据,且value必须为数值类型
type: 1表示执行最大值操作,0表示最小值操作
Returns:
极值,键的列表
"""
values = list(data.values())
    if type == 1:
        # maximum
        ex = max(values)
    elif type == 0:
        # minimum
        ex = min(values)
    else:
        raise Exception("The value of 'type' should only be 0 or 1.")
    # collect every key whose value equals the extremum
    keys_ = [k for k, v in data.items() if v == ex]
return ex, keys_
|
dd04c3bf7d93947db43780329519e432d277e5ad
| 211,292 |
import random
import math
def scheduleGamesForPlayer(nPlayers, pIndex):
""" Given the total number of players (nPlayers), and the index of the current
player (pIndex), returns a list of games scheduled against the player such
that pIndex will play against each player (including herself) at least
once, at most twice, in the least number of games possible, with no game
containing two opponents of the same type. These games occur in
randomized order (and opponent pairs are also randomized).
Games are returned as a 2D list of player indices,
result = scheduleGamesForPlayer(...)
result[0] is the first game to be played
result[0][0] is always pIndex
result[0][1] and result[0][2] are the opponents
Example usage:
> scheduleGamesForPlayer(10, 1)
produces 5 games as follows:
> [[1, 0, 4], [1, 5, 9], [1, 3, 7], [1, 8, 2], [1, 1, 6]]
"""
playerList = [i for i in range(nPlayers)]
random.shuffle(playerList)
nBasicGames = int(math.floor(nPlayers / 2))
schedule = []
for i in range(nBasicGames):
schedule.append([pIndex, playerList[i*2], playerList[i*2+1]])
# If number of players is odd, we schedule one extra game with the
# remaining opponent and a randomly selected player. The additional
# opponent cannot be the same as this last outstanding opponent.
if nPlayers % 2 == 1:
r = random.randint(0, nPlayers-2)
if r == playerList[-1]:
r += 1
schedule.append([pIndex, playerList[-1], r])
return schedule
|
80e24d5207359332e41353576aa796e5a6086d8d
| 564,125 |
def sort_dict(d: dict) -> dict:
"""Returns a dict, sorted alphabetically by its keys."""
return {key: d[key] for key in sorted(d)}
|
d9a6af14ce01214ed43d03a00fedd4490e6a1cd4
| 323,969 |
from math import floor
def get_suffix(exp):
""" Returns a human-friendly resistor suffix """
suffix = floor(exp / 3)
ret = ''
if suffix == -1:
ret += 'm'
elif suffix == 0:
pass
elif suffix == 1:
ret += 'k'
elif suffix == 2:
ret += 'M'
elif suffix == 3:
ret += 'G'
else:
ret += '* 10^{} '.format(exp)
return ret + 'Ohm'
|
8218d9b6a9b5f4f7ca80ba1261e977044338c16b
| 538,269 |
import copy
def make_all_strokes_dashed(svg, unfilled=False):
"""
Makes all strokes in the SVG dashed
:param svg: The SVG, in xml.etree.ElementTree format
:param unfilled: Whether this is an unfilled symbol
:return: The resulting SVG
"""
stroke_elements = [ele for ele in svg.findall('.//*[@stroke]') if ele.attrib['stroke'] != 'none']
if not unfilled:
stroke_elements_with_dash = [copy.deepcopy(ele) for ele in stroke_elements]
for ele in stroke_elements:
ele.attrib['stroke'] = '#ffffff'
for ele in stroke_elements_with_dash:
ele.attrib['stroke-dasharray'] = '2 2'
svg.extend(stroke_elements_with_dash)
else:
for ele in stroke_elements:
ele.attrib['stroke-dasharray'] = '2 2'
return svg
|
fe1a0e6aaf72ec53edfd93da948ae953a3e7ae3c
| 686,871 |
def __partition3way__(arr, lo, hi):
"""
Function to achieve 3-way partitioning for quicksort
1. Start with 3 pointers (lt, i, gt), choose first element (lo) as pivot
2. Invariant: everything to the
(a) left of lt is less than pivot
(b) between lt and i is equal to pivot
(c) right of gt is larger than pivot
    3. In each iteration, while i <= gt do from left to right :
    (a) if arr[i] < arr[lo] exchange arr[lt] and arr[i], increment lt and i
    (b) if arr[i] == arr[lo] increment only i
    (c) if arr[i] > arr[lo] exchange arr[gt] and arr[i], decrement gt
4. Return lt, gt
"""
if hi <= lo:
return lo, hi
# Define lt and gt pointers
lt = lo
i = lt + 1
gt = hi
v = arr[lo]
while i <= gt:
if arr[i] < v:
arr[i], arr[lt] = arr[lt], arr[i]
i += 1
lt += 1
elif arr[i] > v:
arr[i], arr[gt] = arr[gt], arr[i]
gt -= 1
else:
i += 1
return lt, gt
|
7d51e71ab9b98803d7ac75cf91b5c8f08eafc15a
| 603,031 |
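A sketch of the quicksort driver this partition would plug into; the partition only rearranges arr[lo..hi] and returns the boundaries of the pivot-equal run:

def quicksort3way(arr, lo=0, hi=None):
    """In-place 3-way quicksort built on __partition3way__ (a sketch)."""
    if hi is None:
        hi = len(arr) - 1
    if hi <= lo:
        return
    lt, gt = __partition3way__(arr, lo, hi)
    quicksort3way(arr, lo, lt - 1)   # sort the strictly-smaller run
    quicksort3way(arr, gt + 1, hi)   # sort the strictly-larger run

data = [5, 1, 5, 3, 5, 2]
quicksort3way(data)
print(data)  # [1, 2, 3, 5, 5, 5]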
def match_all(value):
"""Validator that matches all values."""
return value
|
b6ef320b3d6efff6b87e823bf767a258220cb600
| 265,690 |
def jaccard_score(list1, list2):
"""
Jaccard similarity score
The Jaccard coefficient measures similarity between finite sample sets, and
is defined as the size of the intersection divided by the size of the union
of the sample sets
https://en.wikipedia.org/wiki/Jaccard_index
    :param list1: first collection of items
    :param list2: second collection of items
    :return: Jaccard similarity coefficient (0 if both are empty)
"""
N = len(set(list1).intersection(list2))
D = len(set(list1).union(list2))
if D == 0:
return 0
else:
return 1.0*N/D
|
cdd033b7ebae87c3e85c0ed26e69369e7a7a1239
| 339,889 |
import torch
def convert_to_one_hot(
targets: torch.Tensor,
num_class: int,
label_smooth: float = 0.0,
) -> torch.Tensor:
"""
This function converts target class indices to one-hot vectors,
given the number of classes.
Args:
targets (torch.Tensor): Index labels to be converted.
num_class (int): Total number of classes.
label_smooth (float): Label smooth value for non-target classes. Label smooth
is disabled by default (0).
"""
assert (
torch.max(targets).item() < num_class
), "Class Index must be less than number of classes"
assert 0 <= label_smooth < 1.0, "Label smooth value needs to be between 0 and 1."
non_target_value = label_smooth / num_class
target_value = 1.0 - label_smooth + non_target_value
one_hot_targets = torch.full(
(targets.shape[0], num_class),
non_target_value,
dtype=torch.long if label_smooth == 0.0 else None,
device=targets.device,
)
one_hot_targets.scatter_(1, targets.long().view(-1, 1), target_value)
return one_hot_targets
|
052f6415f30f3ac60c704a448be3d099c3dcef83
| 178,237 |
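A short usage example without label smoothing (the output is then an integer tensor):

import torch

targets = torch.tensor([0, 2])
print(convert_to_one_hot(targets, num_class=3))
# tensor([[1, 0, 0],
#         [0, 0, 1]])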
def apply_filter(d, fos, y):
"""
This function is used to apply all the useful filers
:param y: year
:param d: jason as a dictionary
:param fos: field of study
:return: the result of the filer
"""
if "paperAbstract" in d.keys() and \
"title" in d.keys() and \
"fieldsOfStudy" in d.keys() and \
"sources" in d.keys() and \
"year" in d.keys():
return len(d["paperAbstract"]) > 0 and "DBLP" in d["sources"] and fos in d["fieldsOfStudy"] and d["year"] > y
return False
|
3c952ff21b0a5f3f6950435ef609e0afc683fa64
| 274,073 |
def sphinx_doi_link(doi):
"""
Generate a string with a restructured text link to a given DOI.
Parameters
----------
doi : :class:`str`
Returns
--------
:class:`str`
String with doi link.
"""
return "`{} <https://dx.doi.org/{}>`__".format(doi, doi)
|
561d1892361865786f47a9a1d3f8e18f6475da77
| 103,614 |
import itertools
def all_combos(list_of_combos: list):
    """ returns all possible combinations in a list of lists
    e.g. [[1, 2], [3, 4]] -> ["1 3", "1 4", "2 3", "2 4"]
    """
    # itertools.product yields one combination per pick of a single
    # element from each sub-list; join each as a space-separated string,
    # matching the documented behaviour above
    return [' '.join(str(x) for x in combo)
            for combo in itertools.product(*list_of_combos)]
|
adff1682a2447be00429e6c7bc909ae1d24769d3
| 55,681 |
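A usage check matching the docstring example:

print(all_combos([[1, 2], [3, 4]]))
# ['1 3', '1 4', '2 3', '2 4']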
import requests
def fetch_remote_job(job_system_url, job_id):
"""
Helper funtion to fetch remote jobs.
:param job_system_url: url to the job system
:param job_id: only fetch the job whith this ID
:returns: a job, raises an HTTPError otherwise
"""
response = requests.get(
"%s/api/jobs/%s" % (job_system_url, job_id),
headers={'content-type': 'application/json'}
)
response.raise_for_status()
return dict(
response.json(),
**{
'etag': response.headers['ETag'],
'last_modified': response.headers['Last-Modified']
}
)
|
d738c9a41686cab14a04b15eedcbe8bb7b57df5c
| 353,632 |
def _escaped_str(text: str) -> str:
"""Escape the text and returns it as a valid Golang string."""
return '"{}"'.format(
text.replace('\\', '\\\\').replace('"', '\\"').replace('\a', '\\a').replace('\f', '\\f').replace('\t', '\\t')
.replace('\n', '\\n').replace('\r', '\\r').replace('\v', '\\v'))
|
d3e2c3e949e45d5e7d7faf431c2e6318c5d217cf
| 362,332 |
import heapq
def dijkstra(graph, source, target):
"""
Finds the shortest path and shortest distance from source to target
:param graph: Dictionary linking the vertices using list of tuples
:param source: The vertex to start the path.
:param target: The vertex to end the path.
:return: shortest path and distance from source to target
"""
INF = float('Inf')
predecessors = {x: x for x in graph}
distances = {x: INF for x in graph}
distances[source] = 0
temp = []
    heapq.heappush(temp, (distances[source], source))
    while temp:
        # heap entries are (distance, vertex) so the closest vertex pops first
        u_dist, u_idx = heapq.heappop(temp)
        if u_dist == distances[u_idx]:
            for v in graph[u_idx]:
                v_idx = v[0]
                u2v = v[1]
                if distances[u_idx] + u2v < distances[v_idx]:
                    distances[v_idx] = distances[u_idx] + u2v
                    heapq.heappush(temp, (distances[v_idx], v_idx))
                    predecessors[v_idx] = u_idx
if distances[target] == INF:
return None, None
else:
path = []
vertex = target
while True:
path.append(str(vertex))
if vertex == predecessors[vertex]:
break
vertex = predecessors[vertex]
return path[::-1], distances[target]
|
27df82387ff3139941f1c8d2b27a7e294e52e0c8
| 622,460 |
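A small usage example on a three-vertex graph in the adjacency format the function expects:

graph = {
    'a': [('b', 1), ('c', 4)],
    'b': [('c', 2)],
    'c': [],
}
print(dijkstra(graph, 'a', 'c'))  # (['a', 'b', 'c'], 3)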
def get_output_shape(data):
"""Returns output shapes."""
return (3,)
|
e418bbf96679dd37856c8734a3f095d691667ced
| 575,363 |
def is_valid_hgt(value: str) -> bool:
"""
Return if value is a valid hgt (height).
Parameter
---------
value: str
a height in 'cm' or 'in'.
Return
------
bool
        True if height is valid, False otherwise.
"""
height, unit = value[:-2], value[-2:]
    if not height.isdigit():  # also rejects an empty height string
        return False
if unit == "in":
return 59 <= int(height) <= 76
if unit == "cm":
return 150 <= int(height) <= 193
return False
|
f4ef47e4670f0922af508b91185d7a8636a52146
| 190,674 |
def get_dependency_links(filename):
"""
Parse a requirements pip file looking for the --find-links directive.
Args:
        filename: pip requirements file
Returns:
list of find-links's url
"""
with open(filename) as file:
parsed_requirements = file.read().splitlines()
dependency_links = list()
for line in parsed_requirements:
line = line.strip()
if line.startswith('--find-links'):
dependency_links.append(line.split('=')[1])
return dependency_links
|
ac95b2117ea603f95a728559ff9b843dd682780e
| 562,807 |
def parse_requirements(file_text):
"""
    Parse a python requirements.txt string into a list of requirements
    :param file_text: requirements.txt text
    :return: List of [package, version] pairs (each line split on '==')
"""
lines = file_text.split("\n")
packages = []
for line in lines:
if len(line) == 0:
continue
packages.append(line.split("=="))
return packages
|
254cf7a92a30b4b9004c93510313b455f806ee3f
| 272,013 |
def launch_coef_scores(args):
"""
Wrapper to compute the standardized scores of the regression coefficients, used when computing the number of
features in the reduced parameter set.
@param args: Tuple containing the instance of SupervisedPCABase, feature matrix and response array.
    @return: The standardized scores of the coefficients.
"""
spca, X, y = args
scoefs = spca._compute_stnd_coefs(X, y)
return scoefs
|
02423ef564b55dfcc37bddadcc813edffba05795
| 704,786 |
def _estimate_num_tokens(sentence):
"""Estimates the number of tokens a sentence may have."""
return len(sentence.split())
|
50d73d700e72cc17cc437c154d70c0c5a949889a
| 285,523 |
def getPixelFromLabel(label):
"""
    Function to get the pixel from the class label. This reverse mapping is used to generate an image file from available class labels.
:param label: class label
:type label: int
:return: (r,g,b) equivalent of class label color.
:rtype: tuple
"""
if label == 0:
return (0, 255, 0)
if label == 1:
return (255, 0, 0)
else:
return (255, 255, 153)
|
746ae72f4adff1615f399ff5b591b7013ebdce46
| 35,611 |
def groupOp(pair1, pair2, n1, n2):
"""Apply group operation to two pairs:
(g1,h1) x (g2, h2)
    G being multiplicative, H additive, thus
(g1*g2 , h1+h2)"""
return ((pair1[0] * pair2[0]) % n1, (pair1[1] + pair2[1]) % n2)
|
85ad96a29e028545bf63a22a8c9a1d9aaf7404d8
| 304,285 |
def inflate_cost(raw_cost, current_cpi, cpi_time_variant):
"""
Calculate the inflated cost associated with raw_cost, considering the current CPI and the CPI corresponding
to the date considered (cpi_time_variant).
Returns:
The inflated cost
"""
return raw_cost * current_cpi / cpi_time_variant
|
bf3b495abffb9a9002a069180eca8fb49d605f28
| 383,813 |
def _get_class_name(obj):
"""Return a name which includes the module name and class name."""
class_name = getattr(obj, '__name__', obj.__class__.__name__)
module_name = obj.__module__
if module_name is not None:
return '{}.{}'.format(module_name, class_name)
return class_name
|
3e9ad769fe3a3264a1eba30da4ef0039d2be50b9
| 609,389 |
from typing import List
def get_indices(text: str, sub: str) -> List[int]:
"""Gets all the indexes of occurrence of 'sub' in 'text'"""
indices = []
processed_chars = 0
working_part = text
while sub in working_part:
idx = processed_chars + working_part.index(sub)
indices.append(idx)
processed_chars = idx + len(sub)
working_part = text[processed_chars:]
return indices
|
2f44044e0f764986888ae9a62fdf60b5d80f4ef3
| 392,028 |
from pathlib import Path
def rglob(path, glob):
"""Return list of paths from `path` matching `glob`."""
p = Path(path)
return list(map(str, p.rglob(glob)))
|
3db756de57e2aea0a45239f2db1eb9eca92d184e
| 397,255 |
def inGigabytes(nbytes):
"""Convert bytes to gigabytes"""
return nbytes / (1024. ** 3)
|
6389c8f42a4c6f3bdb51e078e4d6ea987d80f409
| 292,007 |
def pop_stream(cache, user_id):
"""
Pop an item off the stack in the cache. If stack
is empty after pop, it deletes the stack.
:param cache: werkzeug BasicCache-like object
:param user_id: id of user, used as key in cache
:return: top item from stack, otherwise None
"""
stack = cache.get(user_id)
if stack is None:
return None
result = stack.pop()
if len(stack) == 0:
cache.delete(user_id)
else:
cache.set(user_id, stack)
return result
|
7b644493cb3b86720cb79bbf9954535ce0f2337e
| 217,333 |
from datetime import datetime
def format_modified_time(entry):
"""Formats the modified time (seconds since epoch) for display on screen"""
return datetime.fromtimestamp(entry['mtime']).strftime('%b %-d %H:%M')
|
51589bae554732f89bf24bfd0f70aa1e57eb7a9d
| 648,776 |
from typing import List
def argmax(array: List[float]) -> int:
"""Simple argmax implementation for lists of floats."""
    index, _ = max(enumerate(array), key=lambda x: x[1])
    return index
|
30ac274ff31c8e0da83d07cca9d48c17a1d08b2c
| 646,704 |
import torch
def generate_all_anchors(anchors, H, W):
"""
Generate dense anchors given grid defined by (H,W)
Arguments:
anchors -- tensor of shape (num_anchors, 2), pre-defined anchors (pw, ph) on each cell
H -- int, grid height
W -- int, grid width
Returns:
all_anchors -- tensor of shape (H * W * num_anchors, 4) dense grid anchors (c_x, c_y, w, h)
"""
# number of anchors per cell
A = anchors.size(0)
# number of cells
K = H * W
shift_x, shift_y = torch.meshgrid([torch.arange(0, W), torch.arange(0, H)])
# transpose shift_x and shift_y because we want our anchors to be organized in H x W order
shift_x = shift_x.t().contiguous()
shift_y = shift_y.t().contiguous()
# shift_x is a long tensor, c_x is a float tensor
c_x = shift_x.float()
c_y = shift_y.float()
centers = torch.cat([c_x.view(-1, 1), c_y.view(-1, 1)], dim=-1) # tensor of shape (h * w, 2), (cx, cy)
# add anchors width and height to centers
all_anchors = torch.cat([centers.view(K, 1, 2).expand(K, A, 2),
anchors.view(1, A, 2).expand(K, A, 2)], dim=-1)
all_anchors = all_anchors.view(-1, 4)
return all_anchors
|
a7a7a09eca5bbd0a4de8874d3d056651455c47ac
| 640,409 |
def escape_quotes(text):
"""Escape single and double-quotes for JavaScript"""
return text.replace("'", r"\'").replace('"', r'\"')
|
a831a1bbfb307301f58847ce747638d22183585a
| 252,128 |
def req_version(req):
"""Builds the version string for the requirement instance
:param req: requirement object
:returns: the version in desired format
:rtype: string or NoneType
"""
return ''.join(req.specs[0]) if req.specs else None
|
54628c3a1c402e4a73df286f00b77ccdb727329c
| 596,932 |
import socket
def _create_socket_address(ip_address, port):
"""Convert a given IPv6 address (string) and port number into a socket address"""
# `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr`
# (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.).
return socket.getaddrinfo(ip_address, port)[0][4]
|
5f79cdaa98885a97f5d119c6031c935013b3eeae
| 56,033 |
def get_gqx(record, sample):
"""
Get GQX value from a pyvcf record
:param record: an instance of a pyvcf Record
:param sample: sample name
:return: float
"""
fmt = record.genotype(sample)
if hasattr(fmt.data, "GQ") and record.QUAL:
return min(float(fmt.data.GQ), record.QUAL)
elif hasattr(fmt.data, "GQ"):
return float(fmt.data.GQ)
elif record.QUAL:
return record.QUAL
else:
return None
|
150b9f9131b2be9b2701734b459fc20c0fe739a7
| 154,113 |
def PyEval_GetBuiltins(space):
"""Return a dictionary of the builtins in the current execution
frame, or the interpreter of the thread state if no frame is
currently executing."""
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is not None:
w_globals = caller.get_w_globals()
w_builtins = space.getitem(w_globals, space.newtext('__builtins__'))
if not space.isinstance_w(w_builtins, space.w_dict):
w_builtins = w_builtins.getdict(space)
else:
w_builtins = space.builtin.getdict(space)
return w_builtins # borrowed ref in all cases
|
1f2cf2f807c3ed6a73183481bb08452f84c92bdc
| 690,012 |