content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def sum_of_numbers(list_of_numbers):
    """Return the sum of all numbers in the given list."""
    total = 0
    for value in list_of_numbers:
        total += value
    return total
|
aa2b1e54b79d3082541ec64ff7c82077bcef1f6e
| 89,204 |
def legendre_symbol(a, p):
    """
    Compute the Legendre symbol (a/p) via Euler's criterion.

    :param a: number
    :param p: odd prime modulus
    :return: 1 if `a` is a nonzero quadratic residue modulo p, -1 if it is
        a non-residue, and 0 when p divides `a`.
    """
    euler = pow(a, (p - 1) // 2, p)
    # pow() yields p-1 where the mathematical result is -1.
    return -1 if euler == p - 1 else euler
|
8a0238b2a4e89c36b1bd0f6a5a99d656c3a52eab
| 49,141 |
def EdgeTouchesMeridian(a, b, longitude):
    """Determines whether the edge from a to b touches a given meridian."""
    # Include the left endpoint but not the right one so that crossing
    # counts keep the correct parity.
    if a.lon <= b.lon:
        low, high = a.lon, b.lon
    else:
        low, high = b.lon, a.lon
    if high - low >= 180:
        # The edge wraps across the 180-degree meridian.
        return longitude >= high or longitude < low
    return low <= longitude < high
|
14f001737b8deb3678af733e173a02c030070f0d
| 273,051 |
def sniff_encoding(path):
    """Attempt to sniff and return the file's encoding.

    Tries each candidate encoding in order and returns the first one that
    decodes the whole file without error, or None if none succeed.
    """
    encodings = ['utf-8', 'latin-1']
    for encoding in encodings:
        try:
            # Context manager guarantees the handle is closed; the original
            # called ``f.close`` without parentheses, leaking the file.
            with open(path, encoding=encoding) as f:
                f.read()
            return encoding
        except ValueError:
            # UnicodeDecodeError is a subclass of ValueError.
            continue
    return None
|
ed214cc618cb4e3cd0e802f3d51674b54270437d
| 175,949 |
def concatOverlapPep(peptide, j, prefixPeptide):
    """
    Merge two peptides around an identical prefix/suffix sequence.

    Called by self.createOverlap(). Eg: ABCDE + DEFGH = ABCDEFGH.
    :param peptide: the peptide with the matching suffix sequence
    :param j: the length of the non-overlapping head of `peptide` to keep
    :param prefixPeptide: the peptide with the matching prefix sequence
    :return: the peptide resulting from concatenation around the overlap
    """
    leading = peptide[:j]
    return leading + prefixPeptide
|
84cae79adcc1d6b4eeef8e8d679c963f925f3efa
| 592,703 |
def get_properties(dataset, property_offsets=None):
    """
    Extract properties and return values in a dictionary:
    {
      'properties':
      {
        'datetime': time,
        'odc:processing_datetime': indexed_time,
      }
    }
    """
    return {
        'properties': {
            'datetime': dataset.center_time,
            'odc:processing_datetime': dataset.indexed_time,
        }
    }
|
b525a6eb950a1763f1eb591e9b47d18ce3305cb9
| 669,177 |
def left_rotate(value, shift):
    """Return *value* circularly left-rotated by *shift* bits within 32 bits.

    The input is masked to 32 bits first so wider values do not leak high
    bits into the result, and ``shift`` is reduced modulo 32 so a shift of
    0 or 32 is a no-op instead of computing ``value >> 32``.
    """
    value &= 0xffffffff
    shift %= 32
    if shift == 0:
        return value
    return ((value << shift) & 0xffffffff) | (value >> (32 - shift))
|
243c7eb2bc6bf558f65d98935f5708cbef3cdbd9
| 437,094 |
import re
def process_text(text):
    """
    Perform all pre-processing steps for the text: drop non-ASCII
    characters, lowercase, then apply a fixed sequence of regex cleanups
    (URLs, hashtags, mentions, contraction expansion, punctuation, digits,
    whitespace collapsing).
    """
    # Order matters: contraction expansions must run before the generic
    # "n't" rule and before non-word characters are blanked out.
    substitutions = (
        (r'http\S+', ' '),
        (r'#+', ' '),
        (r'@[A-Za-z0-9]+', ' '),
        (r"([A-Za-z]+)'s", r"\1 is"),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"won't", "will not "),
        (r"isn't", "is not "),
        (r"can't", "can not "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r'\W', ' '),
        (r'\d+', ' '),
        (r'\s+', ' '),
    )
    text = text.encode('ascii', errors='ignore').decode()
    text = text.lower()
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text.strip()
|
a49fe0861b00f7e92612cc265c5c19dfd935ecb1
| 629,767 |
def mapProteinsContigs(listProteins, listContigs):
    """
    Map the lists of proteins and contigs into two lookup dictionaries.

    :param listProteins: List of all the proteins of the bacteriophage
    :param listContigs: List of all the contigs of the bacteriophage
    :type listProteins: List[Protein]
    :type listContigs: List[Contig]
    :return: two dictionaries with the proteins and contigs mapped
    :rtype dictProteins: dictionary{int: list[Proteins]}
    :rtype dictContigs: dictionary{int: Contig}
    """
    dictProteins = {}
    for protein in listProteins:
        # Group proteins by the contig they belong to; setdefault avoids
        # the double lookup of the original `in dict.keys()` test.
        dictProteins.setdefault(protein.fk_id_contig, []).append(protein)
    dictContigs = {contig.id_contig: contig for contig in listContigs}
    return dictProteins, dictContigs
|
a2a3f8be075ac3bcc2dce3edcee534790b18d5a1
| 230,616 |
def get_metric(report, metric_uuid: str):
    """Return the metric with the specified uuid."""
    matching = [
        subject["metrics"][metric_uuid]
        for subject in report["subjects"].values()
        if metric_uuid in subject["metrics"]
    ]
    # IndexError here means no subject contained the uuid, as before.
    return matching[0]
|
edc36bd26d69cb9230daffff9ca75b4306656727
| 589,571 |
def PVI(ax,ay,az,lag):
    """
    Calculate the normalized Partial Variance Increment of three series.

    Input:
       ax: series 1
       ay: series 2
       az: series 3
       lag: The number of points to shift each of the series ax, ay, and az
    Output:
       mag: normalized pvi
    """
    # Work on copies so the caller's series are not modified.
    sx, sy, sz = ax.copy(), ay.copy(), az.copy()
    # Lagged increments of each component.
    dx = sx.shift(-lag) - sx
    dy = sy.shift(-lag) - sy
    dz = sz.shift(-lag) - sz
    # Squared magnitude of the increment vector, normalized by its mean.
    squared = dx.pow(2) + dy.pow(2) + dz.pow(2)
    return (squared.div(squared.mean())).pow(.5)
|
a21e00cb2501f99c42404c430f3f45b6d42aaf95
| 502,879 |
def gcd(a, b):
    """ Find greatest common divisior using euclid algorithm."""
    assert a >= 0 and b >= 0 and a + b > 0
    # Repeatedly reduce the larger argument modulo the smaller one.
    while a and b:
        if a < b:
            a, b = b, a
        a %= b
    # Exactly one of the two is now zero; the other is the gcd.
    return a if a else b
|
33be26cdb143df526ade519a0f0e3a216e9d3529
| 63,195 |
def _get_blogname_from_payload(post_payload):
"""retrieves payload --> broken_blog_name, or payload --> blog --> name"""
if 'broken_blog_name' in post_payload:
return post_payload['broken_blog_name']
return post_payload['blog']['name']
|
846f64ddfb814c9eecf286fee13d838fb85d8543
| 139,689 |
def middle(items: list):
    """Return the middle item of a list (the later of the two middles for
    even lengths)."""
    return items[len(items) // 2]
|
0ffa955a599068de5044e09d41d3dea58f2bc368
| 233,473 |
import re
def slugify(text):
    """Create a good-enough slug for the given text.

    Runs of non-word characters collapse to a single hyphen and leading or
    trailing hyphens are stripped, so "Hello, World!" becomes
    "hello-world" rather than "hello--world-".
    """
    return re.sub(r'\W+', '-', text).strip('-').lower()
|
9e53cfe861ef01dd7cdd288de774b02a1e9f1c4e
| 599,061 |
def get_num_days_between(day1: int, day2: int) -> int:
    """Return number of days between two days as their number in week"""
    ONE_WEEK = 7
    if day1 <= day2:
        return day2 - day1
    # day2 is earlier in the week, so wrap around into the next week.
    return day2 + ONE_WEEK - day1
|
98a9e2d71f5d19216d05dc0195cb85e462641799
| 124,008 |
def to_celsius(kelvin_temp):
    """Convert a temperature from kelvin to celsius."""
    celsius = kelvin_temp - 273.15
    return celsius
|
5de90e70471942db5dc0517b7ba4fb6b246e75f6
| 558,632 |
def _get_all_nearest_neighbors(method, structure):
    """Get the nearest neighbor list of a structure

    Thin wrapper that delegates entirely to the supplied method object.

    Args:
        method (NearNeighbor) - Method used to compute nearest neighbors
        structure (IStructure) - Structure to study
    Returns:
        Output of `method.get_all_nn_info(structure)`
    """
    return method.get_all_nn_info(structure)
|
5e1e33c7b06951933d8603a75006c6895b742293
| 16,345 |
def process_views(df):
    """Strip thousands-separator commas from the 'views' column and cast
    it to float.

    Parameters
    ----------
    df :
        The dataframe to search.
    Returns
    -------
    The dataframe processed (modified in place and returned).
    """
    without_commas = df['views'].str.replace(',', '')
    df['views'] = without_commas.astype(float)
    return df
|
233a27ea67c7205cb1c7cedaf789ac70fbc91cee
| 650,146 |
def dumb_indent(numspaces, text):
    """
    Indent each line of the input text by the specified number of spaces.

    Builds the padding once and joins, instead of the original quadratic
    character-by-character string concatenation.
    """
    pad = " " * numspaces
    # splitlines(True) keeps line endings, so joining reproduces them.
    return "".join(pad + line for line in text.splitlines(True))
|
d45643182c41225652e6786557635515cd3056f8
| 481,906 |
def find_n_max_vals(list_, num):
    """Search for the num biggest values of a given list of numbers.

    Returns the num maximas list and the index list wrapped up in a list.
    """
    li_ = list_.copy()
    max_vals = []  # the values
    max_ind = []   # the index of the value, can be used to get the param
    for _ in range(num):
        max_val = max(li_)
        max_id = li_.index(max_val)
        max_vals.append(max_val)
        max_ind.append(max_id)
        # Mark the slot with -inf so it can never win again; the original
        # used 0, which breaks for lists containing negative values.
        li_[max_id] = float('-inf')
    return [max_vals, max_ind]
|
48e274a2e2feac04b285b883ce5948c8f39caff3
| 6,903 |
def _invert_permutation(perm):
"""Calculate invert permutation."""
out = [0] * len(perm)
for i, value in enumerate(perm):
out[value] = i
return tuple(out)
|
fa9b0d181aaf3bfe771e0a57358f57e087769efb
| 150,839 |
def a_record(query, ipaddr):
    """ Formats an A record using fields in 'query' and ipaddr, suitable for
        printing in a 'DATA' reply to pdns.
        Example:
            ndt.iupui.donar.measurement-lab.org IN A 60 -1 192.168.1.2\\n
    """
    template = "%(name)s\t%(class)s\tA\t%(ttl)s\t%(id)s\t"
    return template % query + ipaddr + "\n"
|
1054af3028644e856b611757980365f7abb9742e
| 570,129 |
import math
def roundup(x, to):
    """Round x up to the nearest multiple of `to`.

    >>> roundup(7, 8)
    >>> 8
    >>> roundup(8, 8)
    >>> 8
    >>> roundup(9, 8)
    >>> 16

    :param x: value to round
    :param to: value x will be rounded to
    :returns: rounded value of x
    :rtype: int
    """
    multiples = math.ceil(x / to)
    return int(multiples) * to
|
c15e4a5a751a428ee395fc96ee4845a95b3432f4
| 677,722 |
def constant_outfile_iterator(outfiles, infiles, arggroups):
    """Iterate over all output files, pairing each with the single input
    file and single argument group."""
    assert len(infiles) == 1
    assert len(arggroups) == 1
    infile, arggroup = infiles[0], arggroups[0]
    return ((outfile, infile, arggroup) for outfile in outfiles)
|
1e022ef1b3f41dc3252d8b49994bd60831023ed9
| 88,055 |
import math
def quantile_index(n, quantile):
    """Returns index of the specified quantile in a sorted dataset of n elements.

    Args:
      n: Size of the sorted dataset.
      quantile: A value in [0, 1] indicating the desired quantile.
    Returns:
      Index of the specified quantile. If the quantile is between points at
      indices i and i+1, returns i.
    """
    scaled = (n - 1) * quantile
    return int(math.floor(scaled))
|
b30a63adfb886e1a3b83706004a3ff731af88ea5
| 298,035 |
import warnings
def update(data):
    """Update the data in place to remove deprecated properties.

    Args:
        data (dict): dictionary to be updated

    Returns:
        True if data was changed, False otherwise
    """
    updated = False
    if 'include' in data:
        # Included files cannot be rewritten from here; only warn the user.
        msg = ("included configuration files should be updated manually"
               " [files={0}]")
        warnings.warn(msg.format(', '.join(data['include'])))
    # Spack 0.19 drops support for `spack:concretization` in favor of
    # `spack:concretizer:unify`. Here we provide an upgrade path that changes the former
    # into the latter, or warns when there's an ambiguity. Note that Spack 0.17 is not
    # forward compatible with `spack:concretizer:unify`.
    if 'concretization' in data:
        has_unify = 'unify' in data.get('concretizer', {})
        # Old values 'together'/'separately' map onto the new boolean flag.
        to_unify = {'together': True, 'separately': False}
        unify = to_unify[data['concretization']]
        if has_unify and data['concretizer']['unify'] != unify:
            # Both keys are present and disagree: do not guess, warn instead.
            warnings.warn(
                'The following configuration conflicts: '
                '`spack:concretization:{}` and `spack:concretizer:unify:{}`'
                '. Please update manually.'.format(
                    data['concretization'], data['concretizer']['unify']))
        else:
            data.update({'concretizer': {'unify': unify}})
            data.pop('concretization')
        updated = True
    return updated
|
2e604cde4455bb1ab784651798fb3be0cd3733db
| 13,247 |
import re
def cleanup_str(data: str) -> str:
    """
    Lowercase the string and keep only characters matching ``[a-z0-9-]``,
    dropping a single leading ``-`` first.
    This method is mostly used to make sure kubernetes scheduler gets
    the job name that does not violate its validation.
    """
    if data.startswith("-"):
        data = data[1:]
    allowed = re.findall(r"[a-z0-9\-]", data.lower())
    return "".join(allowed)
|
94c2f5832d30e061f82ed07e12b3e8a1abaec1af
| 163,028 |
def avg(iterable):
    """
    Return the average value of an iterable of numbers, or None when the
    iterable is empty.
    """
    # Materialize first: the iterable may be a one-shot iterator, and
    # `sum`/`len` would otherwise see it already exhausted.
    values = list(iterable)
    return sum(values) / len(values) if values else None
|
077d254eb620883c7e5a730ecb7c0a1af3378040
| 547,275 |
from operator import itemgetter
def list_unique(hasDupes):
    """Return the unique values from a list, ordered by the position of
    each value's *last* occurrence."""
    # Later occurrences overwrite earlier ones, so each value keeps the
    # index where it was last seen.
    last_seen = {x: i for i, x in enumerate(hasDupes)}
    return sorted(last_seen, key=last_seen.get)
|
0ba0fcb216400806aca4a11d5397531dc19482f6
| 740 |
def strip_suffix(target, suffix):
    """
    Remove the given suffix from the target if it is present there

    Args:
        target: A string to be formatted
        suffix: A string to be removed from 'target'

    Returns:
        The formatted version of 'target'
    """
    if suffix is None or target is None:
        return target
    # str.endswith is clearer than the negative-slice comparison; an empty
    # suffix is a no-op either way.
    if suffix and target.endswith(suffix):
        return target[:-len(suffix)]
    return target
|
d2125465e690144969e093cab9333fbc90c2d557
| 292,839 |
def dm_dalton_ppm(calculated_mass, experimental_mass):
    """
    Difference between a calculated and an experimental mass.

    Parameters
    ----------
    calculated_mass: int
        calculated mass of peptide sequence
    experimental_mass: int
        experimental mass of peptide sequence

    Returns
    -------
    tuple
        delta in dalton and delta in ppm (in that order)
    """
    delta = calculated_mass - experimental_mass
    parts_per_million = delta / calculated_mass * 1000000.0
    return delta, parts_per_million
|
2260064cb4952bbffcad88b01f98dfc4185bdb63
| 390,394 |
import sqlite3
from typing import Optional
def get_previous_sha(db: sqlite3.Connection) -> Optional[str]:
    """Gets the latest inserted SHA, or None when the table is empty."""
    # ROWID acts as a free auto-incrementing primary key, so the highest
    # ROWID belongs to the most recently inserted row.
    cursor = db.execute(
        'SELECT sha FROM metric_data ORDER BY ROWID DESC LIMIT 1',
    )
    row = cursor.fetchone()
    if row:
        return row[0]
    return None
|
6be49ec9bff0fbdd2e31f38fd9fe624762d539be
| 124,542 |
def _int_str_to_bit_str(int_str, num_bits, msb_first):
"""
Convert an integer string to num_bits bits string.
Example:
int_string_to_bit_str('0x5', 4) -> '0101'
int_string_to_bit_str('0x3', 4, msb_first=false) -> '1010'
"""
data = int(int_str, 0) # use radix 0 to automatically deduce radix
# convert to num_bits binary string with leading 0's
bit_str = format(data, f'0{num_bits}b')
return bit_str if msb_first else bit_str[::-1]
|
237b87ecf578834517b5137ed7ffbb92b284520d
| 670,205 |
def factorial(num):
    """
    Find the factorial of a number, iteratively.

    :type num: integer
    :param num: The number to find the factorial for.
    :raises ValueError: if num is negative (the recursive original would
        recurse until hitting the recursion limit instead).

    >>> factorial(4)
    24
    """
    if num < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
|
1454262fc21e05d294e91aa3fba9182d4d23521d
| 643,033 |
def determinant(q_form):
    """
    The determinant of a tensor, given in quadratic form

    Parameters
    ----------
    q_form : ndarray
        The quadratic form of a tensor, or an array with quadratic forms of
        tensors. Should be of shape (x, y, z, 3, 3) or (n, 3, 3) or (3, 3).

    Returns
    -------
    det : array
        The determinant of the tensor in each spatial coordinate
    """
    # Following the conventions used here:
    # http://en.wikipedia.org/wiki/Determinant
    # This is the rule-of-Sarrus expansion of the 3x3 determinant
    # (aei + bfg + cdh - ceg - bdi - afh), applied over the trailing
    # two axes so it broadcasts across any leading spatial dimensions.
    aei = q_form[..., 0, 0] * q_form[..., 1, 1] * q_form[..., 2, 2]
    bfg = q_form[..., 0, 1] * q_form[..., 1, 2] * q_form[..., 2, 0]
    cdh = q_form[..., 0, 2] * q_form[..., 1, 0] * q_form[..., 2, 1]
    ceg = q_form[..., 0, 2] * q_form[..., 1, 1] * q_form[..., 2, 0]
    bdi = q_form[..., 0, 1] * q_form[..., 1, 0] * q_form[..., 2, 2]
    afh = q_form[..., 0, 0] * q_form[..., 1, 2] * q_form[..., 2, 1]
    return aei + bfg + cdh - ceg - bdi - afh
|
e69720131de7005e0e6f7aefc8d1741fc6818a8c
| 648,581 |
def _feature_micro_recall(feature_stats, value_stats):
"""Computes micro-averaged recall from the supplied counts."""
num_actual_positives = feature_stats["correct"]
for value in value_stats:
num_actual_positives += value_stats[value]["fn"]
return (feature_stats["correct"] / num_actual_positives * 100.0
if num_actual_positives != 0.0 else 0.0)
|
729f9fad96049bbd8427b3900fa6c354f6b8b619
| 539,819 |
import yaml
def get_settings(filename: str = '1_settings.yaml') -> dict:
    """Return settings dictionary.

    :param filename: path to yaml settings file, defaults to `1_settings.yaml`
    :return: settings dictionary object
    :rtype: dict
    """
    # Context manager closes the file promptly; the original passed a bare
    # open() whose closing relied on garbage collection.
    with open(filename) as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
|
a3d29fadbc83f32c2463aad80a3731c286b5735f
| 246,017 |
def ptl2(tkl):
    """
    Use this to generate the list of tokens in a form easy to copy/paste
    into a test.
    """
    lines = [' ( "{}", {} ), \n'.format(tok.type, repr(tok.attr)) for tok in tkl]
    return ''.join(lines) + '\n'
|
086ec57962db0e1c5426a4fd7878b57abba31f5a
| 357,399 |
def ucfirst(string):
    """Clone of PHP ucfirst function, uppercase first character of string.

    An empty string is returned unchanged instead of raising IndexError
    as the naive slice version did.
    """
    if not string:
        return string
    return string[0].upper() + string[1:]
|
895acc51777ea3f137a3aa10af51b9372c8d046f
| 420,034 |
from typing import List
def _make_array(parts: List[str]) -> str:
"""
Utility to format an array of strings for passing to command line.
:param parts: List of strings.
:return: Formatted string.
"""
return "\"" + ",".join(parts) + "\""
|
3c364ee9b483274c2aad1f8df6afcebaabd09ed1
| 15,454 |
def encode_name(name):
    """
    encode_name encodes special characters to be xml-compatible entities

    The original replaced "&" with itself — a no-op that contradicted the
    stated purpose; the ampersand must become the "&amp;" entity.
    None passes through unchanged.
    """
    if name is None:
        return name
    return name.replace("&", "&amp;")
|
36cf1535c0d292a0dfa0e4f02a28e050410dd13b
| 561,484 |
def hdus_consistent(a_hdulist, b_hdulist):
    """Check that two HDU lists are consistent

    Parameters
    ----------
    a_hdulist, b_hdulist: pyfits.HDUList

    Returns
    -------
    result : bool
        Return True

    Raises
    ------
    RuntimeError
        Will be raised if any of the following occur:
        - Different number of extensions
        - Corresponding extensions are of different types.
    """
    n_a, n_b = len(a_hdulist), len(b_hdulist)
    if n_a != n_b:
        raise RuntimeError('HDUs have different lengths: FITS A = %d, FITS B = %d' % \
                           (n_a, n_b))
    # Walk the HDU pairs in lockstep and abort at the first type mismatch.
    for index, (a_hdu, b_hdu) in enumerate(zip(a_hdulist, b_hdulist)):
        a_ext = a_hdu.header.get('XTENSION')
        b_ext = b_hdu.header.get('XTENSION')
        if a_ext != b_ext:
            raise RuntimeError('Extension %d different: FITS A = %s, FITS B = %s' % \
                               (index, a_ext, b_ext))
    return True
|
9a3a213d2b93f540d70364f0fa43d04873c09617
| 343,574 |
import socket
def create_socket(port, host=None, connect=True):
    """Creates and returns a TCP socket

    Args:
        port (int): port number to use
        host (str, optional): hostname to connect to (default="localhost")
        connect (bool, optional): whether socket should be connected (default=True)

    Returns:
        socket: socket created
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if connect:
        sock.connect((host if host is not None else "localhost", port))
    return sock
|
da1dd329599882591a8440f9b47dffc6ec83fce0
| 636,483 |
import codecs
def base64_decode(text):
    """
    Base64 decodes given text, ignoring bytes in the result that are not
    valid UTF-8.
    """
    raw = codecs.decode(text.encode('utf-8'), "base64")
    return raw.decode('utf-8', 'ignore')
|
561a11568b55dc15a1e5dd3c40726f1e0243f482
| 170,141 |
def list_to_eslist(pylist):
    """ Converts python list of strings to a string of that list that can be used in ES.

    Args:
        pylist (list): Each element is a str.

    Returns:
        str: A representation of the list with each item in double quotes.
        An empty input yields '[]' (the original raised IndexError on
        ``pylist[-1]``).
    """
    if not pylist:
        return '[]'
    quoted = ', '.join('"' + item + '"' for item in pylist)
    return '[ ' + quoted + ' ]'
|
f8c56db5f5d1ade84664ebe97424722f65538db7
| 460,635 |
import re
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
|
d10d081966db2bf2eea0647d1402f195280cdbbb
| 250,049 |
def get_unique(l):
    """ Get unique values from list

    Placed outside the class because `list` conflicts with our internal
    method of the same name. Result order is unspecified (set-based).
    """
    deduped = set(l)
    return list(deduped)
|
6451ad4e6f8e38f7d16ef582c2dcb97dc2f6eaeb
| 583,201 |
def parse_csv_data(csv_filename: str) -> list[str]:
    """ Fetch covid data from CSV.

    Args:
        csv_filename: Name of the CSV file to fetch data from
    Returns:
        list_of_lines: Data from CSV in a list, each str element being a new line of csv.
    """
    with open(csv_filename, 'r', encoding="UTF-8") as file:
        # readlines() yields exactly the lines the original loop appended,
        # newline characters included.
        return file.readlines()
|
c502002ad373f4d95997aad2a832393c1eba63b3
| 408,586 |
import pickle
def load_meta(fname, data_id=''):
    """Load a metadata file.

    Parameters
    ----------
    fname : str
        Path to TFRecord folder
    data_id : str, optional
        Identifier inserted before the '_meta.pkl' suffix.

    Returns
    -------
    meta : dict
        Metadata file
    """
    path = fname + data_id + '_meta.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
|
2f63d0f2fc1c25fbab59b3c667458e2daf397526
| 645,841 |
import hashlib
import json
def dict_hash(dictionary) -> str:
    """MD5 hash of a dictionary, independent of key order."""
    # Sorting keys makes {'a': 1, 'b': 2} hash the same as {'b': 2, 'a': 1}.
    canonical = json.dumps(dictionary, sort_keys=True).encode()
    return hashlib.md5(canonical).hexdigest()
|
04421c1a765dd946cd2fc3103c9f84a81728b330
| 451,799 |
def angle_between_bearings(bearing1:float, bearing2:float) -> float:
    """Return the smallest angle between two bearings.

    Parameters
    ----------
    bearing1 : float
        Bearing in degrees (0..360).
    bearing2 : float
        Bearing in degrees (0..360).

    Returns
    -------
    angle
        Angle in degrees.
    """
    assert 0 <= bearing1 <= 360, 'bearing1 out of bounds!'
    assert 0 <= bearing2 <= 360, 'bearing2 out of bounds!'
    diff = abs(bearing1 - bearing2)
    # Compare against the gap measured the other way around the circle.
    return min(diff, abs(360 - diff))
|
5b07059e258bed8b2135ebf68a3c02e87bada0f2
| 173,486 |
def _convert_to_integer(srs, d):
"""Convert series to integer, given a dictionary.
Args:
srs (pd.Series): A series.
d (dict): A dictionary mapping values to integers
Returns:
pd.Series: An series with numeric values.
"""
return srs.map(lambda x: d[x])
|
3a3d3672dce1c1f919e7b0d54fbce5a015333fe2
| 328,809 |
from typing import Optional
from pathlib import Path
def is_name(path: Optional[str]) -> bool:
    """Check if the given path is a bare (lowercase) file name."""
    if path is None:
        return False
    # A bare name equals its own final path component; the lowercasing
    # means any uppercase input fails the comparison.
    return str(Path(path.lower()).name) == path
|
80dcd26285557c614cd90ea9ff55b3f4ba349dd4
| 531,105 |
def expand_region_with_neighbors(G, region):
    """Expands a given region with its neighboring nodes according to the graph.

    Args:
        G: The spatial connectivity graph over the input points.
        region: The set of points currently in the region.

    Returns:
        The expanded region (a new set; the input is not modified).
    """
    # Union of the region with every neighbor of every member node.
    adjacent = {neighbor for node in region for neighbor in G[node]}
    return set(region) | adjacent
|
aa5651fcece359abe432ea23bfa077f796d3527c
| 171,083 |
def lemmatize_word(lm, word):
    """Lemmatizes a word using the nltk library.

    Since the part of speech is unknown, lemmatize twice (once as a noun,
    once as a verb) and return whichever candidate differs from the input
    word. Not always guaranteed correct, but good enough for our purposes.
    """
    as_noun = lm.lemmatize(word, 'n')
    as_verb = lm.lemmatize(word, 'v')
    # If the noun lemma is unchanged, the verb lemma is the informative one.
    return as_verb if as_noun == word else as_noun
|
60c16e9e2c75897d9d71661b61e96ff82b8882d0
| 125,999 |
def _return_false(err: Exception):
    """An error handler that does not handle any error.

    Always returns False, signalling to the caller that the error was not
    dealt with here.
    """
    return False
|
15cedcae32af13f4d55eaf25ab88ac39cc2ac697
| 236,648 |
def search(f):
    """Return the smallest non-negative integer x for which f(x) is a true value."""
    x = 0
    # Probe 0, 1, 2, ... until the predicate holds.
    while not f(x):
        x += 1
    return x
|
ef5de47cafb8fd6c6ad6e4a88d9e71ea9e42eeff
| 295,648 |
def obj_in_list_always(target_list, obj):
    """
    >>> l = [1,1,1]
    >>> obj_in_list_always(l, 1)
    True
    >>> l.append(2)
    >>> obj_in_list_always(l, 1)
    False
    """
    # Deduplicate first, then require identity with obj for every survivor.
    return all(item is obj for item in set(target_list))
|
60cc73703de07076abd1070524cad3bbce8a6572
| 351,310 |
def write_dict(data, delims="{}"):
    """Writes a formatted string from a dictionary.

    The format of the output is as for a standard python dictionary,
    {keyword[0]: arg[0], keyword[1]: arg[1],..., keyword[n]: arg[n]}. Note the
    space after the commas, and the use of curly brackets.

    Args:
        data: The value to be read in.
        delims: An optional string of two characters giving the first and last
            character to be printed. Defaults to "{}".

    Returns:
        A formatted string.
    """
    body = ", ".join(str(key) + ": " + str(data[key]) for key in data)
    return delims[0] + body + delims[1]
|
b9eebf04345042fe17cc93082f82e765638438eb
| 145,770 |
import shlex
import sh
def run_command(command, **kwargs):
    """
    Runs the command using sh

    Args:
        command (str): The command to run

    Returns:
        the executed sh process
    """
    # shlex respects quoting, so "prog 'a b'" yields two tokens, not three.
    program, *arguments = shlex.split(command)
    return getattr(sh, program)(*arguments, **kwargs)
|
b8028f973bee6f0aef859cf8080e89b5810dc82f
| 224,370 |
def get_model_pk_attr_name(model_class):
    """
    Get the primary key attribute name from a Declarative model class

    :param Type[DeclarativeMeta] model_class: a Declarative class
    :return: str: a Column name
    :raises RuntimeError: if the model has no primary-key column, or has a
        composite (multi-column) primary key, which is not supported yet
    """
    # Keep only the (name, column) pairs whose column is flagged primary_key.
    primary_key_columns = list(filter(lambda attr_col: attr_col[1].primary_key, model_class.__mapper__.columns.items()))
    if len(primary_key_columns) == 1:
        return primary_key_columns.pop()[0]
    elif len(primary_key_columns) < 1:
        raise RuntimeError(f"Couldn't find attribute for {model_class}")
    else:
        raise RuntimeError("Multiple primary keys still not supported")
|
8cfaf86c3e89bac531961a65c9c482de676e89cb
| 144,369 |
from typing import Tuple
def bbox2str(bbox: Tuple[float, float, float, float]) -> str:
    """Return a string representation suited for hOCR.

    :param bbox: a bounding box (left, top, right, bottom)
    :return: a string representation for hOCR
    """
    # int() truncates each coordinate, as hOCR expects whole pixels.
    coords = " ".join(str(int(coord)) for coord in bbox)
    return "bbox " + coords
|
e98cf699a87e0d56d99f36863ced36765b23f8f9
| 228,451 |
def number(text):
    """Return a floating point number parsed from a European-format string
    (dots as thousands separators, comma as decimal mark)."""
    normalized = text.replace('.', '').replace(',', '.')
    return float(normalized)
|
7224d7c5275b13f119ba94440881de995962fa2b
| 467,536 |
def db_model_repr(self):
    """Create a automatic meaningful repr for db.Model classes

    Usage example:
    class MyClass(db.Model):
        __repr__ = db_model_repr
    """
    pairs = []
    for column in self.__table__.c:
        # Column objects stringify as "table.column"; keep the column part.
        field = str(column).split('.')[-1]
        pairs.append("{}={!r}".format(field, getattr(self, field)))
    return "{}({})".format(self.__class__.__name__, ', '.join(pairs))
|
0bb179377326436025dbbb2dd924844577981f97
| 126,179 |
import torch
def generate_diagonal_mask(size: int) -> torch.Tensor:
    """
    Generate a diagonal mask for the sequence.

    Off-diagonal positions are filled with float('-inf'); diagonal
    positions are filled with float(0.0).
    """
    mask = torch.full((size, size), float("-inf"))
    mask.fill_diagonal_(0.0)
    return mask
|
a73f71036d300125a8c590ad61c1d9b605977587
| 262,697 |
def fetch_bases(fasta, contig, start, length):
    """
    Returns a subsection from a specified FASTA contig. The start coordinate is 1-based.
    """
    # Convert the 1-based start to the 0-based, half-open range fetch() expects.
    begin = start - 1
    return fasta.fetch(reference=contig, start=begin, end=begin + length)
|
e9ab1e986f99780de3a029f0c3ef40923173e4e1
| 114,022 |
def static_vars(**kwargs):
    """Python decorator to declare static variables on a method.

    NOTE: Relies on the Python feature that allows attributes to be added to a
    function. See:
    http://stackoverflow.com/questions/279561/what-is-the-python-equivalent-of-static-variables-inside-a-function
    for source of decorator code and additional information.
    """
    def decorate(func):
        # Attach every keyword as an attribute on the decorated function.
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
|
0a60a8596c4a293c5f3a185fd07df890dff6a754
| 305,377 |
def cup(die1, die2):
    """Construct a cup that contains die1 and die2.

    >>> cup(dice(1, 1), dice(1, 2))
    [[1], [1, 2]]
    """
    return [die1, die2]
|
64d24022791aad4f9d4d46e907623c9e9ee0b593
| 607,130 |
from functools import reduce
def sum(l):
    """
    Return the '+'-fold of the items in the container.

    More general than the built-in 'sum', since it only requires the '+'
    operator on the items: summing lists, for example, concatenates them.
    """
    return reduce(lambda left, right: left + right, l)
|
c64bc8aec1af669af69494aa37fd515d3d7efad5
| 9,036 |
def signed_area(pr):
    """Return the signed area enclosed by a ring using the linear time
    algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
    indicates a counter-clockwise oriented ring."""
    xs, ys = pr.exterior.xy
    # Wrap around by re-appending the second vertex (a ring's first and
    # last vertices coincide).
    xs.append(xs[1])
    ys.append(ys[1])
    total = 0
    for i in range(1, len(xs) - 1):
        total += xs[i] * (ys[i + 1] - ys[i - 1])
    return total / 2.0
|
7ac40287c43a367b0c50731f2470b0bf05e28490
| 230,978 |
def NumToStr(number, formatString = ''):
    """Converts number to string using an optional format string."""
    value = float(number)
    if not formatString:
        return str(value)
    if formatString[0] != '%':
        raise ValueError('invalid format string (first character should be %)')
    # Translate the %-style spec into a str.format spec, e.g. '%0.2f' -> '{:0.2f}'.
    return ('{:' + formatString[1:] + '}').format(value)
|
2baf8a6e2d0d6ec07ae42aa741acd2dd38a961ae
| 281,988 |
from typing import Dict
from typing import Any
from typing import Optional
def _add_extra_kwargs(
kwargs: Dict[str, Any], extra_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Safely add additional keyword arguments to an existing dictionary
Parameters
----------
kwargs : dict
Keyword argument dictionary
extra_kwargs : dict, default None
Keyword argument dictionary to add
Returns
-------
dict
Keyword dictionary with added keyword arguments
Notes
-----
There is no checking for duplicate keys
"""
if extra_kwargs is None:
return kwargs
else:
kwargs_copy = kwargs.copy()
kwargs_copy.update(extra_kwargs)
return kwargs_copy
|
cfc4c17f608c0b7fe1ae3046dc220d385c890caa
| 708,171 |
def generateJS(tag, atr, evalString):
    """ Generates js string for web element searching.

    Returns a JS snippet that collects every <tag> element whose attribute
    `atr` equals `evalString`. NOTE(review): the %-interpolation does not
    escape embedded double quotes in the arguments; such values would break
    the generated script — confirm inputs are controlled.
    """
    js = """
    function find_by_tag_and_attr(tag, atr, evalString) {
        const elements = document.getElementsByTagName(tag);
        const arrayLength = elements.length;
        const results = [];
        for (let i = 0; i < arrayLength; i++) {
            if (elements[i].getAttribute(atr) == evalString) {
                results.push(elements[i])
            }
        }
        return results;
    }
    return find_by_tag_and_attr(tag="%s", atr="%s", evalString="%s");
    """ % (tag, atr, evalString)
    return js
|
d7655958049aa6f74c6ed65eea089a62ee296e76
| 277,232 |
def as_channel(value):
    """Always return a channel name:

    .. code-block:: python

        >>> print(as_channel('chan'))
        #chan
        >>> print(as_channel('#chan'))
        #chan
        >>> print(as_channel('&chan'))
        &chan
    """
    if value.startswith(('#', '&')):
        return value
    return '#' + value
|
6bb792200d4a21a8efc1f857a585be46445bf9ee
| 337,677 |
import yarl
def forwarded_url(request) -> "yarl.URL":
    """Returns the URL with the correct protocol scheme.

    Looks for the X-Forwarded-Proto header and replaces the request URL's
    protocol scheme if applicable.

    :param request: The request needed to build the URL.
    :return: The corrected URL.
    """
    scheme = request.headers.get("X-Forwarded-Proto")
    if not scheme:
        # No (or empty) forwarding header: keep the URL untouched.
        return request.url
    return request.url.with_scheme(scheme)
|
51766a5fc22322a8b616185a933c5a9418e759e9
| 250,273 |
def filter_blue(rgb, red_upper_thresh, green_upper_thresh, blue_lower_thresh, output_type="bool",
                display_np_info=False):
    """
    Create a mask that filters OUT blueish colors: a pixel is considered
    blueish when its red channel is below red_upper_thresh, its green
    channel is below green_upper_thresh, and its blue channel is above
    blue_lower_thresh.

    Args:
      rgb: RGB image as a NumPy array.
      red_upper_thresh: Red channel upper threshold value.
      green_upper_thresh: Green channel upper threshold value.
      blue_lower_thresh: Blue channel lower threshold value.
      output_type: Type of array to return (bool, float, or uint8).
      display_np_info: If True, display NumPy array info and filter time.

    Returns:
      NumPy array representing the mask (True/1/255 where NOT blueish).
    """
    is_blueish = (
        (rgb[:, :, 0] < red_upper_thresh)
        & (rgb[:, :, 1] < green_upper_thresh)
        & (rgb[:, :, 2] > blue_lower_thresh)
    )
    mask = ~is_blueish
    if output_type == "bool":
        return mask
    if output_type == "float":
        return mask.astype(float)
    return mask.astype("uint8") * 255
|
1723b498145f5c481103455bbe870876d63c0ef3
| 514,181 |
def istrue(value):
    """
    Accepts a string as input.

    Returns ``True`` for ``True``, ``On``, ``Yes``, or ``1`` and
    ``False`` for ``False``, ``Off``, ``No``, or ``0``; matching is not
    case sensitive.

    Any other input raises a ``KeyError``.
    """
    truth_table = {
        'true': True, 'yes': True, 'on': True, '1': True,
        'false': False, 'no': False, 'off': False, '0': False,
    }
    return truth_table[value.lower()]
|
b1b14852cb814c9ed64a4074b32dffe6e4c0de49
| 631,518 |
def rescale(frac, mult):
    """Rescale a fraction of positives by a negative multiplier.

    frac: fraction positives
    mult: negative multiplier

    End cases 0 and 1 work here: with p = frac and n = 1 - frac, this is
    p / (p + n * mult), so frac=0 maps to 0 and frac=1 maps to 1.
    """
    denominator = frac + (1 - frac) * mult
    return frac / denominator
|
b3f80676bda1e76f0add5e5f8489f0f271e0050b
| 289,876 |
def format_timedelta(td):
    """Format a timedelta using the largest unit that divides it evenly.

    Whole multiples of weeks/days/hours/minutes/seconds render as e.g.
    "2w", "90s"; sub-second (or non-integral) values fall back to
    milliseconds when evenly divisible, else microseconds.

    Fixes over the original: the microsecond count is computed with
    round() on an integer microsecond value instead of int() on a float
    quotient, which truncated under float error (e.g. 5 ms rendered as
    "5000us"); the always-zero `int(total_seconds % 0.000001)` dead code
    is removed.
    """
    total_seconds = td.total_seconds()
    units = [(604800, 'w'), (86400, 'd'), (3600, 'h'), (60, 'm'), (1, 's')]
    for seconds, unit in units:
        if total_seconds >= seconds and total_seconds % seconds == 0:
            return "%r%s" % (int(total_seconds / seconds), unit)
    # Work in integer microseconds to avoid float truncation artifacts.
    micro = round(total_seconds * 1000000)
    if total_seconds >= 0.001 and micro % 1000 == 0:
        return "%r%s" % (micro // 1000, 'ms')
    return "%r%s" % (micro, 'us')
|
68a257b345220330b99dffee3502d096c696e352
| 588,517 |
def quadratic_sum(n: int) -> int:
    """Return the sum of squares 1**2 + 2**2 + ... + n**2."""
    total = 0
    for i in range(1, n + 1):
        total += i * i
    return total
|
7693dac427d1429db76a57aa4627aa282b6560d3
| 423,076 |
def onlydigits(value):
    """
    Filter *value*, keeping only ASCII digit characters.

    Used e.g. to strip all the line breaks present in the ``score.txt``
    file content.

    Parameters
    ----------
    value : str
        The string to filter

    Returns
    -------
    str
        The string obtained after filtering
    """
    return "".join(ch for ch in value if '0' <= ch <= '9')
|
46d0c8c271bbec3e49792dbab6183e982b107387
| 624,735 |
def split_spaces(s):
    """Split a string at multiple (1 <) spaces, dropping empty pieces.

    Args:
        s (string): Input string with more than one space between parts
    """
    parts = []
    for piece in s.strip().split(" "):
        if piece != "":
            parts.append(piece.strip())
    return parts
|
6d934b4e07a335634ef5e2a3b2b4b64e29deeace
| 209,497 |
def show_channel(package, name, channel_class):
    """
    Show channel info.

    Args:
        package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
        name (:obj:`str`): channel name to inspect
        channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use

    Returns:
        :obj:`dict`: channel info — channel name, current (latest/default)
        release, and the list of the channel's releases.

    Example:
        >>> appr.api.impl.registry.list_channels("tit/rocketchat", 'dev')
        {'channel': u'dev', 'current': '2.0.0-beta', 'releases': [u'1.10.2']}

    Raises:
        :obj:`appr.api.exception.ChannelNotFound`: channel not found

    See Also:
        * :obj:`appr.api.registry.show_channel`
    """
    channel = channel_class.get(name, package)
    return channel.to_dict()
|
5bcedb2359d16129ed075ed32f35e87e06305ad3
| 540,441 |
def _check_prefix(library_basename, filename_prefixes):
"""Return the prefix library_basename starts with or None if none matches
"""
for prefix in filename_prefixes:
if library_basename.startswith(prefix):
return prefix
return None
|
871553d2c28dc059127973aa61e4af5830dceee8
| 315,564 |
import six
def get_json_type_for_python_value(value):
    """
    Return JSON type string for the provided Python value.

    Note: ``bool`` must be tested before the numeric types because
    ``bool`` is a subclass of ``int`` in Python — in the original
    ordering ``True``/``False`` matched the "number" branch and the
    "boolean" branch was unreachable.

    :rtype: ``str``
    """
    if isinstance(value, bool):
        return "boolean"
    elif isinstance(value, str):
        return "string"
    elif isinstance(value, (int, float)):
        return "number"
    elif isinstance(value, dict):
        return "object"
    elif isinstance(value, (list, tuple)):
        return "array"
    elif value is None:
        return "null"
    else:
        return "unknown"
|
bf104a73196d6c917136e97d136da1e7307b6410
| 552,670 |
def _is_incomplete_cds(row, start_exon, end_exon):
"""
Return True if there are signals of an incomplete CDS coming from the exon.
In particular, if the start or end phase is different from 0 or -1.
"""
if end_exon:
return row['EndPhase'] in {1, 2}
if start_exon:
return row['StartPhase'] in {1, 2}
return False
|
8568321c770319dbc0582e8e8492b5cfa5e2d7ab
| 287,911 |
def is_nursery_rule_path(path: str) -> bool:
    """
    Return True when *path* points into the rules nursery.

    The nursery is a spot for rules that have not yet been fully
    polished — e.g. they may lack references to a public example of a
    technique — yet whose matches we still want to capture and report.
    It is currently a subdirectory (named "nursery") of the rules
    directory; when nursery rules are loaded, their metadata section
    should be updated with `nursery=True`.
    """
    return path.find("nursery") != -1
|
a2ae109f81d328ba6de82db3fc2fda0986575e92
| 340,487 |
import torch
def quantile(data: torch.Tensor, fraction: torch.Tensor) -> torch.Tensor:
    """
    Compute the quantile for given input data and list of fractions.

    Parameters
    ----------
    data: torch.Tensor
        input data
    fraction: torch.Tensor
        list of fractions

    Returns
    -------
    quantile (torch.Tensor)
    """
    q = torch.quantile(data, fraction, dim=-1)
    # torch.quantile places the fraction axis first; move it to the end.
    return q.swapaxes(0, -1)
|
a12a43bdc6e6655efaacb1d1484bd55aea426ea3
| 462,703 |
def createHeaderDoxygenString(moduleConfig):
    """Create the doxygen comment string that goes at the top of the header file."""
    namespace = moduleConfig['Namespace'][-1]
    class_name = moduleConfig["Class Name"]
    rel_path = moduleConfig["Relative Path"]
    parts = [
        '/** \\namespace ' + namespace + '\n',
        '* @brief Namespace declaration for modules of type: ' + namespace + '.\n',
        '*/\n\n',
        '/** \\file\n',
        '* @brief Header file for module: ' + class_name + '.\n',
        '*/\n\n',
        '/** \\dir ' + rel_path + '\n',
        '* @brief Contains code, documentation, and scripts for module: ' + class_name + '.\n',
        '*/\n\n',
    ]
    return ''.join(parts)
|
59e4c08911e9fca6012cf85d83e0246afff552fb
| 525,120 |
def is_keymap_dir(keymap):
    """Return True if Path object `keymap` has a keymap file inside
    (``keymap.c`` or ``keymap.json``), False otherwise.

    Fix: the original fell through with an implicit ``None`` when no
    keymap file was found, despite documenting a boolean result.
    """
    return any((keymap / name).is_file() for name in ('keymap.c', 'keymap.json'))
|
33aa9b3ff7ae59b3e1f3092a82688cd59d99d4f2
| 254,229 |
def get(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the tracing levels.

    ``check_mode`` and ``force`` are accepted for interface consistency
    but are not used by this read-only call.
    """
    description = "Retrieve the tracing levels"
    uri = "/isam/cluster/tracing/v1"
    return isamAppliance.invoke_get(description, uri)
|
fee615d4291898e3d02b417f26b9e648463886e4
| 308,849 |
def sum_range_numba(a: int):
    """Compute the sum of the numbers in the range [0, a).

    Kept as an explicit accumulation loop — presumably a numba JIT
    target given the name (TODO confirm).
    """
    total = 0
    for value in range(a):
        total += value
    return total
|
4a4f177fe6d9b58b92e480bde0a9e1aa8205e060
| 262,881 |
def find_max(items: list):
    """From a list of items, return the max (None for an empty list).

    Fix: the original used ``if not max_`` to detect the "not yet set"
    state, which misfires whenever the running maximum is falsy —
    e.g. ``find_max([0, -5])`` returned -5. Compare against None
    explicitly instead.
    """
    max_ = None
    for x in items:
        if max_ is None or max_ < x:
            max_ = x
    return max_
|
8a5d11ba44ef354a937e6be7919ea8a9648aaa18
| 429,988 |
def parse_field_path(field_path):
    """
    Take a path to a field like "mezzanine.pages.models.Page.feature_image"
    and return a model key, which is a tuple of the form ('pages', 'page'),
    and a field name, e.g. "feature_image".
    """
    model_path, field_name = field_path.rsplit(".", 1)
    app_name, model_name = model_path.split(".models.")
    app_label = app_name.rsplit(".", 1)[-1]
    model_key = (app_label, model_name.lower())
    return model_key, field_name
|
cdca76e066efa5bf1571122ae1dc20d69a863a84
| 547,237 |
def hexToBytes(hexStr):
    """
    Provide hex string in format 'ab ab ab...'
    Returns the byte values as a list of ints.
    """
    # Each byte occupies 3 characters ("ab "); the last one has no
    # trailing space, hence the +1 before dividing.
    count = (len(hexStr) + 1) // 3
    return [int(hexStr[3 * k:3 * k + 2], 16) for k in range(count)]
|
a745fa8e14a2fe32fe8435d7594f878481209941
| 542,585 |
def get_hostname_to_data_dict(fio_data):
    """Create dictionary mapping hostname to its fio data.

    The aggregate "All clients" entry is skipped.

    Returns:
        Dict[str, List[dict]] - hostname to its fio data
    """
    by_host = {}
    for job in fio_data['client_stats']:
        if job['jobname'] == 'All clients':
            continue
        by_host.setdefault(job['hostname'], []).append(job)
    return by_host
|
15c3b2b2159332634648a62f7ca31bc4d660e498
| 109,392 |
def split_by_length(str, length, rejoin_with=None):
    """
    Split a string into chunks of the given length.

    If *rejoin_with* is a truthy string, the chunks are joined back with
    it; otherwise the list of chunks is returned.
    """
    chunks = [str[start:start + length] for start in range(0, len(str), length)]
    if rejoin_with:
        return rejoin_with.join(chunks)
    return chunks
|
673d6477cd19d326d6018c0c11f4eff3dd6a833b
| 621,255 |
def execute_query(driver,query):
    """Execute the provided query using the provided Neo4j database connection and driver.
    Parameters:
    driver      A Neo4j bolt driver object
    query       A Cypher query to be executed against the Neo4j database

    NOTE(review): the result object escapes the ``with`` block that owns
    the session. Neo4j results are typically consumed lazily, so records
    may no longer be fetchable once the session is closed — confirm
    callers consume results while the session is alive, or materialize
    them (e.g. ``list(results)``) before returning.
    """
    with driver.session() as session:
        results = session.run(query)
        return results
|
9015689fb2bf1674d96de8aee315d002f0f03a9e
| 630,239 |
def in_to_mm(inches):
    """Convert a length in inches to millimeters."""
    mm_per_inch = 25.4
    return inches * mm_per_inch
|
3e5be1fe8badc0b92de67dc770a7f6aaa482222f
| 58,731 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.