content (string, lengths 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k) |
---|---|---|
import re
def request_url_to_fixture(url: str):
"""Convert a URL path to a fixtures filename ('/account/events' -> 'account_events')"""
params = url.split('?')
return '_'.join([param for param in re.split('/|-', params[0]) if param != ''])
|
0b20c02cf804c55de6697f58e61ab5390dad9125
| 574,211 |
def match_inner(x, y, i, j):
"""
Match whether the token is inside the entity
:param x: The start of the entity
:param y: The end of the entity
:param i: The start of the token
:param j: The end of the token
"""
return x <= i and y >= j
|
9119e26aab7780f85e4881728c1c2419edd04b00
| 345,703 |
def rad_to_deg(theta_rad):
""" Convert radians to degrees. """
return theta_rad * 57.2957795
|
abee5d77b9d0e9d3bf1c2a103b33e684a25a4ee2
| 133,594 |
def ctx(app, request):
"""Push a new application context and closes it automatically
when the test goes out of scope.
This helps to avoid creating application contexts manually
either by calling app.app_context() with python `with` statement or
by pushing or popping manually.
"""
ctx = app.app_context()
ctx.push()
def teardown():
ctx.pop()
request.addfinalizer(teardown)
return ctx
|
49292e4a56d89828101a9c6f675dff67c610f531
| 329,422 |
def inpath(entry, pathvar):
"""Check if entry is in pathvar. pathvar is a string of the form
`entry1:entry2:entry3`."""
return entry in set(pathvar.split(':'))
|
911c168d8bfab913e000f0286571b7e16813e970
| 109,853 |
def dont_give_me_five(start: int, end: int) -> int:
"""This function returns the count of all numbers except numbers with a 5."""
count = 0
for item in range(start, end+1):
if '5' not in str(item):
#print (item)
count += 1
return count
|
83d1fe6bebba342df608b51e2a59476c0978f0d2
| 604,440 |
def format_bool(boolean: bool) -> str:
"""Convert a boolean to a string."""
return "\N{BALLOT BOX WITH CHECK}" if boolean else ""
|
7d2b792430cebd1240664911a4a18757973fca0d
| 372,313 |
def _Lentz_Dn(z, N):
"""
Compute the logarithmic derivative of the Ricatti-Bessel function.
Args:
z: function argument
N: order of Ricatti-Bessel function
Returns:
        The logarithmic derivative of the Ricatti-Bessel function of order N
        with argument z, computed using the continued fraction technique of
        Lentz, Appl. Opt., 15, 668-671, (1976).
"""
zinv = 2.0 / z
alpha = (N + 0.5) * zinv
aj = -(N + 1.5) * zinv
alpha_j1 = aj + 1 / alpha
alpha_j2 = aj
ratio = alpha_j1 / alpha_j2
runratio = alpha * ratio
while abs(abs(ratio) - 1.0) > 1e-12:
aj = zinv - aj
alpha_j1 = 1.0 / alpha_j1 + aj
alpha_j2 = 1.0 / alpha_j2 + aj
ratio = alpha_j1 / alpha_j2
zinv *= -1
runratio = ratio * runratio
return -N / z + runratio
|
c34905103535b5c84a8c8df543f47ad838fe1cf1
| 97,715 |
from collections import Counter
def equivalent(list1, list2):
    """Ensures that two lists are equivalent, i.e., contain the same items with the same multiplicities."""
    if len(list1) != len(list2):
        return False
    # Counter comparison also catches lists whose sets match but whose element counts differ
    return Counter(list1) == Counter(list2)
|
2a14eab905143bd7549e5e8edc6242ea78351c46
| 277,878 |
def geo2pix(geo_transform, xcoord, ycoord):
"""
Computes the indexes of the pixel in the image array corresponding to a point with given coordinates in the
spatial reference system.
"""
xpix = int((xcoord-geo_transform[0])/geo_transform[1])
ypix = int((ycoord - geo_transform[3]) / geo_transform[5])
return xpix, ypix
|
baecc3894621168c7bd201495795530779fab840
| 315,712 |
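A quick worked check of the convention assumed above, using a hypothetical GDAL-style geotransform (origin_x, pixel_width, 0, origin_y, 0, negative_pixel_height); the numbers are illustrative only:
# hypothetical 60 m resolution raster with its origin at (440720, 3751320)
geo_transform = (440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0)
# a point 150 m east and 150 m south of the origin lands in pixel column 2, row 2
print(geo2pix(geo_transform, 440870.0, 3751170.0))  # -> (2, 2)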
def add_dict(field, dict_of_values):
"""
    Return a transform that merges the given dict of values into the dict stored under `field` of each element.
"""
def transform(element):
assert isinstance(element[field], dict), \
"add_dict called on a non-dict object. type(element[%s]) = %s" % (field, type(element[field]))
element[field].update(dict_of_values)
return transform
|
b565263578750845ae2a423141078783cd99b05c
| 588,982 |
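Because add_dict returns a transform closure rather than mutating anything itself, a short usage sketch may help; the field name and records are hypothetical:
records = [{"meta": {"a": 1}}, {"meta": {"b": 2}}]
add_source = add_dict("meta", {"source": "csv"})
for record in records:
    add_source(record)  # updates each record's "meta" dict in place
print(records)  # [{'meta': {'a': 1, 'source': 'csv'}}, {'meta': {'b': 2, 'source': 'csv'}}]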
def d11_2_A11(d11, SRM_ratio=4.04367):
"""
Convert Delta to Abundance notation.
Default SRM_ratio is NIST951 11B/10B
"""
return SRM_ratio * (d11 / 1000 + 1) / (SRM_ratio * (d11 / 1000 + 1) + 1)
|
04f3a1a58356d27328e315494a04c78b8ddae9ec
| 369,170 |
def accuracy(letters, target_string):
""" Comparing accuracy to the correct answer.
Args:
letters(np.array): (num_chars, )
target_string(str)
Return:
float: accuracy.
"""
count = 0
assert len(letters) == len(target_string)
for i in range(len(target_string)):
if letters[i] == target_string[i]:
count += 1
return count / len(target_string)
|
5898a086997d3b9ff9f9bcf84b747dd553a0e4cb
| 703,404 |
def is_comment(line, comments):
"""
A utility method to tell if the provided line is
a comment or not
Parameters
----------
str line: The line string in a file
list comments: A list of potential comment keywords
"""
    stripped = line.lstrip(' ')
    # empty or whitespace-only lines are not comments (and would otherwise raise IndexError)
    return bool(stripped) and stripped[0] in comments
|
7c4dc2fdd6efad35a216562e9655db42cc1f9d7b
| 353,753 |
def is_in_interval(num1: float, num2: float, frac: float = 0.1) -> bool:
"""Check that the num1 is in the interval defined by num2 and its fraction."""
num2_frac = num2 * frac
_min = num2 - num2_frac
_max = num2 + num2_frac
return _min <= num1 <= _max
|
b11e00be2d2346ddeea08b5fe789db4f09ee568a
| 590,002 |
def decipher(
ciphertext: str,
key: list[int],
/,
) -> str:
"""
Decrypts ciphertext encrypted by columnar transposition cipher using supplied key.
IMPORTANT: We assume that the text is padded so that length of ciphertext is a multiple of key length
"""
if len(ciphertext) % len(key):
raise ValueError("Ciphertext length must be multiple of key length!")
full_columns = len(ciphertext) // len(key)
output = [list(' '*len(key)) for _ in range(full_columns)]
for i, column in enumerate(key):
for row in range(full_columns):
output[row][i] = ciphertext[row + full_columns*column]
output = ''.join([''.join(row) for row in output])
if (padding_index := output.find('\ufffd')) != -1:
return output[:padding_index]
return output
|
4c818a68e4d60103a91ac3e8d7ab15a0b11bab09
| 524,501 |
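For context, a minimal sketch of the matching encryption direction, under the same assumptions as decipher above (key[i] names the ciphertext block that plaintext column i is written into, and U+FFFD is the padding character); this helper is not part of the original snippet:
def encipher(plaintext: str, key: list[int]) -> str:
    """Hypothetical inverse of decipher(): columnar transposition with U+FFFD padding."""
    if len(plaintext) % len(key):
        plaintext += '\ufffd' * (len(key) - len(plaintext) % len(key))
    rows = len(plaintext) // len(key)
    blocks = [''] * len(key)
    for i, column in enumerate(key):
        # plaintext column i is written out as ciphertext block `column`
        blocks[column] = ''.join(plaintext[r * len(key) + i] for r in range(rows))
    return ''.join(blocks)
# round trip: decipher(encipher('HELLOWORLD12', [2, 0, 1]), [2, 0, 1]) == 'HELLOWORLD12'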
def encode_special_characters(string: str) -> str:
"""Make string safe for urls as required by REST APIs"""
char_mapping = {"#": ";23", "&": ";26", "/": "|", "*": ";2A"}
for char, encoded_char in char_mapping.items():
string = string.replace(char, encoded_char)
return string
|
0d016c5fdb6013bbcb8b8e3ff1dbfb3205298590
| 89,478 |
def get_params(model, only_trainable=False):
"""
    Get the number of parameters in the model.
If only_trainable is true, only trainable parameters will be counted.
"""
if not only_trainable:
return sum(p.numel() for p in model.parameters())
else:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
b6c3b92b5ed32a1a38149669c43ae74127945c9f
| 521,461 |
from pathlib import Path
def find_parent_directory_containing(filename, path=None):
"""Find a directory, on the path to 'path' containing filename
if no 'path' - path from cwd
Returns None if no such found, pathlib's Path to the directory if found
"""
if not path:
path = Path.cwd()
else: # assure pathlib object
path = Path(path)
while True:
if (path / filename).exists():
return path
if path.parent == path:
return None
path = path.parent
|
f5fa21d6a03710bdd050aa14266fdc84ab84ca58
| 345,974 |
import asyncio
async def measure_response_time(session, url):
"""
    Fetch a URL with a GET request and measure the response time.
:param aiohttp.ClientSession session: All requests in one session, for connection pooling.
:param str url: Full URL for request.
:return dict: Dict containing measurements info. For example:
{
"url": "https://google.com",
"status_code": 200,
"time": 0.05735192599968286
}
"""
start = asyncio.get_event_loop().time()
try:
async with session.get(url) as response:
await response.read()
stop = asyncio.get_event_loop().time()
return {"url": url, "status_code": response.status, "time": stop - start}
except Exception:
stop = asyncio.get_event_loop().time()
return {"url": url, "status_code": None, "time": stop - start}
|
7442b4cb16df8eef25169558af9f3a779f0ccc98
| 571,460 |
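A minimal driver sketch, assuming aiohttp is installed; the URLs are placeholders:
import asyncio
import aiohttp
async def main():
    urls = ["https://example.com", "https://example.org"]  # placeholder URLs
    async with aiohttp.ClientSession() as session:
        # share one session across all requests so connections are pooled
        results = await asyncio.gather(*(measure_response_time(session, url) for url in urls))
    for result in results:
        print(result)
asyncio.run(main())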
def contrasting_text_color(hex_str: str) -> str:
"""Get a contrasting foreground text color for specified background hex color
    :param hex_str: A hex string color ('#XXXXXX') for which to determine a black-or-white
foreground color.
:return: '#FFF' or '#000'.
"""
r, g, b = (hex_str[1:3], hex_str[3:5], hex_str[5:])
luminance = (int(r, 16) * 0.299 + int(g, 16) * 0.587 + int(b, 16) * 0.114) / 255
if luminance > 0.5:
return "#000"
else:
return "#FFF"
|
f9901a07e29bd1a55fedfc9cf4035670b88b8609
| 322,530 |
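Two quick checks of the luminance threshold:
print(contrasting_text_color("#FFFFFF"))  # light background -> "#000"
print(contrasting_text_color("#1A1A2E"))  # dark background  -> "#FFF"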
def GMLPointList(point):
"""Translates the list of coordinates of one point to a string representation (GML)."""
x = point[0]
y = point[1]
z = point[2]
return "%s %s %s" % (x, y, z)
|
bc17b41e74ae42db8bb1475376c352a39e1d946d
| 401,739 |
from datetime import datetime
def new_returntime(value=None):
"""Convert the value to datetime."""
if value is None: return None
return datetime.fromtimestamp(value)
|
aa5aa995c1e78f8e7002651fe5e15da776750185
| 256,940 |
def _atomReprAsInt(s: str) -> int:
"""Translate CLVM atom repr to int."""
if s.startswith("0x"):
return int(s, base=16)
elif s.startswith('"'):
return int.from_bytes(s[1:-1].encode("ascii"), "big")
return int(s)
|
0fc8cf80b719e38b0d520fc415331978f036afa0
| 105,077 |
def d_sig(f): # pragma: no cover
"""
    Calculates the derivative of the sigmoid, given f = sigmoid(x) (i.e. the sigmoid output, not x)
"""
return f * (1 - f)
|
73809554d0f854c5b21147e6e9ae57be1636d05c
| 130,863 |
def get_primary_uri_field(fields):
"""Find the first field name containing URI"""
for f in fields:
if "URI" in f.field_name:
return f
return None
|
696ea1b15dad3c377585674100a12c128518c4c0
| 625,921 |
def standardize_role(role):
"""Convert role text into standardized form."""
role = role.lower()
if any(c in role for c in {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}):
return 'product'
return role
|
2bc4f972f5aa7836c541d7304880222d3d2e7930
| 230,900 |
def read_torrent_file(filepath):
    """Reads torrent file from filesystem and returns its contents."""
    with open(filepath, 'rb') as f:
        return f.read()
|
644c7e0c3b39957a3eba54bd8455768c290d4795
| 619,141 |
def inclusion_explicit_no_context(arg):
"""Expected inclusion_explicit_no_context __doc__"""
return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}
|
2e0880d3940c961f6ccc2715e7fa300f9b969787
| 644,403 |
def _strip_cutouts_ztf(alert_dict: dict) -> dict:
"""Drop the cutouts from the alert dictionary.
Args:
        alert_dict: ZTF alert formatted as a dict
    Returns:
        `alert_dict` with the cutouts (postage stamps) removed
"""
cutouts = ["cutoutScience", "cutoutTemplate", "cutoutDifference"]
alert_stripped = {k: v for k, v in alert_dict.items() if k not in cutouts}
return alert_stripped
|
4906e5e24980c16e058f4b577a4ba7fdf4a07cbd
| 560,192 |
def recall(target, prediction):
"""Recall = TP/(TP + FN)"""
if len(target) == 0:
return 0
tp = sum(1 for p in prediction if p in target)
return tp / len(target)
|
ee0e45a8353690cdd878c20869ebfc3e62211987
| 280,873 |
def get_dict_from_result(db_result):
    """
    Returns a list of dictionaries where each dictionary is {field:value} pairs for one row.
    """
    a = []
    for rowproxy in db_result:
        # rowproxy.items() returns an array like [(field0, value0), (field1, value1)];
        # build a fresh dictionary per row so values never leak between rows
        a.append({column: value for column, value in rowproxy.items()})
    return a
|
7045f560d85381cfc3c0a7a64dd6a28874a1bdfc
| 617,998 |
def filter_value(entry, values=None):
"""
Returns True if it should be filtered.
Only take calls with filter values in the list provided
if None provided, assume that filter_value must be PASS or blank '.'
"""
if values is None:
return len(entry.filter) != 0 and 'PASS' not in entry.filter
    return bool(values.intersection(entry.filter))
|
57ee5ab67fa07cb8c1379d303e9d636718025f45
| 703,303 |
def getCharacterFromGame(gtitle: str) -> str:
"""Return a query to get characters with lower Ryu Number to a game.
The query will retrieve the name and Ryu Number of a character whose Ryu
Number is exactly one less than the Ryu Number of the game whose title
is passed. This is used primarily for path-finding towards Ryu.
The resulting query takes the following form for game_character as C:
`(C.name: str, C.ryu_number: int)`
"""
return (f"SELECT DISTINCT C.name, C.ryu_number "
f"FROM appears_in "
f"INNER JOIN game_character AS C ON cname=C.name "
f"INNER JOIN game AS G ON gtitle=G.title "
f"WHERE gtitle LIKE '{gtitle}' AND C.ryu_number=G.ryu_number-1;"
)
|
299919a60e1d9a24c67d1efd200e9f2f804c2309
| 681,450 |
def isStorageFirewallEnabled(self, storageAccount):
"""Returns True if the given "storageAccount" is storage firewall enabled"""
if storageAccount.network_rule_set.virtual_network_rules:
return True
return False
|
eb0ff2fb9457ed4e5049e34e6568b9f5cb5a5be8
| 400,847 |
def get_mackowiak_id(object_id, seqname, start, end, strand):
""" This function creates a "Mackowiak"-style identifier for, e.g., open
reading frames. The form of the identifier is:
<object_id>_<seqname>:<start>-<end>:<strand>
example: ENSMUST00000033123_7:46175479-46179843:-
start should always be less than end, regardless of the strand; that
is, they are bed-style coordinates.
The ids are "Mackowiak"-style in that they follow the scheme used in
this paper:
Mackowiak, S., et al. Extensive identification and analysis of
conserved small ORFs in animals. Genome Biology, 2015, 16, 179.
Args:
object_id (string): an identifier for the object from which this
feature was extracted, such as a transcript identifier
seqname, start, end, strand: the relevant genomic information
Returns:
string: the mackowiak-style identifier
"""
mackowiak_id = "{}_{}:{}-{}:{}".format(object_id, seqname, start, end, strand)
return mackowiak_id
|
a32290388e8d9f84ad84f2b5218bd54ff67e3bba
| 314,328 |
def newline_list_of_strings(result_key, data, option_value=None):
"""Return a string from a list of strings while appending newline"""
return "\n".join(data)
|
b9086af7d95ec01586baa3ecbf63930860029a9e
| 194,493 |
def getVal(x, y, M):
"""
Returns the value of the element (x,y) in M.
"""
if (x,y) in M:
return M[(x,y)]
elif (y,x) in M:
return M[(y,x)]
    else:
        return 0
|
3a260b1b8501b7501a456a8146ad5f1fb5f3b771
| 606,012 |
import hashlib
def hash_file(filepath):
"""
Hashes the contents of a file. SHA1 algorithm is used.
:param filepath: Path to file to hash.
:type filepath: ``str``
:return: Hashed value as a string
:rtype: ``str``
"""
HASH_BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(filepath, "rb") as f:
buf = f.read(HASH_BLOCKSIZE)
while buf:
hasher.update(buf)
buf = f.read(HASH_BLOCKSIZE)
return hasher.hexdigest()
|
c9a374ec7aa471c3f226431be55770d108145f41
| 658,560 |
def _cast_value_to_type(value, cast_type_string):
"""Return the value casted to the type specified by the cast_type_string,
as defined in the type maps in the ReferenceGenome object's variant_key_map field
"""
if cast_type_string == 'Integer':
return int(value)
elif cast_type_string == 'Float':
return float(value)
elif cast_type_string == 'String':
return str(value)
else:
raise Exception("Unsupported type " + cast_type_string)
|
d01a83c465703c66134ece06df7f67deb366525a
| 631,416 |
import pathlib
def get_full_img_path(img_root_path, csv_path):
"""Merge csv root path and image name."""
root_dir = pathlib.Path(csv_path).parent
img_path = root_dir / pathlib.Path(img_root_path)
return str(img_path)
|
582c86466cfd886099b34736c40d440195434396
| 82,206 |
def read_file(filename):
""" Return the content of a file as a list of strings, each corresponding to a line
:param filename: string: location and name of the file
:return: content of filename
"""
with open(filename, 'r') as ofile:
content = ofile.read().splitlines()
return content
|
58a2718265fef848e484178e407aee6f7017a52a
| 700,897 |
import re
def escape_property_name_for_id(property_name: str) -> str:
"""Filter. Escape unsafe characters in a property name so that it can be used in a HTML id"""
escaped = re.sub("[^0-9a-zA-Z_,.-]", "_", str(property_name))
    if not escaped or not escaped[0].isalpha():
escaped = "a" + escaped
return escaped
|
95f3e6bf077d3de733fbd1521f30132e71da6e6e
| 340,051 |
import math
def primes_in_range(n):
"""Use Sieve of Eratosthenes find all primes in range 1..n"""
is_prime = [True] * n # is_prime[i] tells if i + 1 is a prime number
is_prime[0] = False
for prime_candidate in range(2, int(math.sqrt(n)) + 1):
if is_prime[prime_candidate - 1]:
# mark every multiple of prime_candidate as non-prime
for k in range(2, (n // prime_candidate) + 1):
non_prime = prime_candidate * k
assert non_prime <= n
is_prime[non_prime - 1] = False
primes = []
for i, i_is_prime in enumerate(is_prime):
if i_is_prime:
primes.append(i + 1)
return primes
|
f3ee68546c12b9aeac39cc8ea60bdbf10350f8a4
| 150,056 |
import torch
def tfidf_transform(tfidf_vectorizer, corpus_data, cuda0):
    """
    Apply TFIDF transformation to test data.
    Args:
        tfidf_vectorizer (object): trained tfidf vectorizer
        corpus_data (DataFrame): corpus of all documents, with the raw text in a "text" column
        cuda0: torch device on which to place the resulting tensor
    Returns:
        X_test (torch.Tensor): tfidf word-document matrix of the test data
    """
    corpus_list = corpus_data["text"].tolist()
    # use transform (not fit_transform) so the already-fitted vocabulary and IDF weights are reused
    tfidf_vector = tfidf_vectorizer.transform(corpus_list)
    X_test = torch.t(torch.tensor(tfidf_vector.todense(), dtype=torch.float64, device=cuda0))
    return X_test
|
6f18b7114579412e1442b1c6220f34b7f643a2ab
| 701,849 |
def layer2namespace(layer):
"""
converts the name of a layer into the name of its namespace, e.g.
'mmax:token' --> 'mmax'
"""
return layer.split(':')[0]
|
8706003d04243a26b963ff8c6855f49a9bc9ed94
| 565,727 |
def _str_to_bool(value: str) -> bool:
"""
Parse string into bool. It tries to match some predefined values.
    If none matches, python bool(value) is used.
:param value: string to be parsed into bool
:return: bool value of a given string
"""
if isinstance(value, str):
if value.lower() in ['0', 'false', 'no']:
return False
if value.lower() in ['1', 'true', 'yes']:
return True
return bool(value)
|
5b16da2512018b34feaa20f82625ace6f4380ddd
| 145,732 |
def _get_var_meta(cr, case_name, var):
"""
Return the metadata for the variable of the given name in the given case.
Parameters
----------
cr : CaseReader
The case reader housing the data.
case_name : str
The case from which the outputs with available metadata is to be returned.
var : str
The output whose metadata is desired.
Returns
-------
    dict
        A dictionary of the metadata for the given variable (output or input).
"""
case = cr.get_case(case_name)
case_outputs = case.list_outputs(prom_name=True, units=True, shape=True, val=False,
residuals=False, out_stream=None)
for _, meta in case_outputs:
if meta['prom_name'] == var:
return meta
case_inputs = case.list_inputs(prom_name=True, units=True, shape=True, val=False,
out_stream=None)
for _, meta in case_inputs:
if meta['prom_name'] == var:
return meta
    raise KeyError(f'No variable named {var} found')
|
7c16e474b9bbd3b5a83869a342793e35f2b252c1
| 601,739 |
def getFirstNonClippedPositionInRead(alignedSegment, readSeq):
"""Gets the coordinate of the first non-clipped position in the read relative to the
complete read sequence (including any hard clipped bases).
If the alignment is on the reverse strand the coordinate is negative, e.g. the reverse strand coordinate of
the 2nd position of the read sequence is -1 (0 based).
"""
if alignedSegment.cigar[0][0] == 5: #Translate the read position to the original
#coordinates by removing hard clipping
readOffset = alignedSegment.cigar[0][1]
else:
readOffset = 0
if alignedSegment.is_reverse: #SEQ is reverse complemented
readOffset = -(len(readSeq) - 1 - readOffset)
readOffset += alignedSegment.query_alignment_start #This removes any soft-clipping
return readOffset
|
28dfba57e47de456ef9fefa9e344235a447e3b7f
| 137,922 |
def replace_in_string(phrase: str, l_to_replace: str, replacement: str):
"""
    Take the 'phrase' string and replace every occurrence of 'l_to_replace' with the 'replacement' string
:param phrase: string that will be modified
:param l_to_replace: string to be replaced
:param replacement: string that will replace 'l_to_replace'
:return: 'phrase' string modified
"""
return phrase.replace(l_to_replace, replacement)
|
8815b0bb69754ee739e0c2d8f5a0fd79e8ebe1b3
| 352,143 |
import ast
def _to_dict(contents):
"""Parse |contents| as a dict, returning None on failure or if it's not a
dict."""
try:
result = ast.literal_eval(contents)
if isinstance(result, dict):
return result
    except (ValueError, TypeError, SyntaxError):  # literal_eval raises SyntaxError on unparsable input
pass
return None
|
4e9acba2e247c72dc7927e37087415e5606d6b73
| 663,516 |
import zipfile
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = f.read(f.namelist()[0]).split()
return data
|
08d8844025e5bb10a66301e35b8bbe4f37b886c9
| 89,057 |
def arguments(*args):
"""A Decorator which you wrap around a function or a method to indicate
which magical objects you want passed to your functions which are
embedded in a Resource's template.
The magical values you can pass are:
LOCATION
REQUEST
SITE
RESOURCE
METHOD
URI
For example, to pass a function the SiteMap instance, do this:
@arguments(SITE)
def foo(site):
print "The site!"
The values which you pass a string argument to get variables out of the
request environment (which also take a default) are:
QUERY
QUERIES
ARG
ARGS
COOKIE
HEADER
For example, to pass a query parameter to a function, with a default
if the parameter is not present, do this:
@arguments(QUERY('foo', 'default'))
def foo(foo):
            print(foo)
    All the magic objects which take string values have a __getattr__
    implementation for syntactic convenience when you do not need to
    pass a default value (None will be used as the default value):
        @arguments(QUERY.foo)
        def foo(foo):
            print(foo)
"""
def decorate(func):
func.arguments = args
return func
return decorate
|
debcfe4e7ca05150f8a29fc7455ae788a58d1d36
| 262,065 |
def _add(a, b):
"""
Simple function that adds two things.
Will be decorated for testing.
"""
output = a + b
return output
|
4ce7c27c3fe362934b96f5867013b9f1432da17c
| 341,147 |
from typing import Callable
from typing import Any
from typing import Sequence
import functools
def apply_middlewares(
func: Callable[..., Any], middlewares: Sequence[Callable[..., Any]]
) -> Callable[..., Any]:
"""
Apply a list of middlewares to a source function.
- Middlewares must be structured as: ``middleware(next, *args, **kwargs)``
and call the next middleware inline.
>>> def square(x): return x ** 2
>>> def double(next, x): return next(x * 2)
>>> def substract_one(next, x): return next(x - 1)
>>> final = apply_middlewares(square, [double, substract_one])
>>> final(2) # ((2 - 1) * 2) ^ 2
4
>>> final(10) # ((10 - 1) * 2) ^ 2
324
"""
tail = func
for mw in middlewares:
if not callable(mw):
raise TypeError("Middleware should be a callable")
tail = functools.partial(mw, tail)
return tail
|
04685a34f401eb884e8bc352a8be285fb3b9a53e
| 65,560 |
def ishexdigit(c):
"""
>>> ishexdigit('0')
True
>>> ishexdigit('9')
True
>>> ishexdigit('/')
False
>>> ishexdigit(':')
False
>>> ishexdigit('a')
True
>>> ishexdigit('f')
True
>>> ishexdigit('g')
False
>>> ishexdigit('A')
True
>>> ishexdigit('F')
True
>>> ishexdigit('G')
False
"""
return c.isdigit() or ord('a') <= ord(c.lower()) <= ord('f')
|
b450b243bc40ea4f5c84ddfdeddcd8022839def3
| 46,959 |
def get_confirmation(question: str, default_yes: bool = True) -> bool:
"""Retrieve y/n answer from user, with default."""
question = question + (" ([y]/n): " if default_yes else " (y/[n]): ")
resp = input(question)
while resp not in ["y", "n", ""]:
resp = input(question)
if (resp == "" and not default_yes) or resp == "n":
return False
return True
|
365bfe72001429e09f0cec40595025f399d40488
| 147,022 |
def is_uvular(sound):
"""
Check if a sound is uvular or not.
"""
return sound.obj.place == "uvular"
|
97e2d08975624576dd8b6c375a2a4241fdc48162
| 616,802 |
def translate(point, offset):
"""
Translate a point by an offset.
Arguments:
point: a point, e.g., (x, y)
offset: an offset, e.g., (x, y)
Returns: a translated point
"""
return (point[0] + offset[0], point[1] + offset[1])
|
6d8256459cf5c6c150762d13fe8b3c80f90cf9e8
| 366,556 |
def _invert_and_aggregate(the_dict):
"""
Transform a dictionary like this:
{ key1: value1,
key2: value2,
key3: value2 }
Into a dictionary of lists like this:
{ value1: [ key1 ],
value2: [ key2, key3 ] }
See tests.py for an example:
"""
_ = {}
# invert it
for k, v in the_dict.items():
_.setdefault(v, []).append(k)
return _
|
64bf7275398d862965ce1825c953ede29a175e8d
| 284,399 |
def otsu_segmentation(image, k, mask=None):
"""
Otsu image segmentation
This is a very fast segmentation algorithm good for quick explortation,
but does not return probability maps.
ANTsR function: `thresholdImage(image, 'Otsu', k)`
Arguments
---------
image : ANTsImage
input image
k : integer
integer number of classes. Note that a background class will
be added to this, so the resulting segmentation will
have k+1 unique values.
mask : ANTsImage
segment inside this mask
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm
"""
if mask is not None:
image = image.mask_image(mask)
seg = image.threshold_image('Otsu', k)
return seg
|
0ea5175b1458731503569c789925168537f5d42d
| 207,157 |
def find_max_time_overlap(hypseg, reflist):
"""Find reference segment which encompasses the maximum time of the hypothesis segment."""
hbeg, hend = hypseg[0], hypseg[2]
times = []
for [rlbl, rseg] in reflist:
b = max(hbeg, rseg[0])
e = min(hend, rseg[2])
times.append(e - b)
return times.index(max(times))
|
7f73b07bb9a5e88578ce3b444ad894c2dd210653
| 242,913 |
def convert_green(string):
"""Return green text"""
return f"{string}"
|
3b0f86ec22e5959fa2545138615757b6c808abf3
| 575,179 |
def annotate(msg, geoc, filter_region=None):
"""
Annotate message with geocountry info
Params:
msg - dict object
geoc - GeoCountry object
filter_region - region to be filtered
"""
content = geoc.annotate(msg)
content_region = content.get("embersGeoCodeCountry", {}).get("region", None)
if content_region is None:
return None
if filter_region is not None and filter_region != content_region:
return None
return content
|
edf2b926436d64a9eb350e30efe0ca590de82b46
| 286,309 |
import yaml
def gen_lookup(file):
""" Generating the lookup table between api-endpoints and elasticsearch
instances from the configuration of the lod-api.
lookup table is of the form:
{"/api/endpoint": "http://elasticsearchhost:port/index/doctype",
"/resource": "http://elasticsearchhost:9200/resource/data",
"/works": …,
}
returns: dict.
"""
es_lookup = dict()
# read complete api configuration
with open(file, 'r') as instream:
config = yaml.safe_load(instream)
# generate lookup for source indices
for source, instance in config["source_indices"].items():
key = "/source/{}".format(source)
# remove trailing slash from index-URL
es_lookup[key] = instance.rstrip("/")
# generate remaining index lookups
es_address = "http://{h}:{p}".format(h=config["es_host"],
p=config["es_port"])
for ctx in config["indices"]:
doctype = config["indices"][ctx].get("type")
index = config["indices"][ctx].get("index")
es_lookup["/" + index] = "{h}/{i}/{t}".format(
h=es_address, i=index, t=doctype)
return es_lookup
|
a007a0be1c18b585b9ebf66b194e55458f764146
| 684,398 |
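A sketch of the configuration shape gen_lookup appears to expect, written to a temporary file; the host, index and doctype names are made up for illustration:
import tempfile
import textwrap
sample_config = textwrap.dedent("""\
    es_host: localhost
    es_port: 9200
    source_indices:
      slub: "http://localhost:9200/source-slub/"
    indices:
      resource:
        index: resource
        type: schemaorg
""")
with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as tmp:
    tmp.write(sample_config)
print(gen_lookup(tmp.name))
# {'/source/slub': 'http://localhost:9200/source-slub',
#  '/resource': 'http://localhost:9200/resource/schemaorg'}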
def lc(value):
"""Lower case and remove any periods to normalize for comparison."""
if not value:
return u''
return value.lower().replace('.','')
|
7f1c0e703f632931e7117f198ba4592c3b174e0f
| 598,868 |
def _get_not_none(meta, key, none_subst=dict):
"""
Return meta[key] if key is in meta and its value is not None, otherwise
return none_subst().
Some recipes have an empty build section, so it'll be None and we can't
do a chained get.
"""
ret = meta.get(key)
return ret if (ret is not None) else none_subst()
|
52e7d2efd48009e549f800dda1922e2d344b19f7
| 280,384 |
def _parse_description(metadata_field):
"""
Parse the description field from the metadata if available.
Limit to the first 2000 characters.
"""
try:
if 'description' in metadata_field:
return metadata_field['description'][:2000]
except TypeError:
return None
|
f32a2c01330a15dc82c5b9c34235ae96ff030e50
| 355,284 |
import typing
import struct
def read_uint24(stream: typing.BinaryIO) -> int:
"""Reads a Uint24 from stream"""
return struct.unpack("<I", stream.read(3) + b"\x00")[0]
|
e6c044b210fd8a3b3100f9f1258f66c8ccef3edc
| 148,694 |
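A one-off check with an in-memory stream (little-endian, so the third byte is the most significant):
import io
print(read_uint24(io.BytesIO(b"\x01\x02\x03")))  # 0x030201 == 197121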
def response_creator(text, card):
"""
Builds a response with speech part and Alexa appcard contents
:param text: text to be spoken
:param card: text for the app card
:return: JSON object to be returned
"""
text_item = {"type": "PlainText", "text": text}
card_item = {"type": "Simple", "title": "Stop Info", "content": card}
reprompt = {
"outputSpeech": {"text": "Which stop do you want to know about?",
"type": "PlainText"}}
response = {"version": "1.0",
"response": {"outputSpeech": text_item, "card": card_item,
"reprompt": reprompt,
"shouldEndSession": True}}
return response
|
cc1ce310616fc7de60b636698e3d288403db8af6
| 59,070 |
import re
def escape_description(desc):
""" Escape a string for use as an argument description.
:param str desc: the description
:returns: esacped description
:rtype: str
"""
desc = desc.replace('\n', ' ')
desc = desc.replace('\'', '')
# Special meaning in zsh optspec, so substitute inoffensive braces.
desc = desc.replace('[', '{')
desc = desc.replace(']', '}')
desc = re.sub(' +', ' ', desc)
# Descriptions are way too long. Kill everything after the first sentence.
    desc = re.sub(r'\.\s.*$', '', desc)
return desc
|
b1d7e377a41a59109549a194dbb6f225deb218c2
| 556,764 |
def leiaint(msg):
"""
    --> Validate user input as an integer (int).
    :param msg: prompt message shown to the user when asking for input
    :return: the value entered, once it is a valid integer
    :print: writes an error message to the screen when the value entered is not an integer
"""
while True:
n = str(input(msg)).strip()
if n.replace('-', '').isnumeric():
return int(n)
else:
print(f'\033[0;31mErro! Digite um número inteiro válido.\033[m')
|
29b96b26055d965e987bf1e39a96b1924751f56c
| 45,996 |
def unzip(seq):
"""
The inverse of the builtin ``zip`` function, this method transposes groups of elements into new
groups composed of elements from each group at their corresponding indexes.
Examples:
>>> list(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)]))
[(1, 2, 3), (4, 5, 6), (7, 8, 9)]
>>> list(unzip(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)])))
[(1, 4, 7), (2, 5, 8), (3, 6, 9)]
Args:
seq (Iterable): Iterable to unzip.
Yields:
tuple: Each transposed group.
"""
return zip(*seq)
|
2aff6f3d9e79e52f1e0ce16c1080eab102c1b512
| 115,062 |
def kvp_string_to_rec(string):
"""Take an input string 'a=b,c=d,e=f' and return the record
{'a':'b','c':'d','e':'f'}"""
rec = {}
for kvp in string.split(','):
arr = kvp.split('=')
        if len(arr) != 2:
raise Exception("Cannot convert %s to KVP" % string)
rec[arr[0]] = arr[1]
return rec
|
1161b84ca2e929b4b8c00e68c7ed8ed4fb424f5e
| 246,613 |
def _get_bbox_feature(image_size, feature_map, x_min, x_max, y_min, y_max):
"""Get bounding box features.
The feature is the center pixel of a bounding box.
Args:
image_size (tuple): Size of an image in [x, y].
feature_map (numpy.ndarray(numpy.float)): Feature map of size [x, y, n_features] of whole
image where the bounding box lies in.
x_min (int): Minimum X coordinate of bounding box.
x_max (int): Maximum X coordinate of bounding box.
y_min (int): Minimum Y coordinate of bounding box.
y_max (int): Maximum Y coordinate of bounding box.
Returns:
        numpy.ndarray(numpy.float): Feature of size [n_features].
"""
feature_map_size = feature_map.shape[:2]
bbox_center_orig_images = ((x_min + x_max) / 2,
(y_min + y_max) / 2)
bbox_center_relative = [center / size
for center, size in zip(bbox_center_orig_images, image_size)]
bbox_center_feature_map = [int(relative_position * size)
for size, relative_position
in zip(feature_map_size, bbox_center_relative)]
bbox_feature = feature_map[bbox_center_feature_map[0], bbox_center_feature_map[1], :]
return bbox_feature
|
a5e61183686661015ee6c401b7661ee6c39d7d5e
| 637,042 |
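A small usage sketch with a random stand-in for a CNN feature map; the shapes and box coordinates are arbitrary:
import numpy as np
feature_map = np.random.rand(32, 32, 8)  # stand-in for a 32x32 map with 8 channels
feature = _get_bbox_feature(image_size=(640, 480), feature_map=feature_map,
                            x_min=100, x_max=200, y_min=50, y_max=150)
print(feature.shape)  # (8,) -- the channels at the box centre pixel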
def compute_ema(df, column_source, column_target_ema, time_periods):
"""
Compute Exponential Moving Average (EMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute EMA (e.g. close price)
:param column_target_ema: prefix of target column in dataframe for EMA results
:param time_periods: list of time periods (number of days for EMA)
:return: modified dataframe
"""
# compute EMA for each time period and add results back to dataframe
for time_period in time_periods:
key_ema = column_target_ema + "-{:d}".format(time_period)
ema_series = df[column_source].ewm(span=time_period, adjust=False).mean()
df[key_ema] = ema_series
return df
|
95fc2db10f3ba8abe8e790ac9698d024175f6485
| 441,397 |
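A short pandas usage sketch; the column name and prices are made up:
import pandas as pd
df = pd.DataFrame({"close": [10.0, 11.0, 12.0, 11.5, 13.0]})
df = compute_ema(df, column_source="close", column_target_ema="ema", time_periods=[3, 5])
print(df[["close", "ema-3", "ema-5"]])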
def bin2hexstring(bin_str):
"""
    Convert a binary string to a hexadecimal string, 4 binary digits per hex digit.
    :param bin_str: binary string
    :return: hexadecimal string
"""
bin_len = len(bin_str)
left = 0
right = 4
re_str = hex(int(bin_str[left:right], 2))[2:]
for i in range(right, bin_len, 4):
left = right
right += 4
re_str += hex(int(bin_str[left:right], 2))[2:]
return re_str
|
823ba4ef86ebcf7e30a29c3718768c6a654acad5
| 708,040 |
def adapt_dbm_reset_ast(dbm_reset_ast):
"""Transforms the expression ast of a reset into a clock reset ast.
Args:
dbm_reset_ast: The reset expression ast.
Returns:
The clock reset ast.
"""
clock = dbm_reset_ast["expr"]["left"]
val = dbm_reset_ast["expr"]["right"]
return {"clock": clock, "val": val, "astType": "ClockReset"}
|
7614c2a4edfa3c6c4b7b728de1604f8ca6e81553
| 63,148 |
def bottomupsegment(sequence, create_segment, compute_error, max_error):
"""
Return a list of line segments that approximate the sequence.
The list is computed using the bottom-up technique.
Parameters
----------
sequence : sequence to segment
create_segment : a function of two arguments (sequence, sequence range) that returns a line segment that approximates the sequence data in the specified range
    compute_error: a function of two arguments (sequence, segment) that returns the error from fitting the specified line segment to the sequence data
max_error: the maximum allowable line segment fitting error
"""
segments = [create_segment(sequence,seq_range) for seq_range in zip(range(len(sequence))[:-1],range(len(sequence))[1:])]
mergesegments = [create_segment(sequence,(seg1[0],seg2[2])) for seg1,seg2 in zip(segments[:-1],segments[1:])]
mergecosts = [compute_error(sequence,segment) for segment in mergesegments]
while min(mergecosts) < max_error:
idx = mergecosts.index(min(mergecosts))
segments[idx] = mergesegments[idx]
del segments[idx+1]
if idx > 0:
mergesegments[idx-1] = create_segment(sequence,(segments[idx-1][0],segments[idx][2]))
mergecosts[idx-1] = compute_error(sequence,mergesegments[idx-1])
if idx+1 < len(mergecosts):
mergesegments[idx+1] = create_segment(sequence,(segments[idx][0],segments[idx+1][2]))
            mergecosts[idx+1] = compute_error(sequence,mergesegments[idx+1])  # cost must track the just-recomputed merge candidate
del mergesegments[idx]
del mergecosts[idx]
return segments
|
bb656a1413d643cc4b6fc1aa42b62bfbf796cdcd
| 372,175 |
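A minimal sketch of the two callables the function expects, assuming segments are (x0, y0, x1, y1) tuples so that indices 0 and 2 are the start and end positions the merge step reads; the data and error model are illustrative only:
def create_segment(sequence, seq_range):
    # naive segment: a straight line between the two endpoints, stored as (x0, y0, x1, y1)
    x0, x1 = seq_range
    return (x0, sequence[x0], x1, sequence[x1])
def compute_error(sequence, segment):
    # sum of squared residuals against linear interpolation between the endpoints
    x0, y0, x1, y1 = segment
    return sum((sequence[x] - (y0 + (y1 - y0) * (x - x0) / (x1 - x0))) ** 2
               for x in range(x0, x1 + 1))
data = [1, 2, 3, 4, 8, 12, 16, 13, 10, 7]
print(bottomupsegment(data, create_segment, compute_error, max_error=0.5))
# three segments, one per (roughly) linear run of the data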
def from_ell_to_index(ell):
"""
    Return the (start, end) column index range for multipole ell, assuming a matrix
    whose columns are ordered by m, with m ranging from -ell to +ell.
"""
return ell ** 2, ell ** 2 + 2 * ell + 1
|
aef533a7e3c149a61a803c037d8a9d96c59cb87e
| 375,671 |
def is_ascii(some_string):
"""Check if a string only contains ascii characters"""
try:
some_string.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
|
dac0db05037c588f553d445a1e4f756f07db1ee7
| 151,773 |
def input_prompt(prompt):
"""
Get user input
"""
return input(prompt)
|
76724d278cf68ef2b16b7aa35ee61336a373f9ca
| 683,497 |
def excel_column_number(raw_column_name, index=1):
"""Given a column name, give me the column number
A=1
Z=26
AA=27
AZ=52
BA=53 etc"""
def char_value(c):
return ord(c)-64
value = 0
for char in raw_column_name.upper():
value = value * 26
value = value + char_value(char)
return value - 1 + index
|
129bd81b75c9aa04756fac24c36c7d86bb3c9148
| 139,444 |
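Quick sanity checks of the 1-based default and the index offset:
print(excel_column_number("A"))            # 1
print(excel_column_number("AA"))           # 27
print(excel_column_number("AA", index=0))  # 26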
def exists_in_perso(perso, asn):
"""
:param perso: personal AS
:param asn: These ASn exist in AS_rank dataset
:return: True if an ASn from AS_rank dataset exists in personal dataset else returns False
"""
return asn in perso.asn.values
|
701760e9d0bbf3317a0bbc8e4da3811b20cdc4b8
| 85,264 |
import time
def create_time_string(time_format='%Y%m%d%H%M%S'):
"""Returns current time formatted as `time_format`
Parameters
----------
time_format : str
Refer https://docs.python.org/3/library/time.html#time.strftime for options
Returns
-------
str
time as string in requested format
"""
return time.strftime(time_format)
|
bae261dd0fb5473129a58d2d996cce44cbe494b3
| 309,723 |
def parse_int(int_str):
"""Parse an int or return None."""
try:
return int(int_str)
except (TypeError, ValueError):
return None
|
ce5b2c24afaad834e9513bffa423a96df1b39d57
| 279,108 |
import aiohttp
async def download_file(slack_api_token, file):
"""Downloads a file from slack into memory and returns the raw bytes."""
headers = {'Authorization': 'Bearer {}'.format(slack_api_token)}
async with aiohttp.ClientSession() as session:
async with session.get(file["url_private"], headers=headers) as resp:
return await resp.read()
|
3d9890ea6e1b9601be2d1000a44ea92203af5d2e
| 240,819 |
import requests
import pickle
def api_request(file, thresh=0.5):
"""
Post request to serverless backend api where our model is lcoated
Receives a csv with the classes and polygons classified by our model
Parameters
----------
file: .tiff file
Tiff file to be classified by our model
thresh: float
        Threshold applied: to be classified, a prediction value has to be equal to or greater than the threshold
"""
try:
req = requests.post('https://hxy1cn1sl8.execute-api.us-east-1.amazonaws.com/Prod/segment_tiff',
data=file, params={'threshold': thresh})
polygons = pickle.loads(req.content)
        polygons.to_csv('src/data/polygons.csv', index=False)
    except Exception:
        # retry indefinitely on failure, preserving the caller's threshold
        return api_request(file, thresh)
|
717302d8c8144cb65953298eeef74efa5324a7c0
| 75,849 |
def get_mean_accuracy(n):
"""
Get the mean accuracy from a confusion matrix n.
Parameters
----------
n : dict
Confusion matrix which has integer keys 0, ..., nb_classes - 1;
an entry n[i][j] is the count how often class i was classified as
class j.
Returns
-------
float
mean accuracy (in [0, 1])
Examples
--------
>>> n = {0: {0: 10, 1: 2}, 1: {0: 5, 1: 83}}
>>> get_mean_accuracy(n)
0.8882575757575758
"""
t = []
k = len(n[0])
for i in range(k):
t.append(sum([n[i][j] for j in range(k)]))
return (1.0 / k) * sum([float(n[i][i]) / t[i] for i in range(k)])
|
8c505f3cd660f3a5096c00bb04ee0d996aa974cb
| 601,326 |
def get_train_validate(full_df, features, split):
"""Generate training and validation sets with features."""
X = full_df[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.value_counts())
print('\nSplit train and validation sets on day', split)
X_train = X[X['day'] <= split]
X_val = X[X['day'] > split]
y_train = X_train.pop('demand')
y_val = X_val.pop('demand')
days_train = len(X_train['day'].unique())
days_val = len(X_val['day'].unique())
print('')
print(days_train, 'days in train set.')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('')
print(days_val, 'days in validation set.')
print('X_val:', X_val.shape)
print('y_val:', y_val.shape)
return X_train, X_val, y_train, y_val
|
e6186cd902840a5cbd4ba25af79a8c1497311313
| 185,106 |
def kwargs_to_ctypes_array(argument, kwargs, dtype):
"""
Convert an iterable argument from kwargs into a ctypes array variable.
If the argument is not present in kwargs, returns ``None``.
Parameters
----------
argument : str
The name of the argument.
kwargs : dict
Dictionary of keyword arguments.
dtype : ctypes type
The ctypes array type (e.g., ``ctypes.c_double*4``)
Returns
-------
ctypes_value : ctypes array or None
Examples
--------
>>> import ctypes as ct
>>> value = kwargs_to_ctypes_array('bla', {'bla': [10, 10]}, ct.c_int*2)
>>> type(value)
<class 'gmt.clib.utils.c_int_Array_2'>
>>> b = 1
>>> should_be_none = kwargs_to_ctypes_array(
... 'swallow', {'bla': 1, 'foo': [20, 30]}, ct.c_int*2)
>>> print(should_be_none)
None
"""
if argument in kwargs:
return dtype(*kwargs[argument])
return None
|
715c8cd1d0fd6cbb526e32f9f83e01178742bf42
| 562,954 |
import pathlib
def click_convert_to_path(ctx, param, value):
"""
Callback function for click.option to ensure all pathes are PosixPathes.
"""
assert value is not None
if isinstance(value, str):
value = pathlib.Path(value)
return value
|
ec0cd8c97f7bbac4fbbca1e897fe41fb10444ac0
| 144,964 |
import re
import json
def _load(body):
"""This function cleans Google's response and coverts
it to JSON.
Args:
body (str): HTTP body
Returns:
dict
"""
clean = re.sub(r'^[^\[]*\n\[', '[', body)
return json.loads(clean)
|
2174586923af0f63a65eb58fed3aa5b6edb0f8b4
| 437,517 |
def find_bad_sitezero(X):
"""Returns indices of bad rows from the early days of Site 0 (UCF)."""
return X[(X.timestamp < 3378) & (X.site_id == 0) & (X.meter == 0)].index
|
65bd43455879ed74228bbc1c6224229b14c647aa
| 208,954 |
def parse_signed_url_for_bot_uid(url: str):
"""Parses a bot UID from a signed URL.
Assumes the signed URL follows the following scheme:
[...]/{bot_uid}.zip?[...]
:param url:
:return:
"""
endpoint = url.split("?", maxsplit=1)[0]
file_name = endpoint.split("/")[-1]
bot_uid = file_name.split(".")[0]
return bot_uid
|
dd5fbf078405e5c18cdaeca6df36f143792d8105
| 177,139 |
def _find_image_object(images_list, image_name):
"""
Find and return an image object from images_list with a name that matches image_name
:param images_list: <list> List of <DockerImage> objects
:param image_name: <str> Name of image as per buildspec
:return: <DockerImage> Object with image_name as "name" attribute
"""
ret_image_object = None
for image in images_list:
if image.name == image_name:
ret_image_object = image
break
return ret_image_object
|
7b868c5e3f58a4a318d873eb9c64a0f6b16d104a
| 197,450 |
import re
def humansort_key(strng):
"""Human/natural sort key-gathering function for sorted()
Source: http://stackoverflow.com/a/1940105
"""
if isinstance(strng, tuple):
strng = strng[0]
    # conditional expression (rather than `and/or`) so a digit run of "0" still compares as an int
    return [int(w) if w.isdigit() else w.lower()
            for w in re.split(r'(\d+)', strng)]
|
f633a05d9a599a0ec8e85782118dca47c06bba41
| 603,603 |
def z_score(val, v_mean, v_std):
"""Calculate a z-score given a value, mean, and standard deviation.
NOTE: The z_score() of a constant vector is 0
"""
score = 0 if v_std == 0 else (val - v_mean) / v_std
return score
|
1381041fc2edca12fc4ece8df0d3e80a7347ed62
| 221,043 |
from datetime import datetime
def times(fields):
"""Return a starting and ending datetime, given `fields` like:
[u'CLOCK:', u'[2013-03-08', u'Fri', u'14:24]--[2013-03-08', u'Fri',
u'15:41]', u'=>', u'1:17']
"""
return (datetime.strptime(fields[1] + fields[3][:5], u'[%Y-%m-%d%H:%M'),
datetime.strptime(fields[3][9:] + fields[5], u'%Y-%m-%d%H:%M]'))
|
8a1fb78e893e71c96dc2a04c1897bfc4b7a1d367
| 51,949 |
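For example, with fields produced by splitting an org-mode CLOCK line (the same timestamps as in the docstring):
fields = u'CLOCK: [2013-03-08 Fri 14:24]--[2013-03-08 Fri 15:41] =>  1:17'.split()
start, end = times(fields)
print(start, end)  # 2013-03-08 14:24:00 2013-03-08 15:41:00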
def gatt_procedure_write_handle_async(stack_connection, connection_handle, handle, data, timeout=15 * 1000):
"""
Writes data to target attribute handle and returns a GATTRequest object that contains either
    a GATTResponse or a GATTError. This is an asynchronous function that issues the request
and returns a GATTRequest object. The GATTRequest can then be manually managed using the
has_response and has_error class instance functions to wait for a response.
:param stack_connection: Connection to the pybt core that allows us to send BLE packets
:type stack_connection: blesuite.pybt.core.Connection
:param connection_handle: Connection handle to send the request to
:type connection_handle: int
:param handle: Target handle
:param data: Data to write to handle
:type handle: int - base 10
:type data: str
:param timeout: Timeout for the ATT request (miliseconds)
:type timeout: int
    :rtype: blesuite.pybt.core.GATTRequest
:return: GATTRequest object
"""
request = stack_connection.write_req_async(handle, data, connection_handle, timeout=timeout)
return request
|
4a5d316d2cc0cdfec31d64e66e7c7dcf2026dc58
| 407,032 |