content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def pow__mod_c(a, k, c):
    """Compute a**k (mod c) by recursive squaring.

    Assumes a, k >= 0 and c > 1, all integers.
    """
    if k == 0:
        return 1
    # Square the half-exponent result; multiply by a once more for odd k.
    half = pow__mod_c(a, k // 2, c)
    if k & 1:
        return (a * half ** 2) % c
    return (half ** 2) % c
|
ab21e214e2f6aae2b32825db3e4e67b8d6a51efe
| 39,322 |
import math
def _log2(n):
"""Returns the log base 2 of an integer."""
return math.log(n)/math.log(2)
|
76921c835801e489d648482eadb5f1b52525a21b
| 39,327 |
def mask(seq, keep_start, keep_end):
    """Mask the sequence with 'N', leaving only [keep_start, keep_end) unmasked."""
    prefix = 'N' * keep_start
    suffix = 'N' * (len(seq) - keep_end)
    return prefix + seq[keep_start:keep_end] + suffix
|
8df0b7d14e1ab2b2a5b9901539e2d2fa210f0eb4
| 39,331 |
def player_turn_to_board_location(move):
    """Convert the player's 1-based move number to a (row, column) pair on a 3x3 board."""
    zero_based = move - 1
    return divmod(zero_based, 3)
|
7b7a29775842f6224ba9c161bdfc17215e5079cd
| 39,337 |
def left(n):
    """Is node n a left descendant of the root in a zero-indexed boolean tree?"""
    # Walk up to the root's children; node 1 is the left child, node 2 the right.
    current = n
    while current > 2:
        current = (current - 1) // 2
    return current == 1
|
52c81381bddd048cfb1e07f67f6028a3cfc15514
| 39,339 |
import yaml
def parse_feature_extraction_config(file_path):
    """Parse the feature extraction configuration from a yaml file.

    Args:
        file_path (str): path to yaml file.

    Returns:
        (dict): dictionary specifying which features to extract as well as
        additional parameters for the feature extraction process.
    """
    with open(file_path, 'r') as config_file:
        return yaml.safe_load(config_file)
|
c812da87d6eb86a7bc5bb6448378a383f19dd09f
| 39,343 |
def lsn_to_hex(num: int) -> str:
    """Convert an LSN from int to the standard "HI/LO" hex notation."""
    high = num >> 32
    low = num & 0xffffffff
    return f"{high:X}/{low:X}"
|
34f48b305434ce324fba4677d916b4a0e2e2bcdf
| 39,345 |
import warnings
def _filter_dict(d, keys):
"""Filter a dictionary to contain only the specified keys.
If keys is None, it returns the dictionary verbatim.
If a key in keys is not present in the dictionary, it gives a warning, but does not fail.
:param d: (dict)
:param keys: (iterable) the desired set of keys; if None, performs no filtering.
:return (dict) a filtered dictionary."""
if keys is None:
return d
else:
keys = set(keys)
present_keys = keys.intersection(d.keys())
missing_keys = keys.difference(d.keys())
res = {k: d[k] for k in present_keys}
if len(missing_keys) != 0:
warnings.warn("Missing expected keys: {}".format(missing_keys), stacklevel=2)
return res
|
aebb8b43261c879d90555cba0ccd921999dbdcbf
| 39,347 |
def get_esc_str(lvl=0, quote="\""):
    """Return the escape string for the given nesting level and quote character."""
    # Each nesting level doubles-plus-one the backslash count: 2^n - 1.
    backslash_count = (2 ** lvl) - 1
    return "\\" * backslash_count + quote
|
263e98dd5d2b86f46a4bd49718c2073d206eb82c
| 39,364 |
def dominated(monom, monom_set):
    """Return True iff `monom` is coordinate-wise <= some monom in `monom_set`."""
    return any(
        all(monom[i] <= other[i] for i in range(len(other)))
        for other in monom_set
    )
|
2808adac581b863f547f71b855ad78bf29af72f1
| 39,365 |
from pathlib import Path
def get_pdf_files(directory: Path) -> list:
    """Return list of all 'VypListek*.pdf' files in entered directory."""
    directory = Path(directory)
    return [
        entry for entry in directory.iterdir()
        if entry.is_file()
        and 'VypListek' in entry.name
        and entry.suffix == '.pdf'
    ]
|
ffab6033546a509c1ef6a2bf2d25961268849b31
| 39,369 |
def group_features_group_filter(groups, person, feature):
    """Return the subset of `groups` for which `person` holds a role whose
    name slug is listed under `feature` in the group type's features."""
    matching = set()
    for group in groups:
        for role in person.role_set.filter(group=group):
            allowed_roles = getattr(role.group.type.features, feature)
            if role.name.slug in allowed_roles:
                matching.add(group)
    return list(matching)
|
dd82852803d6d2141efeb76fe43ffbe111c31ad7
| 39,371 |
def course_key(course):
    """Return a (campus, department, code) tuple usable for sorting courses.

    Args:
        course: The course object.

    Returns:
        A key that may be used to sort course objects.
    """
    campus = course.campus
    department = course.department
    code = course.code
    return (campus, department, code)
|
98253c3cbcc386b052fea15a4db8beebb88a750d
| 39,372 |
def join_root(root, path):
    """Prepend `root` to the absolute path `path`."""
    path_root, relative = path.split_root()
    # Only absolute paths (rooted at b'/') are supported here.
    assert path_root == b'/'
    return root / relative
|
db5a25ef516addc59311cd4c60604d1c8d783444
| 39,374 |
from datetime import datetime
import pytz
def utc_from_timestamp(timestamp: float) -> datetime:
    """Return a timezone-aware UTC datetime from a POSIX timestamp.

    datetime.utcfromtimestamp is deprecated since Python 3.12;
    datetime.fromtimestamp with an explicit tz yields an aware UTC
    datetime directly, without needing pytz.
    """
    from datetime import timezone
    return datetime.fromtimestamp(timestamp, tz=timezone.utc)
|
09d81910f23fa9d7a081d5e39857c5160c743dd2
| 39,375 |
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional
def calculate_input_shapes(
        settings: Dict[str, Any]
) -> Tuple[Tuple[Optional[int], ...], Tuple[Optional[int], ...]]:
    """
    Calculate shapes of inputs based on settings of experiment.

    :param settings:
        configuration of an experiment
    :return:
        shapes of discriminator input and generator input
    """
    channels = settings['data']['shape'][0]
    disc_setup = settings['discriminator']['setup']
    # Discriminator sees an internal patch surrounded by a frame on each side.
    fragment = disc_setup['internal_size'] + 2 * disc_setup['frame_size']
    discriminator_shape = (None, fragment, fragment, channels)
    generator_shape = (None, settings['generator']['setup']['z_dim'])
    return discriminator_shape, generator_shape
|
dc846f77ce28ac68b6bd1c1e5c900e1d4c8d0bcb
| 39,378 |
def _Call(func, name):
"""Call a pkg_resources function.
Args:
func: A function from pkg_resources that takes the arguments
(package_or_requirement, resource_name); for more info,
see http://peak.telecommunity.com/DevCenter/PkgResources
name: A name of the form 'module.name:path/to/resource'; this should
generally be built from __name__ in the calling module.
Returns:
The result of calling the function on the split resource name.
"""
pkg_name, resource_name = name.split(':', 1)
return func(pkg_name, resource_name)
|
cc8d1fec01804cc224737fb3ec49f169c1726a71
| 39,379 |
def system_module(mod):
    """A simple way to determine if a module is a system module.

    Modules with no __file__ (builtins) count as system modules; modules
    whose __file__ is not a string (e.g. None) do not.
    """
    try:
        module_path = mod.__file__
    except AttributeError:
        return True
    try:
        return "lib/python" in module_path
    except TypeError:
        return False
|
d1aacf198ad12788c655c2948b391925b7d73800
| 39,380 |
def _text(x: float, y: float, text: str, fontsize: int = 14):
"""Draw SVG <text> text."""
return f'<text x="{x}" y="{y}" dominant-baseline="middle" ' \
f'text-anchor="middle" font-size="{fontsize}px">{text}</text>'
|
782bd918a6daf43ad1b31695eecb83ba45791323
| 39,381 |
def parse_sections(lines):
    """Parse the input document into sections, separated by blank lines.

    A list of lists is returned. Each item is the list of lines for a
    section; empty sections (e.g. from consecutive blank lines) are omitted.

    The original indexing scheme (section[secid]) raised IndexError when
    the input began with a blank line or contained consecutive blank
    lines, because secid could run ahead of len(section).
    """
    sections = []
    current = []
    for line in lines:
        if line.strip():
            current.append(line)
        elif current:
            # Blank line ends the current section; repeated blanks are ignored.
            sections.append(current)
            current = []
    if current:
        sections.append(current)
    return sections
|
6127f8bfbec56e77e475684d626e44cb75ac8daa
| 39,386 |
def intersection(x1, x2):
    """Histogram Intersection.

    Parameters
    ----------
    x1: numpy.ndarray
        Vector one
    x2: numpy.ndarray
        Vector two

    Returns
    -------
    distance: float
        Histogram intersection between `x1` and `x2`, normalized by the
        smaller of the two histogram sums.
    """
    assert(len(x1) == len(x2))
    overlap = sum(min(a, b) for a, b in zip(x1, x2))
    return float(overlap) / min(sum(x1), sum(x2))
|
055530363d62b0993eee8a9c72c7f759982a8376
| 39,389 |
def i2u(x):
    """Convert an iterable of integer byte values to a UTF-8 decoded string."""
    raw = bytearray(x)
    return raw.decode('utf-8')
|
805caeddbe9f7225de6519d589fd81ec47ec966a
| 39,390 |
def classify(s, data_set, suffixes=None):
    """
    Return True if the data in string s is not junk. Return False if the
    data in string s is classified as 'junk' or uninteresting (empty,
    containing any entry of data_set, or ending with one of suffixes).
    """
    if not s:
        return False
    normalized = s.lower().strip('/')
    for junk_marker in data_set:
        if junk_marker in normalized:
            return False
    if suffixes and normalized.endswith(suffixes):
        return False
    return True
|
e0f832724d2686f6aa0f78daefd009bc1c09c973
| 39,392 |
def convert_angle(start, end):
    """Normalize a polygon's start/end angles to non-negative values.

    For example, start -60, end 60 becomes start 300, end 420.

    Args:
        start (int): polygon start angle
        end (int): polygon end angle

    Returns:
        int, int: start and end converted to values >= 0, with end
        adjusted so that it is not less than start.
    """
    if start < 0:
        start += 360
    if start > end:
        end += 360
    return start, end
|
5d7f56174e7739068bdf6e1e958dfcb806cd7ede
| 39,395 |
def version_to_float(version):
    """Convert a version string (optionally ending in 'b' for beta) to float."""
    stripped = version[:-1] if version.endswith('b') else version
    return float(stripped)
|
2dfa2003fdf7f6344ebccb00cc12c618eb863708
| 39,397 |
import math
def bl_area(sides: list) -> float:
    """
    Calculate the area of a triangle via Heron's formula.

    :param sides: List of 3 sides of triangle
    :return: Area of triangle
    """
    a, b, c = sides[0], sides[1], sides[2]
    s = (1 / 2) * (a + b + c)  # semi-perimeter
    return math.sqrt(s * (s - a) * (s - b) * (s - c))
|
aa2a87a27d06371870bee16f3a53310df7d50fef
| 39,398 |
def regex_remove_df_columns(df, search_string_list):
    """
    Remove columns in a dataframe based on a list of regex search strings.

    :param df: Pandas dataframe
    :param search_string_list: A list of regex strings; columns matching
        any of these are removed.
    """
    for pattern in search_string_list:
        matched_columns = df.filter(regex=pattern).columns
        df = df.drop(matched_columns, axis=1)
    return df
|
2404a9c1a41f0cb8c4c0d5e10ad50ee453a51e16
| 39,400 |
import io
def get_df_as_file(df, float_format='%.f'):
    """Return the dataframe serialized into a rewound string buffer (TSV,
    no header/index, NaN rendered as 'Unknown')."""
    buffer = io.StringIO()
    df.to_csv(buffer, sep='\t', na_rep='Unknown', index=False,
              header=False, float_format=float_format)
    buffer.seek(0)
    return buffer
|
da09fd20c1bc2a1244af3f29c735ec694e1ee45f
| 39,401 |
import math
def kernel(x, f, *args):
    """
    Gaussian-weighted kernel for Monte Carlo integration.

    :param x: float. Gaussian deviate
    :param f: function to integrate
    :param args: list. Additional arguments for function f
    :return: float
    """
    normalization = 1.0 / ((2.0 * math.pi) ** 0.5)
    return normalization * f(x, *args) * math.exp(-0.5 * x ** 2)
|
df9f3d4a05f9ef50b8df589769684e5142a9cdc3
| 39,406 |
import requests
def _request(url: str, token: str, params: dict) -> requests.Response:
    """
    Create a request for the Species+/CITES checklist API and handle
    HTTP exceptions.

    Parameters
    ----------
    url
        Species+/CITES checklist API endpoint.
    token
        Species+/CITES checklist API authentication token.
    params
        Request parameters.

    Returns
    -------
    requests.Response
        Request response.
    """
    headers = {"X-Authentication-Token": token}
    try:
        response = requests.get(url, params=params, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        raise Exception(f"Error calling Species+ API. {err}")
    return response
|
357679de22fdb45cb3ca3aaea8633a5f76ad734c
| 39,407 |
def flatten_image_arrays(imarrays):
    """Given a 3d array of images (array of 2-dim arrays), flatten it
    to a 2d array (array of 1-dim arrays)."""
    n_images = imarrays.shape[0]
    return imarrays.reshape(n_images, -1)
|
d987f9478acc8da978ab0b14951f26784c29b362
| 39,410 |
import re
def _vtx_enabled(g):
    """Detect if the system supports hardware virtualization via /proc/cpuinfo.

    Scans the cpuinfo flags for "svm" (AMD-V) or "vmx" (Intel VT-x).

    :param g: unused; kept for interface compatibility with callers.
    :return: True if a virtualization flag is present, else False.
    """
    # NOTE: the Intel VT-x flag in /proc/cpuinfo is "vmx" — the original
    # pattern searched for "vtx", which never appears, so Intel CPUs were
    # always reported as unsupported.
    regex = re.compile("svm|vmx")
    with open("/proc/cpuinfo") as f:
        for line in f:
            if regex.search(line) is not None:
                return True
    return False
|
79e3082064ab64f73291881c59829cbfe8a970d1
| 39,414 |
import glob
def get_video_sequence(path):
    """
    Return a sorted list of the paths of all .png frames in the video.

    Input:
        path - String defining the path to the sequence of frames
    Return:
        video_sequence - List containing the paths of all frames in the
        sequence as String
    """
    frame_paths = glob.glob(path + "/*.png")
    return sorted(frame_paths)
|
eaaf6ee7c70f3650f5187e19c9f4a854e02d77a1
| 39,416 |
import time
def repeat_execution(fct, every_second=1, stop_after_second=5,
                     verbose=0, fLOG=None, exc=True):
    """
    Runs a function on a regular basis. The function
    is not multithreaded, it returns when all execution
    are done.
    @param fct function to run
    @param every_second every second
    @param stop_after_second stop after a given time or never if None
    @param verbose prints out every execution
    @param fLOG logging function
    @param exc if False, catch exception,
    else does not catch them
    @return results of the function if
    *stop_after_second* is not None
    """
    iter = 0
    # monotonic clock: immune to wall-clock adjustments while scheduling
    start = time.monotonic()
    # end is None means "run forever"
    end = None if stop_after_second is None else start + stop_after_second
    current = start
    res = []
    while end is None or current < end:
        iter += 1
        if exc:
            # exceptions from fct propagate to the caller
            r = fct()
            if verbose > 0 and fLOG is not None:
                fLOG("[repeat_execution] iter={} time={} end={}".format(
                    iter, current, end))
            # results are only collected when a stop time was given
            if stop_after_second is not None:
                res.append(r)
        else:
            # best-effort mode: swallow exceptions, optionally logging them
            try:
                r = fct()
                if verbose > 0 and fLOG is not None:
                    fLOG("[repeat_execution] iter={} time={} end={}".format(
                        iter, current, end))
                if stop_after_second is not None:
                    res.append(r)
            except Exception as e:
                if verbose > 0 and fLOG is not None:
                    fLOG("[repeat_execution] iter={} time={} end={} error={}".format(
                        iter, current, end, str(e)))
        # advance the next deadline past "now", skipping missed slots
        while current <= time.monotonic():
            current += every_second
        # sleep in half-interval steps until the next deadline is reached
        while time.monotonic() < current:
            time.sleep(every_second / 2)
    return res if res else None
|
832c95cbb9285a68708dd90e8dbd7b8e7c09a6d1
| 39,418 |
def print_chunks(client, num=-1):
    """Return a newline-joined list of the [num] most recently learned
    chunks (all of them if num == -1), most recent first."""
    chunks = client.execute_command("pc").split("\n")
    if num > 0:
        chunks = chunks[:num]
    return "\n".join(reversed(chunks))
|
e9b161d0538729bb35a7dd28858b5b9573a14697
| 39,419 |
def split_metrics_by_namespace_and_name(metrics, namespace, name):
    """Split a metrics list by namespace and name.

    Args:
        metrics: list of metrics from pipeline result
        namespace(str): filter metrics by namespace
        name(str): filter metrics by name

    Returns:
        two lists - one of metrics matching both filters and a second of
        those not matching
    """
    matching = []
    rest = []
    for metric in metrics:
        key = metric.key.metric
        is_match = key.namespace == namespace and key.name == name
        (matching if is_match else rest).append(metric)
    return matching, rest
|
8d680801b22aa3a596aff3d81a0cb353c4a7315e
| 39,420 |
import itertools
def common_age(a, b):
    """
    Calculate the number of ages in common between two lists of ages,
    allowing for ages to be one year apart.

    Parameters
    ----------
    a: list
        list of age strings to be compared to b
    b: list
        list of age strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    integer
        number of ages in common

    Example
    --------
    >>> list_1 = ['15', '20', '2']
    >>> list_2 = ['15', '15', '20', '2', '99']
    >>> common_age(list_1, list_2)
    4
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # Count cross-product pairs whose ages differ by at most one year.
    return sum(
        1 for x, y in itertools.product(a, b) if abs(int(x) - int(y)) <= 1
    )
|
10dc4299cf81ce7e611e906dc752afb591e472ce
| 39,421 |
def split_dict(dic, *keys):
    """Return two copies of the dict. The first will contain only the
    specified items. The second will contain all the *other* items from
    the original dict.

    Raises KeyError if any requested key is missing from the dict.

    Example::

        >>> split_dict({"From": "F", "To": "T", "Received": "R"}, "To", "From")
        ({'From': 'F', 'To': 'T'}, {'Received': 'R'})

    (The original docstring example was not valid Python syntax.)
    """
    for k in keys:
        if k not in dic:
            raise KeyError("key {!r} is not in original mapping".format(k))
    selected = {}
    others = {}
    for key, value in dic.items():
        if key in keys:
            selected[key] = value
        else:
            others[key] = value
    return selected, others
|
f4df3e166e484b6b15abed673503c87ee9ce55c4
| 39,422 |
import math
def distance_bw_points(alpha, beta):
    """Return the Euclidean distance between two Point objects.

    alpha, beta: Point objects with .x and .y attributes
    """
    dx = beta.x - alpha.x
    dy = beta.y - alpha.y
    return math.sqrt(dx ** 2 + dy ** 2)
|
887c369b078b73963acd56d6c2221149dce2fc3e
| 39,423 |
from functools import reduce
def attrgetter(item, default=''):
    """operator.attrgetter with a default value for missing attributes."""
    parts = item.split('.')

    def getter(obj):
        for name in parts:
            obj = getattr(obj, name, default)
        return obj

    return getter
|
b95b24b9333e0e0adadec4cfc9862ee574e1c017
| 39,426 |
def zero_size_pairs(width, height):
    """Create a generator yielding pairs of sizes.

    For each yielded pair at least one of the sizes contains a 0.
    """
    sizes = ((width, height), (width, 0), (0, height), (0, 0))
    for first in sizes:
        for second in sizes:
            if 0 in first or 0 in second:
                yield (first, second)
|
8507ae13e26fc1d1bcf6b43988a0c331b58bb6c0
| 39,428 |
def createBoundaries4Colorbar(df, step):
    """
    Create a list of colorbar boundaries from the 'ranks' column.

    :param pandas.DataFrame df: Data frame with a 'ranks' column.
    :param int step: Difference between each number in the sequence.
    :returns: **boundariesCB** (*list*) - list of boundary values.
    """
    rank_min = df['ranks'].min()
    rank_max = df['ranks'].max()
    boundaries = list(range(0, rank_max, step))
    boundaries[0] = rank_min
    # If the max is close to the last boundary, snap it; otherwise append it.
    if rank_max - boundaries[-1] < step / 2:
        boundaries[-1] = rank_max
    else:
        boundaries.append(rank_max)
    return boundaries
|
26b30c78bdc6b18837604270f86e29f207c63098
| 39,429 |
def diff(old, new):
    """Return the set of differences between two dicts.

    @return: A 3-tuple of dicts with the changes that would need to be
        made to convert C{old} into C{new}: C{(creates, updates, deletes)}
    """
    # dict.iterkeys() is Python 2 only; the original raised AttributeError
    # on Python 3, where plain keys() provides the needed view.
    new_keys = set(new.keys())
    old_keys = set(old.keys())
    creates = {key: new[key] for key in new_keys - old_keys}
    updates = {
        key: new[key]
        for key in old_keys & new_keys
        if old[key] != new[key]
    }
    deletes = {key: old[key] for key in old_keys - new_keys}
    return creates, updates, deletes
|
cd58953a59279e8a0d392fa0051ee7cd1ff26a17
| 39,434 |
import base64
def decode_base64(data):
    """Decode a base64 string (ignoring embedded newlines/spaces) to binary."""
    cleaned = data.replace("\n", "").replace(" ", "")
    return base64.b64decode(cleaned)
|
5092d5b87f0f5a98b565ff5d066ed895a62a1af2
| 39,435 |
def log_on_response_code(response, log, msg, code):
    """
    Log `msg` when response.code equals `code`; always return the response.
    """
    matched = (response.code == code)
    if matched:
        log.msg(msg)
    return response
|
68a13a67155228a80ad3c65f1c52dca6197cc531
| 39,438 |
def _mangle_attr(name):
"""
Mangle attributes.
The resulting name does not startswith an underscore '_'.
"""
return 'm_' + name
|
32860a624c7e89a4240cea9e2f2777a313637225
| 39,440 |
def pad(text, min_width, tabwidth=4):
    """
    Fill the text with tabs at the end until the minimal width is reached.
    """
    # Fractional tab counts are truncated after adding one guaranteed tab.
    tabs_needed = (min_width / tabwidth) - (len(text) / tabwidth) + 1
    return text + '\t' * int(tabs_needed)
|
4f31bad4029be2dd74ff49613189b0d04ec23ee4
| 39,442 |
def cat_replace(m, cats):
    """Replace a category reference with a regex alternation matching it.

    Args:
        m: Match object with two groups: a (possibly empty) digit string
            (the number of a numbered category) and the category name.
        cats: The dict of categories to use in replacement.

    Returns:
        Without a number, a pattern matching every item in the category.
        With a number n and category c, the pattern captures into the
        named group 'nc' + n + '_' + c (longest alternatives first).
        If the category is not in cats, the original string is returned.
    """
    number, category = m.groups()
    if category not in cats:
        return m.group(0)
    members = cats[category]
    if not number:
        return '(' + '|'.join(members) + ')'
    # Longest-first ordering so the regex engine prefers longer matches.
    longest_first = '|'.join(sorted(members, key=len, reverse=True))
    return '(?P<nc{}_{}>{})'.format(number, category, longest_first)
|
31e0aa08c27f8496e6d3fb28582cd88732e05191
| 39,448 |
def coerce(P, x):
    """
    Coerce ``x`` to type ``P`` if possible.
    EXAMPLES::
    sage: type(5)
    <type 'sage.rings.integer.Integer'>
    sage: type(coerce(QQ,5))
    <type 'sage.rings.rational.Rational'>
    """
    # Prefer the parent's coercion protocol; fall back to calling P
    # directly when _coerce_ is absent. Note an AttributeError raised
    # *inside* _coerce_ also triggers the fallback — intentional here.
    try:
        return P._coerce_(x)
    except AttributeError:
        return P(x)
|
c572565d56bb717cfb90dfb5ef9bfd00c8496358
| 39,449 |
def uppercase(string: str):
    """Safely recast a value to uppercase; non-strings pass through unchanged."""
    if hasattr(string, 'upper'):
        return string.upper()
    return string
|
7cf6ae03fb5234a3012350c5a8cf406ef86fc2a6
| 39,451 |
import re
def get_subject_line(message):
    """Separate the commit subject from the commit type prefix.

    Returns None for an empty/falsy message. Splits on ': ' (conventional
    'type: subject') or '] ' ('[tag] subject') and returns the remainder.
    """
    if not message:
        return None
    return re.split(': |] ', message)[1]
|
06d7a90d18053fe37c7f81b07d63de34c4a7e963
| 39,453 |
def package_url(package_name):
    """Return PyPi package URL for given package name."""
    return 'https://pypi.python.org/pypi/{}/'.format(package_name)
|
a444bfa81f8be595848e97ee6e972d0e235355c1
| 39,454 |
def is_model_on_gpu(model):
    """
    Check whether the given model lives on GPU or CPU.
    Assumption: the model is on a single device.

    :return: True if the model is on GPU, False if on CPU
    """
    first_param = next(model.parameters())
    return first_param.is_cuda
|
64a4fcabbde843b6b26e6f3a73c51e580304f1a4
| 39,455 |
def add_price(price: float, man_value: float, reduce_value: float):
    """
    Use full-discount coupons to calculate the effective per-item price.

    :param price: goods price
    :param man_value: minimum limit price to trigger the discount
    :param reduce_value: discount amount
    :return: real per-item price after spreading the discount
    """
    if price <= 0:
        return price
    # Find the smallest quantity whose total reaches the discount threshold.
    count = 1
    while price * count < man_value:
        count += 1
    return (price * count - reduce_value) / count
|
80cf369562f37fa70937dcae1d8fc1c86e5d807b
| 39,460 |
def envi_hdr_info(filename):
    """Return a dict of ENVI header info. All values are strings.

    Only lines of the exact form 'key = value' are kept.
    """
    info = {}
    with open(filename, 'r') as fid:
        for line in fid:
            parts = line.split("=")
            if len(parts) == 2:
                info[parts[0].strip()] = parts[1].strip()
    return info
|
65c016cf4f7c53a8ec970a1a23a3ca6d2cfb0aa5
| 39,461 |
def truncate(s, length):
    """Truncate a string to a specific length.

    The result string is never longer than ``length``; '..' is appended
    when truncation occurred.
    """
    if len(s) <= length:
        return s
    return s[:length - 2] + '..'
|
a94a1dd852749a3d4f96fa3a58db32d50a882074
| 39,462 |
def source_information_from_method(source_method):
    """Obtain source information from a method of a source object.

    :param source_method: Source method that is used
    :type source_method: method
    :returns: string identifying the source object the method belongs to
    :rtype: str
    """
    owner = source_method.__self__
    return (f"source {owner.name} of type {owner.driver} "
            f"using method {source_method.__name__}")
|
8f6cd465eb4b0979089753ce6cce4cdb96ab8283
| 39,463 |
def get_event_description(e):
    """Return the 'description' field for the event (None when absent)."""
    description = e.get('description')
    return description
|
96fde5fe77964e364907e6321cbd8352ee2c6bc1
| 39,474 |
def get_references(name, references):
    """Generate a reST 'seealso' section with references for the given
    operator or module; empty string when none are registered."""
    short_name = name[12:]  # strip the leading "nvidia.dali." prefix
    if short_name not in references:
        return ""
    lines = [".. seealso::\n"]
    for desc, url in references[short_name]:
        lines.append(f" * `{desc} <../{url}>`_\n")
    return "".join(lines)
|
c0b92763ae3f63aebb0f3157a90d97d77d6e5dcf
| 39,476 |
def make_options_bank_drop(values):
    """
    Helper generating the data format the dash dropdown component wants.
    """
    return [{"label": value, "value": value} for value in values]
|
3c89c73b6e3cdc6fe07a71080577320f0af6729f
| 39,481 |
from typing import Iterable
def round_values(data, places=3):
    """
    Recursively round numeric values in nested structures.

    Args:
        data: Value or structure (dict / iterable) with values
        places (int): Decimal places

    Returns:
        Rounded value / structure with rounded values (iterables other
        than dicts become lists; strings pass through unchanged)
    """
    if isinstance(data, dict):
        return {k: round_values(v, places) for k, v in data.items()}
    # Strings are Iterable too: without this guard a str input recursed
    # forever (each character is itself an iterable 1-char string).
    if isinstance(data, (str, bytes)):
        return data
    if isinstance(data, Iterable):
        return [round_values(x, places) for x in data]
    return round(data, places)
|
68bdcabff375b81567c0c710b55f21572e9ccda1
| 39,483 |
from typing import Iterable
from typing import List
def sort_unique_lex(iterable: Iterable) -> List:
    """Return a list in lexically ascending order without duplicate entries."""
    return sorted(set(iterable))
|
27ff573803aad34e46859b3607294f864bf6d016
| 39,487 |
def sequential_weighted_avg(x, weights):
    """Return a sequence by weighted averaging of x (a sequence of vectors).

    Args:
        x: batch * len2 * hdim
        weights: batch * len1 * len2, sum(dim = 2) = 1
    Output:
        x_avg: batch * len1 * hdim
    """
    x_avg = weights.bmm(x)
    return x_avg
|
52497ab49ec4e672631a2f277a48d4eefec36b77
| 39,489 |
import math
def hosts_from_prefixlength(prefixlength):
    """Calculate the number of hosts supported by a given prefix length.

    Args:
        prefixlength (int): CIDR mask length.

    Returns:
        hosts (int): Hosts in a subnet of that prefix length (minus the
        network and broadcast addresses).
    """
    host_count = 2 ** (32 - prefixlength) - 2
    return int(host_count)
|
471e801fd91b190c377fc850c9ada1c4beb71ed6
| 39,493 |
def find_height_profile_dimension(dat_file):
    """Find the dimension of the provided height profile .dat file.

    1D files have 2 columns, 2D files have 8.

    Args:
        dat_file (str): full path to height profile .dat file.

    Returns:
        dimension (int): found dimension.
    """
    with open(dat_file, 'r') as f:
        first_line = f.readline()
    columns = first_line.strip().split()
    return 1 if len(columns) == 2 else 2
|
f982396e839eea3ffc5e6bde4386e07e93fc9f14
| 39,498 |
def _get_last_doc_ref(path, doc):
"""Mutate a document top-down using a path.
:param pathlib.Path path: The path.
:param dict doc: The document.
:rtype: dict
"""
for part in path.parts:
if part not in doc:
doc[part] = {}
doc = doc[part]
return doc
|
316c2579897e1ce5834a2bf34fc7c5929c7868c9
| 39,501 |
def set_var_conditional(context, condition=None, condition_var=None, compare=None, else_value=None, **kwds):
    """
    Conditionally set template variables. Like a 'with' block, but not a block tag.

    :param context: template context (automatically provided by django)
    :param kwds: named parameters with their respective values
    :param condition_var: paired with compare; truthiness decides between
        the original assignment and else_value
    :param compare: paired with condition_var (see above)
    :param condition: alternative to condition_var & compare: assign the
        original value if truthy, else_value if falsy
    :param else_value: value assigned to the variable(s) when the condition is falsy
    :return: this tag doesn't render
    """
    # condition_var/compare take precedence over a directly supplied condition.
    if condition_var is not None:
        condition = (condition_var == compare)
    for key, value in kwds.items():
        context[key] = value if condition else else_value
    return ''
|
4a4de9711dc6560ff6eb4806029b7a12980da38f
| 39,508 |
import re
def gruppercase(value):
    """Correctly uppercases all Greek characters in a string.

    str.upper() alone keeps accents/diaeresis on Greek capitals; Greek
    all-caps convention drops them, so a substitution pass maps every
    accented/variant form to its plain capital.
    """
    # Plain lowercase alphabet and its capital counterparts.
    grletters = [u'α', u'β', u'γ', u'δ', u'ε', u'ζ', u'η', u'θ', u'ι', u'κ', u'λ', u'μ', u'ν', u'ξ', u'ο', u'π', u'ρ', u'σ', u'τ', u'υ', u'φ', u'χ', u'ψ', u'ω']
    # Accented lowercase vowels → unaccented capitals.
    grletters_accent = [u'ά', u'έ', u'ή', u'ί', u'ό', u'ύ', u'ώ']
    # Accented capitals (produced by value.upper()) → unaccented capitals.
    grletters_upper_accent = [u'Ά', u'Έ', u'Ή', u'Ί', u'Ό', u'Ύ', u'Ώ']
    # Lowercase vowels with diaeresis → plain capitals.
    grletters_upper_solvents = [u'ϊ', u'ϋ']
    # Final sigma → capital sigma.
    grletters_other = [u'ς']
    grletters_to_uppercase = [u'Α', u'Β', u'Γ', u'Δ', u'Ε', u'Ζ', u'Η', u'Θ', u'Ι', u'Κ', u'Λ', u'Μ', u'Ν', u'Ξ', u'Ο', u'Π', u'Ρ', u'Σ', u'Τ', u'Υ', u'Φ', u'Χ', u'Ψ', u'Ω']
    grletters_accent_to_uppercase = [u'Α', u'Ε', u'Η', u'Ι', u'Ο', u'Υ', u'Ω']
    grletters_upper_accent_to_uppercase = [u'Α', u'Ε', u'Η', u'Ι', u'Ο', u'Υ', u'Ω']
    grletters_upper_solvents_to_uppercase = [u'Ι', u'Υ']
    grletters_other_to_uppercase = [u'Σ']
    # Zip sources and targets positionally into one substitution table.
    grlowercase = grletters + grletters_accent + grletters_upper_accent + grletters_upper_solvents + grletters_other
    gruppercase = grletters_to_uppercase + grletters_accent_to_uppercase + grletters_upper_accent_to_uppercase + grletters_upper_solvents_to_uppercase + grletters_other_to_uppercase
    grkeys = dict(zip(grlowercase, gruppercase))
    # Uppercase first, then replace any remaining mapped characters in one pass.
    pattern = "|".join(grkeys.keys())
    return re.sub(pattern, lambda m: grkeys[m.group()], value.upper())
|
d505e5b1f3b9906c5f41c5d60ab52a9c2b6db685
| 39,510 |
def make_unique_node(graph, name):
    """Append as many '_' postfixes to `name` as needed to make a unique
    name for a new node in `graph`.

    Parameters
    ----------
    graph : nx.Graph
        graph for which the node is created.
    name : str
        name of the new node.

    Returns
    -------
    Resulting name, composed of `name` plus possibly several '_' characters.
    """
    candidate = name
    suffix = ''
    while candidate in graph:
        suffix += '_'
        candidate = name + suffix
    return candidate
|
8323e2fb36ca8bfe103f86bf7cf0a50dd013edfe
| 39,511 |
def get_crate_from_line(line):
    """Get the crate name from a Rust `use` statement (None otherwise).

    Local-crate imports ("use crate...") are skipped; the trailing
    character (';' or newline) is stripped from the result.
    """
    if not line.startswith("use ") or line.startswith("use crate"):
        return None
    return line[4:-1]
|
8bda2d140e5f6c9ceb61764d64ab16c25ef450da
| 39,514 |
def scale_edge_weights(edge_weights, w):
    """Return a copy of the edge-weight dict with every weight scaled by w."""
    return {edge: w * weight for edge, weight in edge_weights.items()}
|
b69e97f00943843fa117b1f0d7e51ab9dfe8a07b
| 39,515 |
from typing import Any
from typing import Dict
import yaml
def load_yaml(yaml_filepath: str, safe_load: bool = True, **kwargs: Any) -> Dict:
    """
    Load a YAML file.

    Parameters
    ----------
    yaml_filepath : str
    safe_load : bool, optional (default: True)
        Triggers the usage of yaml.safe_load. yaml.load can call any
        Python function and should only be used when the source of the
        configuration file is trusted.
    **kwargs : Any
        Arbitrary keyword arguments passed to the loader functions.

    Returns
    -------
    config : Dict
    """
    with open(yaml_filepath) as stream:
        if safe_load:
            return yaml.safe_load(stream)
        return yaml.load(stream, **kwargs)  # noqa
|
4d490e02c432c5866c62ba31aacad4bed69a6711
| 39,519 |
def remove_port_parameter(sw, port, col, keys):
    """Removes 'keys' in 'col' section from 'port' on 'sw'."""
    key_list = ' '.join(str(key) for key in keys)
    command = "remove port %s %s %s" % (port, col, key_list)
    return sw(command, shell='vsctl')
|
53c2d8db4ba96987b16cea90fe192f0e07e0a0a3
| 39,520 |
def rectSet(rectList):
    """Return the list of rects with duplicates removed, preserving order.

    A list (not a set) is used because rects may be unhashable.
    """
    unique = []
    for rect in rectList:
        if rect not in unique:
            unique.append(rect)
    return unique
|
839c64a165d74ba58683870ba8ae6348a944d5f2
| 39,523 |
import re
def remove_block(text, block_tag):
    """Remove the specified block from the template text.

    A block is marked by [[block_name]]block contents[[/block_name]].

    Parameters
    ----------
    text : str
        The template text to remove the block from.
    block_tag : str
        The name of the block to remove; the span
        [[block_tag]]contents[[/block_tag]] is deleted.
    """
    pattern = r"\[\[{0}\]\](.|\r|\n)*\[\[/{0}\]\]".format(block_tag)
    return re.sub(pattern, "", text, flags=re.MULTILINE)
|
e5fd7b15650834dccf950bd8897afca03d0b908c
| 39,525 |
def get_line_data(line):
    """Extract status and content from a diff line object.

    Status is "i" for an inserted line (no old line number), "d" for a
    deleted line (no new line number), or None for an unchanged line.
    """
    if line.old_lineno == -1:
        line_status = "i"
    elif line.new_lineno == -1:
        line_status = "d"
    else:
        line_status = None
    return {"status": line_status, "content": line.content}
|
89daba7e26bc8bd44e24388c32198c3f08afcac8
| 39,528 |
from typing import Sequence
from typing import List
from typing import Tuple
from typing import Dict
import copy
import random
def configuration_model(
    *, degrees: Sequence[int], max_trials: int = 10, max_fails: int = 1000
) -> List[Tuple[int, int]]:
    """Configuration model from degree list.
    Generates undirected simple graph: no self-loops nor multiedges.
    Returns empty list if not feasible.
    Args:
        degrees: Degree list.
        max_trials: Max number of trials with this degree sequence.
        max_fails: Max number of fails (not added pair) in a trial.
    Returns:
        adjacency: Adjacency list with tuples of pairs (n1, n2), with
            n1 < n2.
    Raises:
        ValueError: If the sum of degrees is uneven.
    """
    # check if sum of stubs is even
    if sum(degrees) % 2 != 0:
        err = f"Sum of degrees ({sum(degrees)}) must be even."
        raise ValueError(err)
    # backup stubs and edges
    # stubs_bu repeats each node id once per unit of degree;
    # edges_bu starts every node with an empty neighbor list.
    stubs_bu = []
    edges_bu: Dict[int, List[int]] = {}
    for i, el in enumerate(degrees):
        aux = [i] * el
        stubs_bu += aux[:]
        edges_bu[i] = []
    trials = 0
    while trials < max_trials:
        # Each trial restarts from pristine copies of the stub/edge state.
        stubs = copy.copy(stubs_bu)
        edges = copy.deepcopy(edges_bu)
        fails = 0
        while stubs:
            # Draw two distinct stubs at random; aux excludes the first pick
            # so the same stub instance cannot be chosen twice.
            n1 = random.choice(stubs)
            aux = stubs[:]
            aux.remove(n1)
            n2 = random.choice(aux)
            # Reject self-loops and multi-edges; count them as failures.
            if n1 != n2 and n2 not in edges[n1]:
                edges[n1].append(n2)
                edges[n2].append(n1)
                stubs.remove(n1)
                stubs.remove(n2)
            else:
                fails += 1
                if fails > max_fails:
                    # Too many rejections: abandon this trial and retry.
                    trials += 1
                    break
        adjacency = [(i, j) for i in edges for j in edges[i] if i < j]
        return adjacency
    return []
|
3e0c2088f3a12fdcce8347d1f3b143a3aee3dada
| 39,529 |
from typing import List
from pathlib import Path
def get_filepaths(tutor_data_path: str) -> List[Path]:
    """
    Recursively collect the paths of every ``.npy`` file under a
    directory.

    Parameters
    ----------
    tutor_data_path : str
        Directory to search.

    Returns
    -------
    List
        Paths of the ``.npy`` files found at any depth.
    """
    root = Path(tutor_data_path)
    return [match for match in root.rglob('*.npy')]
|
46a657e100a638b841eeee8f0906b7f6bf668bec
| 39,530 |
def subreadNamesToZmwCoverage(qnames):
    """Count the distinct ZMWs represented by PacBio subread names.

    A PacBio subread QNAME follows the convention
    {movieName}/{holeNumber}/{qStart}_{qEnd}. One hole (ZMW) can yield
    several subreads, so only the {movieName}/{holeNumber} prefix is
    counted.

    Parameters
    ----------
    qnames : list
        read names of PacBio subreads

    Returns
    -------
    int
        Number of ZMWs represented by the above subreads
    """
    holes = {"/".join(name.split("/")[:2]) for name in qnames}
    return len(holes)
|
0a01ff4f5d5231173ad08d7f0b1cd9b4d9004f2b
| 39,532 |
def prevent_sentence_boundary_detection(doc):
    """
    Disable Spacy's sentence splitting by flagging every token as not
    starting a sentence.
    More info: https://github.com/explosion/spaCy/issues/1032
    :param doc: a Spacy doc
    :return: the same doc, with sentence detection disabled
    """
    for tok in doc:
        # Marking every token as "not a sentence start" turns the
        # splitter off entirely.
        tok.is_sent_start = False
    return doc
|
92fb37cf80233c13392e7fa36c684af5b9e2d0ec
| 39,535 |
def _classdir(klass):
    """Return a set of the accessible attributes of class/type klass.

    Collects the names in klass's own ``__dict__`` plus, recursively,
    those of every base class. Since only the names matter, the merge
    order across bases is irrelevant.
    """
    collected = set(getattr(klass, '__dict__', None) or ())
    for base in getattr(klass, '__bases__', None) or ():
        collected |= _classdir(base)
    return collected
|
d1011da63d9205b5a4abf20d8877b1c20a7fea44
| 39,537 |
import uuid
def _get_random_string() -> str:
    """Generate a fresh random identifier as a 32-character hex string."""
    token = uuid.uuid4()
    return token.hex
|
594bf0034c2812cfd1443152f22aa302c01cefb9
| 39,548 |
def align_corpora(old_corpus, new_corpus, remove_empty=True):
    """Return a copy of `new_corpus` aligned to `old_corpus`: (1) its
    word-to-integer mapping agrees with `old_corpus`, and (2) words of
    `new_corpus` that do not appear in `old_corpus` are dropped.
    Optionally removes documents left empty by the filtering.
    """
    unknown = [w for w in new_corpus.words if w not in old_corpus.words]
    aligned = new_corpus.apply_stoplist(unknown)
    if remove_empty:
        aligned.remove_empty()
    # Map each integer id in the filtered corpus to the id the same
    # word carries in the old corpus.
    remap = {new_id: old_corpus.words_int[word]
             for new_id, word in enumerate(aligned.words)}
    for pos, token_id in enumerate(aligned.corpus):
        aligned.corpus[pos] = remap[token_id]
    aligned.words = old_corpus.words.copy()
    aligned._set_words_int()
    return aligned
|
d65f0f18bca986bf7e2ad8ad213a8666cf239be3
| 39,556 |
import re
def get_metrics_lines(lines):
    """
    Return the three-line metrics section: the '## METRICS CLASS' header
    line plus the two lines that follow it.
    """
    header_positions = [idx for idx, line in enumerate(lines)
                        if line.startswith('## METRICS CLASS')]
    start = header_positions[0]
    return lines[start:start + 3]
|
29872090b2b847d0c4e88511b16f2a1b3deba303
| 39,564 |
def simple_validator_cmds(*args, **kwargs):
    """Used with SetSawtoothHome in integrationtools, to have more
    control at the test file level over how the validator is started.
    All arguments are accepted and ignored.

    Returns:
        list of str : The validator startup command.
    """
    startup = 'sawtooth-validator -v'
    return [startup]
|
28f432ff077569165844c39158ba05b215258635
| 39,567 |
import itertools
def next_data(it):
    """
    Advance *it* past the next opening double quote, then collect the
    characters up to (but not including) the matching closing quote.
    The closing quote, if present, is consumed.

    :param it: Character iterator.
    :returns: Data found.
    """
    # Skip forward to (and consume) the opening quote; like the
    # itertools version, this raises StopIteration when none is left.
    while next(it) != '"':
        pass
    chars = []
    for ch in it:
        if ch == '"':
            break
        chars.append(ch)
    return ''.join(chars)
|
5ceb48c04cab857c7b00614537e2abac17cc82cb
| 39,572 |
def treatment_link(flora_id, taxon_id):
    """Build a link to the treatment page."""
    base = 'http://www.efloras.org/florataxon.aspx'
    return f'{base}?flora_id={flora_id}&taxon_id={taxon_id}'
|
e853b1b767407f459881c716a4443795a9379f86
| 39,574 |
def dims_to_targetshape(data_dims, batch_size=None, placeholder=False):
    """Prepends a batch dimension to a data shape tensor.

    The leading entry is the batch size when one is given, ``None`` for
    placeholder shapes, and ``-1`` (infer) otherwise.

    Args:
      data_dims: list, indicates shape of the data, ignoring the batch
        size. For an RGB image this could be [224, 224, 3] for example.
      batch_size: scalar, indicates the batch size for SGD.
      placeholder: bool, indicates whether the returned dimensions are
        going to be used to construct a TF placeholder.

    Returns:
      shape: A tensor with a batch dimension prepended to the data shape.
    """
    if batch_size not in (None, 0):
        leading = [batch_size]
    elif placeholder is True:
        leading = [None]
    else:
        leading = [-1]
    return leading + list(data_dims)
|
30cc97cdeca53e835fc51288e943235a12269146
| 39,576 |
def format_number(number):
    """
    Format a number for readability: integers get thousands separators
    (10000 -> 10,000), None renders as 'Unknown', and anything else is
    shown with two decimal places.
    """
    if isinstance(number, int):
        return format(number, ',d')
    if number is None:
        return 'Unknown'
    return format(number, '3.2f')
|
9fa26e43e86c12834b460ca68583f05782f2531c
| 39,579 |
import json
def derive_mapping_dict(obj):
    """Fetch the mapping dict from an object.

    Copies the object's ``__dict__``, drops SQLAlchemy's internal
    ``_sa_instance_state`` entry, and JSON-decodes the ``mappings``
    field when present. Returns None for a falsy *obj*.

    :param obj: The mapping object
    :return: Mappings dict
    :rtype: `dict`
    """
    if not obj:
        return None
    data = dict(vars(obj))
    data.pop("_sa_instance_state", None)
    raw = data.get("mappings")
    # If output is present, json decode it
    if raw:
        data["mappings"] = json.loads(raw)
    return data
|
b11881c733d9ef3c8e1f9a4475b65138225c10d9
| 39,584 |
from pathlib import Path
def filesize(fname):
    """
    Return the size of the file *fname* in bytes.
    """
    return Path(fname).stat().st_size
|
1cf2cb0fbab2533e69c5200b25a134ee6dd61424
| 39,588 |
def sort_proxy_stats_rows(proxy_stats, column):
    """
    Sorts proxy statistics ascending by the specified column.

    Args:
      proxy_stats: A list of proxy statistics.
      column: An int representing a column number the list should be
        sorted by.
    Returns:
      A new list sorted by the specified column.
    """
    def column_value(row):
        return row[column]
    return sorted(proxy_stats, key=column_value)
|
8fd6dd57bbb893aebfd282e4bf0718eee9d8b383
| 39,590 |
def __getpixel(image, x, y):
    """ Get pixel from image at given (x, y) coordinate.

    When the coordinate falls outside the image, a non-stroke (white)
    pixel is returned instead: an RGB triple for 'RGB' images, the
    scalar 255 otherwise.

    Arguments:
    image -- The PIL image object.
    x -- X coordinate of the pixel.
    y -- Y coordinate of the pixel.
    """
    width, height = image.size
    if 0 <= x < width and 0 <= y < height:
        return image.getpixel((x, y))
    return (255, 255, 255) if image.mode == 'RGB' else 255
|
662bc721586c7c49172eb6f3f6619336c17ab055
| 39,593 |
def word_to_bag(word):
    """ Convert word to bag-of-chars: its distinct characters, sorted. """
    unique_chars = sorted(set(word))
    return ''.join(unique_chars)
|
a31346d604ed2868c8bd722f5e004288724a9397
| 39,599 |
def load_rfam_urs_accessions_from_file(urs_acc_list):
    """
    Loads all existing Rfam URS accessions in a python
    dictionary

    urs_acc_list: A .txt file with all URS accession already
    in Rfam (one per line)

    return: A python dictionary with all unique URS accessions as
    keys; every value is the empty string.
    """
    rfam_urs_accs = {}
    # `with` guarantees the file handle is closed even if reading
    # raises, unlike the previous bare open()/close() pair.
    with open(urs_acc_list, 'r') as fp:
        for line in fp:
            accession = line.strip()
            # setdefault keeps the first occurrence, matching the old
            # "insert only if absent" check.
            rfam_urs_accs.setdefault(accession, "")
    return rfam_urs_accs
|
14f41a6241cbf7e8138a44f5e86a5b7a50df4edb
| 39,601 |
import math
def degrees(rad_angle):
    """Converts an angle in radians to degrees, mapped to the range [-180,180]"""
    angle = rad_angle * 180 / math.pi
    # Wrap into range. BUG FIX: the original only reduced angles above
    # 180 (it assumed a positive radian input), so negative inputs such
    # as -3*pi/2 escaped the documented [-180, 180] range.
    while angle > 180:
        angle = angle - 360
    while angle < -180:
        angle = angle + 360
    return angle
|
df366e3eef93f0f51feca48b83072bf8b13eba78
| 39,603 |
def audit(log):
    """
    Single method to ensure that the log object is an audit log (by
    binding the audit log param).

    :param log: a bound log object
    :returns: a bound log object with the keyword that marks it as an
        audit log already bound
    """
    audit_bound = log.bind(audit_log=True)
    return audit_bound
|
54b25800392c49426000a4144401c51834a35848
| 39,604 |
def conditions(ctx):
    """
    Tests the stack to verify that our glitch worked, i.e. a fault
    occurred during the AES computation: returns False when the 16
    bytes at stack offset 192 equal the expected AES ciphertext,
    True otherwise.
    """
    observed = bytes(ctx['stack'][192:208]).hex()
    expected = "3ad77bb40d7a3660a89ecaf32466ef97"
    return observed != expected
|
1eddf4d0e22d4cbcc58348f073e84db5ec82bc52
| 39,606 |
from pydantic import BaseModel # noqa: E0611
def get_model_defaults(model_class: BaseModel):
    """Return the default values for fields in a Pydantic BaseModel.

    Fields without a declared default map to None (a default may also
    legitimately be None).

    Returns
    -------
    dict
    """
    properties = model_class.schema()["properties"]
    return {name: spec.get("default") for name, spec in properties.items()}
|
f68483fbb59f0fa44365ac04ffd4514e4518efe7
| 39,607 |
def create_locations(client, create_smb=False, create_s3=False):
    """
    Convenience function for creating DataSync locations.
    Locations must exist before tasks can be created.

    Returns a dict with the ARNs of the locations that were created
    (None for the ones that were not requested).
    """
    arns = {"smb_arn": None, "s3_arn": None}
    if create_smb:
        smb_response = client.create_location_smb(
            ServerHostname="host",
            Subdirectory="somewhere",
            User="",
            Password="",
            AgentArns=["stuff"],
        )
        arns["smb_arn"] = smb_response["LocationArn"]
    if create_s3:
        s3_response = client.create_location_s3(
            S3BucketArn="arn:aws:s3:::my_bucket",
            Subdirectory="dir",
            S3Config={"BucketAccessRoleArn": "role"},
        )
        arns["s3_arn"] = s3_response["LocationArn"]
    return arns
|
73cb87329a8105187721dba41a37d9ff20095fc4
| 39,610 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.