content | sha1 | id
---|---|---|
def _centroid(gen):
"""Find the centroid of the coordinates given by the generator. The
generator should yield pairs (longitude, latitude).
:return: Pair (longitude, latitude) of the centroid.
"""
n, lon, lat = 0, 0, 0
for pair in gen:
lon += pair[0]
lat += pair[1]
n += 1
if n == 0:
raise ValueError("No points found to compute centroid from")
return lon / n, lat / n
|
27f4df5257023ce4f6f286bc3b8f7b0206c6f61b
| 56,946 |
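A minimal usage sketch for `_centroid` above; the coordinates are made-up illustrative values and assume the function is in scope:

# Example usage, assuming _centroid from the snippet above is in scope.
points = [(-122.4, 37.8), (-122.2, 37.6), (-122.0, 37.7)]  # made-up (lon, lat) pairs
lon, lat = _centroid(iter(points))
print(round(lon, 2), round(lat, 2))  # -122.2 37.7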
import re
def __remove_punctuations__(text):
"""
Removes all punctuations in the specified text.
It matches character(s) that is/are not word character(s)
or spaces and replaces them with empty strings.
:param text: the text whose punctuations to be removed
:return: the text after removing the punctuations
"""
return re.sub(r'[^\w\s]', '', text)
|
5626d1267e2cff26597020a703ac2f35f982b921
| 408,396 |
def _split_image(image, axis='Horizontal'):
"""Splits an image into two halves and returns each half.
Parameters
----------
image : np.ndarray
Image to split in half.
axis : string (default = 'Horizontal')
Which axis to split the image. If 'Horizontal', upper and lower halves
of the specified image are returned. If 'Vertical', left and right
halves of the specified image are returned.
Returns
-------
half1, half2 : np.ndarrays of type np.uint8
Image halves, either upper and lower or left and right.
"""
nrows, ncols = image.shape
if axis == 'Horizontal':
        half1 = image[:nrows // 2, :] # upper half
        half2 = image[nrows // 2:, :] # lower half
        return half1, half2
    half1 = image[:, :ncols // 2] # left half
    half2 = image[:, ncols // 2:] # right half
return half1, half2
|
4a32b819754f060ee0281c3a47349dd2c6bd2dc3
| 48,490 |
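A short usage sketch for `_split_image` above, assuming a single-channel uint8 image as the docstring describes; the 4x4 array is purely illustrative:

import numpy as np

# Example usage, assuming _split_image from the snippet above is in scope.
image = np.arange(16, dtype=np.uint8).reshape(4, 4)
upper, lower = _split_image(image, axis='Horizontal')
left, right = _split_image(image, axis='Vertical')
print(upper.shape, lower.shape)  # (2, 4) (2, 4)
print(left.shape, right.shape)   # (4, 2) (4, 2)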
def get_reverse_list(ori_shape, transforms):
"""
get reverse list of transform.
Args:
ori_shape (list): Origin shape of image.
transforms (list): List of transform.
Returns:
        list: List of tuples; each tuple is in one of two formats:
('resize', (h, w)) The image shape before resize,
('padding', (h, w)) The image shape before padding.
"""
reverse_list = []
h, w = ori_shape[0], ori_shape[1]
for op in transforms:
if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
reverse_list.append(('resize', (h, w)))
h, w = op.target_size[0], op.target_size[1]
if op.__class__.__name__ in ['Padding']:
reverse_list.append(('padding', (h, w)))
w, h = op.target_size[0], op.target_size[1]
if op.__class__.__name__ in ['LimitLong']:
long_edge = max(h, w)
short_edge = min(h, w)
            # scale the short edge using the original long edge before clamping it
            if ((op.max_long is not None) and (long_edge > op.max_long)):
                reverse_list.append(('resize', (h, w)))
                short_edge = int(round(short_edge * op.max_long / long_edge))
                long_edge = op.max_long
            elif ((op.min_long is not None) and (long_edge < op.min_long)):
                reverse_list.append(('resize', (h, w)))
                short_edge = int(round(short_edge * op.min_long / long_edge))
                long_edge = op.min_long
if h > w:
h = long_edge
w = short_edge
else:
w = long_edge
h = short_edge
return reverse_list
|
723a12c4635f154c1194f46874aba17a08e5abb9
| 617,753 |
def is_larger_better(metric_name):
"""
Helper method to check whether high or low is better for a metric
:param metric_name:
:return:
"""
if metric_name in ['r2']:
return True
else:
return False
|
32edce19aa639e006f1c3f0244dd56a7641db6a3
| 598,816 |
from pathlib import Path
from typing import Dict
from typing import Any
import toml
def load_pyproject(pyproject_path: Path = Path("pyproject.toml")) -> Dict[str, Any]:
"""Load pyproject.toml into a dictionary, with a default dict as a fallback."""
try:
return dict(toml.load(pyproject_path))
except Exception:
return {
"tool": {"poetry": {"name": "inboard", "description": "", "version": ""}}
}
|
c4eccb49d0819cc7da8506a0a2b360d96e87146e
| 151,219 |
def insignificant(path):
"""Return True if path is considered insignificant."""
# This part is simply an implementation detail for the code base that the
# script was developed against. Ideally this would be moved out to a config
# file.
return path.endswith('Dll.H') or path.endswith('Forward.H') or \
path.endswith('templates.H')
|
6610ca32d35d4f0cc8c2a559c08a8157e6fbb473
| 666,693 |
def poke(d, address, word):
"""Write one 32-bit word to ARM memory."""
return d.poke(address, word);
|
90d6622da5b5bc58cd4236aa8388d0ed6d43ef2d
| 295,331 |
def selected(data, select):
"""
Takes data and removes any values/columns not in SELECT parameter
:param data: List of data entries to be SELECT'ed. ex:
[
{ 'stb': 'stb1',
'title': 'the matrix',
'rev': '6.00',
'date': '2017-05-01',
'provider': 'Warner Brothers',
'time': '12:30' },
{ ... }
]
:param select: List of SELECT parameters. ex: ['stb', 'rev', 'title']
:return: List of data with only keys matching SELECT parameters
"""
result = []
for entry in data:
result += [{key: entry[key] for key in select}]
return result
|
da7dec6686ee57ec5e89f78e552eb5476f27c66c
| 37,743 |
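A usage sketch for `selected` above, reusing the example record from its docstring and assuming the function is in scope:

# Example usage, assuming selected from the snippet above is in scope.
data = [{'stb': 'stb1', 'title': 'the matrix', 'rev': '6.00',
         'date': '2017-05-01', 'provider': 'Warner Brothers', 'time': '12:30'}]
print(selected(data, ['stb', 'rev', 'title']))
# [{'stb': 'stb1', 'rev': '6.00', 'title': 'the matrix'}]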
def example(commandline):
"""
Usage: `ping`
Returns **pong**.
"""
return '**pong**'
|
9e06629bb93bfbc36d80bf2c2f6cd91e9ff02812
| 219,526 |
def extract_secondary_date(gunw_scene_name: str) -> str:
"""Get Secondary Date from GUNW id"""
date_pair_str = gunw_scene_name.split('-')[6]
temp = date_pair_str.split('_')[1]
secondary_date_str = f'{temp[:4]}-{temp[4:6]}-{temp[6:]}'
return secondary_date_str
|
455e1b6038b5057b618f682bbdd360082f359d86
| 358,571 |
def get_peptide_elements(templates_peptide, js):
"""
The function return a list with compounds of core peptide chain.
Parameters
----------
templates_peptide : list
List of atoms of peptide bonds
js : dict
Opend rBAN peptideGraph.json.
Returns
-------
core_peptide_elements : list
List of core peptide elements.
"""
core_peptide_elements = []
for tp in templates_peptide:
for atom in js['atomicGraph']['atomicGraph']['atoms']:
if atom['cdk_idx'] in tp:
if atom['matchIdx'] in core_peptide_elements:
continue
core_peptide_elements.append(atom['matchIdx']) #Adding aminoacids
return core_peptide_elements
|
a696e28042c478de09b94c0002313169811a36e7
| 435,640 |
import math
def H_approx(n):
"""
Returns an approximate value of n-th harmonic number
http://en.wikipedia.org/wiki/Harmonic-number
"""
# Euler-Mascheroni constant
gamma = 0.57721566490153286060651209008240243104215933593992
return gamma + math.log(n) + 0.5/n - 1./(12*n**2) + 1./(120*n**4)
|
5c37f62e64c7187d92c4a6824546402c69f59d70
| 548,062 |
def polstring_version_required(str_in):
"""
What SICD version does the pol string require?
Parameters
----------
str_in : None|str
The tx/rcv polarization string.
Returns
-------
tuple
One of `(1, 1, 0)`, `(1, 2, 1)`, `(1, 3, 0)`
"""
if str_in is None or str_in in ['OTHER', 'UNKNOWN']:
return (1, 1, 0)
parts = str_in.split(':')
if len(parts) != 2:
return (1, 1, 0)
part1, part2 = parts
if part1 in ['S', 'E', 'X', 'Y', 'OTHER'] or part2 in ['S', 'E', 'X', 'Y', 'OTHER']:
return (1, 3, 0)
elif (part1 in ['V', 'H'] and part2 in ['RHC', 'LHC']) or \
(part2 in ['V', 'H'] and part1 in ['RHC', 'LHC']):
return (1, 2, 1)
else:
return (1, 1, 0)
|
ecfc9b367acb57b4f83a5e05dbd3fe420a1fd402
| 575,246 |
def next_power(x: int, k: int = 2) -> int:
"""Calculate x's next higher power of k."""
y, power = 0, 1
while y < x:
y = k ** power
power += 1
return y
|
cf914b49cb10da41a6c4e89a46c39ac1d958b043
| 172,155 |
def update_dict_key(dict_value, key_map):
"""将字典中的key进行修改
Args:
dict_value:原始字典
key_map: key映射dict类型
Returns:
返回修改后的字典
举例:
dict_value = {'a':1,'b':2}
key_map = {'a':'c'}
返回值new_val = {'c':1, 'b':2}
"""
new_val = dict_value.copy()
for k, v in key_map.items():
if k not in new_val or v in new_val:
continue
new_val[v] = new_val.pop(k)
return new_val
|
bf4d78798514144ac5c1396f67558480c3f7affd
| 647,635 |
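A usage sketch for `update_dict_key` above, following the example in its docstring and assuming the function is in scope:

# Example usage, assuming update_dict_key from the snippet above is in scope.
old = {'a': 1, 'b': 2}
print(update_dict_key(old, {'a': 'c'}))  # {'b': 2, 'c': 1}
print(old)                               # {'a': 1, 'b': 2} (the input is not modified)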
def add_field_name_to_value(row):
""" Combine the field name and value provided into one string.
Args:
row: the dataframe row containing information about a cell, including the header and contents
Returns:
The field name and value provided combined into one string
"""
return row['Field Name'] + ': ' + row['Value Provided']
|
dd39e79187fd9327204a39f29fa999d79c816550
| 455,044 |
def convert_arg_to_int(cmd_args):
"""Convert str to int as much as possible
:param cmd_args: the sequence of args
:return the sequence of args
"""
args = []
for arg in cmd_args:
if isinstance(arg, str): # Int will cause TypeError
try:
arg = int(arg, 0)
except ValueError:
pass # Unable to convert
args.append(arg)
return args
|
2b63400165e3b073bc66b83686701f80494dd0e3
| 654,683 |
import json
def get_feed_by_guild(guild, *, return_path=False):
"""Get all feeds in a certain guild
Args:
guild (number): the guild ID (int() is used on it so string is fine too)
return_path (boolean): whether it should return the path to the database entry instead of the entry directly (defaults to False)
Returns:
sends a list of all "channels" objects that matched from the database, in addition it adds a 'url' string to all of them
"""
guild = int(guild)
results = []
    with open('settings/database.json', 'r') as db_file:
        feeds = json.load(db_file)['feeds']
f_count = 0
for f in feeds:
c_count = 0
for c in f['channels']:
if c['guildID'] == guild:
                if return_path:
results.append(f'{f_count}-{c_count}')
else:
c['url'] = f['url']
results.append(c)
c_count += 1
f_count += 1
return results
|
e0a75799dde629277126cd19b1af4abb72322548
| 196,760 |
import re
def re_sub_recursive(pattern, sub, inputstr):
"""Recursive regex.
:str pattern: The regex pattern
:str sub: What to substitute the regex pattern for.
:str inputstr: The string to perform the substitutions on."""
patt = re.compile(pattern)
old_inputstr = inputstr
inputstr = patt.sub(sub, inputstr)
while old_inputstr != inputstr:
old_inputstr = inputstr
inputstr = patt.sub(sub, inputstr)
return inputstr
|
724925f2c8b40fb76d59ff33f66e388b284081d8
| 175,329 |
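A usage sketch for `re_sub_recursive` above; stripping nested empty parentheses is one case where a single `re.sub` pass is not enough, and it assumes the function is in scope:

# Example usage, assuming re_sub_recursive from the snippet above is in scope.
print(repr(re_sub_recursive(r'\(\)', '', '((()))')))  # '' (a single re.sub pass would leave '(())')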
def get_token_index(text, offset, tokens):
"""
>>> text = "A quick brownDasheyBhgx fox jump's over the lazy dog"
>>> tokens = ['[CLS]', 'a', 'quick', 'brown', '##das', '##hey', '##bh',
... '##g', '##x', 'fox', 'jump', "'", 's', 'over', 'the', 'lazy',
... 'dog', '[SEP]']
>>> tokens[get_token_index(text, text.index('fox'), tokens)] == 'fox'
True
>>> tokens[get_token_index(text, text.index('over'), tokens)] == 'over'
True
"""
# the idea is, match tokens chunk by chunk
i = 1 # skip [CLS] in the beginning
for chunk in text[:offset].split():
acc = ''
while len(acc) < len(chunk):
acc += tokens[i].lstrip('##')
i += 1
            if i >= len(tokens):
break
return i
|
d56aab443b631ae1c8a3eb1f7c6790d5bb77b57f
| 647,294 |
import math
def code_header(text):
"""
Insert section header into a jinja file, formatted as Python comment.
Leave 2 blank lines before the header.
"""
seperator_len = (75 - len(text)) / 2
seperator_len_left = math.floor(seperator_len)
seperator_len_right = math.ceil(seperator_len)
return f"# {'-' * seperator_len_left} {text} {'-' * seperator_len_right}"
|
8c7ac8c55b303a1b8b40bd4198c7b55b4dfb88bd
| 378,155 |
import re
def remove_ansi(string):
"""strip ansi code from a str"""
ansi_escape = re.compile(
r"""
\x1B # ESC
(?: # 7-bit C1 Fe (except CSI)
[@-Z\\-_]
| # or [ for CSI, followed by a control sequence
\[
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
""",
re.VERBOSE,
)
return ansi_escape.sub("", string)
|
03625d6615f61e08770d711f54ff5f59e3094fd6
| 278,080 |
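A usage sketch for `remove_ansi` above with a typical SGR color sequence, assuming the function is in scope:

# Example usage, assuming remove_ansi from the snippet above is in scope.
colored = "\x1b[31mERROR\x1b[0m: disk full"
print(remove_ansi(colored))  # ERROR: disk full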
import click
def project_path_option(function):
"""Define the common project path option"""
function = click.option(
"-pp",
"--projet_path",
type=click.Path(
dir_okay=True, file_okay=False, writable=True, resolve_path=True
),
required=False,
help="Path to the project",
)(function)
return function
|
18c4fd4b069223bbfd906ef14a9acff49a280bed
| 427,999 |
def get_field_value_for_current_experiment(cur_experiment, field_name):
"""Returns a specific field of the given experiment."""
return cur_experiment[field_name].iloc[0]
|
2c35cba29edeadbced261f8e2ca080b8380a01a3
| 418,374 |
from typing import Collection
def remove_space(toks: Collection[str]) -> Collection[str]:
"""
Do not include space for bag-of-word models.
:param Collection[str] toks: list of tokens
:return: Collection of tokens where space tokens (" ") are filtered out
:rtype: Collection[str]
:Example:
>>> toks = ['ฉัน','เดิน',' ','กลับ','บ้าน']
>>> remove_space(toks)
['ฉัน','เดิน','กลับ','บ้าน']
"""
res = []
for t in toks:
t = t.strip()
if t:
res.append(t)
return res
|
25f252c58d878286f65d3bf85f5336ba98978636
| 123,143 |
def topic_filename(topic: str) -> str:
"""
Returns the filename that should be used for the topic (without extension).
"""
# Remove commas and replace spaces with '-'.
return topic.replace(",", "").replace(" ", "-")
|
4d8c20d4ee82fdd33238b90f1021e50f8e16817c
| 651,453 |
def format_rate(rate):
"""
Removes trailing .0 from a <class 'float'>:
1.0 to 1
5.5 to 5.5
"""
out = ""
if isinstance(rate, int): # 1, 2, 6, etc
out = f"{out + str(rate)}"
elif rate.is_integer(): # 1.0, 2.0, 6.0, 9.0 etc
out = f"{out + str(int(rate))}"
else: # 5.5, etc.
out = f"{out + str(rate)}"
return out
|
7b8f4d06680a1b781fe0cee672a408272c6a4b8b
| 183,436 |
import string
def isHex(val: str) -> bool:
"""
Return whether the given str represents a hex value or not
:param val: the string to check
:return: whether the given str represents a hex value
"""
if isinstance(val, bytes):
# only decodes utf-8 string
try:
val = val.decode()
except ValueError:
return False
return isinstance(val, str) and all(c in string.hexdigits for c in val)
|
ca2815d8bc9ed902758a1248f96316bbb66a026c
| 442,607 |
def get_unique_tokens(geneset_name):
"""
Delimit the input `geneset_name` by "; ", and return a new string
that includes only unique tokens delimited by "; ".
"""
tokens = geneset_name.split("; ")
uniq_tokens = []
for t in tokens:
if t not in uniq_tokens:
uniq_tokens.append(t)
return "; ".join(uniq_tokens)
|
56fccb391abb6368f118e3bcd069be6d41574ccf
| 356,948 |
def get_pairs(language_list):
"""Given a list, return a list of tuples of all element pairs."""
pairs = []
for l1 in language_list:
for l2 in language_list:
if l1 != l2:
if (l1, l2) not in pairs and (l2, l1) not in pairs:
pairs.append((l1, l2))
return pairs
|
173705ecc7f6861322c083cf559b8d874169d2a3
| 332,671 |
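A usage sketch for `get_pairs` above; pairs are treated as unordered, so ('fr', 'en') is not added once ('en', 'fr') is present. It assumes the function is in scope:

# Example usage, assuming get_pairs from the snippet above is in scope.
print(get_pairs(['en', 'fr', 'de']))
# [('en', 'fr'), ('en', 'de'), ('fr', 'de')]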
import torch
def orthographic_project_torch(points3D, cam_params):
"""
Scaled orthographic projection (i.e. weak perspective projection).
Should be going from SMPL 3D coords to [-1, 1] scaled image coords.
cam_params are [s, tx, ty] - i.e. scaling and 2D translation.
"""
x = points3D[:, :, 0]
y = points3D[:, :, 1]
# Scaling
s = torch.unsqueeze(cam_params[:, 0], dim=1)
# Translation
t_x = torch.unsqueeze(cam_params[:, 1], dim=1)
t_y = torch.unsqueeze(cam_params[:, 2], dim=1)
u = s * (x + t_x)
v = s * (y + t_y)
proj_points = torch.stack([u, v], dim=-1)
return proj_points
|
719e18bbea7e88ae2088400386fc420090a8cf27
| 550,642 |
def uninstall_hook(cr, registry):
"""Delete the actions that were created with mass_editing when
the module is uninstalled"""
cr.execute("""DELETE FROM ir_act_window
WHERE res_model = 'mass.editing.wizard'""")
return True
|
d64af77909d1ce0e9230b23ddff9612d0d0b6049
| 540,643 |
def find_volume_id(onclick):
"""
Find book id from the given string. The string actually is javascript
function.
"""
# find which kind of quote, ' or "
quote = "'"
start = onclick.find(quote)
if start == -1:
quote = '"'
_id = ''
start = onclick.find(quote)
end = onclick.rfind(quote)
if start != -1 and end != -1:
_id = onclick[start+1:end]
return _id
|
678e2233644eeb0b60e80bb8402341c7d7748d38
| 283,954 |
def facade_versions(name, versions):
"""
facade_versions returns a new object that correctly returns a object in
format expected by the connection facades inspection.
:param name: name of the facade
:param versions: versions to support by the facade
"""
if name.endswith('Facade'):
name = name[:-len('Facade')]
return {
name: {'versions': versions},
}
|
4378df3da64453ee8bd6f278cee260e877fc2cdd
| 33,014 |
def rm_par(s: str):
"""Remove parenthesis."""
if s[0] == "(" and s[-1] == ")":
s = s[1:-1]
return s
|
4a26319321385cf1bfbc7a503aaa16ae873cf60b
| 307,924 |
def minute_most_asleep(guards_awake_asleep, guard_number):
"""Calculate on which minute the guard was most often asleep"""
calendar = guards_awake_asleep[guard_number]
times_asleep = [0] * 60
for events in calendar.values():
for minute, event in events.items():
if not event:
times_asleep[minute] += 1
most_times_asleep = max(times_asleep)
minute_asleep = times_asleep.index(most_times_asleep)
return minute_asleep
|
5159546ea9f11b0450ccccc4f8b0a685c80e216f
| 495,149 |
def get_logging_options_string(args):
""" This function extracts the flags and options specified for logging options
added with add_logging_options. Presumably, this is used in "process-all"
scripts where we need to pass the logging options to the "process" script.
Args:
args (namespace): a namespace with the arguments added by add_logging_options
Returns:
string: a string containing all logging flags and options
"""
args_dict = vars(args)
# first, pull out the text arguments
logging_options = ['log_file', 'logging_level', 'file_logging_level',
'stdout_logging_level', 'stderr_logging_level']
# create a new dictionary mapping from the flag to the value
logging_flags_and_vals = {'--{}'.format(o.replace('_', '-')) : args_dict[o]
for o in logging_options if len(args_dict[o]) > 0}
s = ' '.join("{} {}".format(k,v) for k,v in logging_flags_and_vals.items())
# and check the flags
if args.log_stdout:
s = "--log-stdout {}".format(s)
if args.no_log_stderr:
s = "--no-log-stderr {}".format(s)
return s
|
070ed0cd906845abf784bd566118a1959af875f2
| 22,097 |
def element_path(elt):
"""
Walk up the XML structure from the given element, producing a tuple with the
names of the region, sector, subsector, technology, and input found in this
"path".
:param elt: (lxml.etree.Element) an "input" element to start from
:return: tuple of strings: (region, sector, subsector, technology, input)
"""
input = elt.attrib['name']
sector = subsector = technology = region = None
for node in elt.iterancestors(): # walk up the hierarchy
tag = node.tag
attr = node.attrib
if tag == 'period':
continue
elif tag == 'location-info':
sector = attr['sector-name']
subsector = attr['subsector-name']
elif tag == 'region':
region = attr['name' ]
break
elif tag == 'supplysector':
sector = attr['name']
elif tag in ('stub-technology', 'technology'):
technology = attr['name']
elif tag in ('subsector', 'tranSubsector'):
subsector = attr['name']
        elif tag == 'global-technology-database':
break
return (region, sector, subsector, technology, input)
|
b01ddbfbdeac0d72db021384b4048ce0e27d0928
| 229,706 |
def base_url(skin, variables):
""" Returns the base_url associated to the skin.
"""
return variables['skins'][skin]['base_url']
|
2d02cbee1f412bab17cb3a734c5c3d7b9576c203
| 625,201 |
from typing import Any
def latex_repr(item: Any) -> str:
"""
Return a str if the object, 'item', has a special repr method
for rendering itself in latex. If not, returns str(result).
"""
if hasattr(item, "_repr_latex_"):
return item._repr_latex_().replace("$", "")
elif hasattr(item, "latex"):
try:
return item.latex().replace("$", "")
except TypeError:
return str(item)
elif hasattr(item, "to_latex"):
try:
return item.to_latex().replace("$", "")
except TypeError:
return str(item)
elif hasattr(item, "__len__") and not isinstance(item, (str, dict, tuple)):
comma_space = ",\\ "
try:
array = "[" + comma_space.join([str(v) for v in item]) + "]"
return array
except TypeError:
return str(item)
else:
return str(item)
|
4390900c07dc4e4d7b90e5556a1871f2383891fd
| 620,203 |
def eqzip(*args):
"""Zip but raises error if lengths don't match.
Args:
*args: list of lists or tuples
Returns:
list: the result of zip
Raises:
ValueError: when the lengths don't match
"""
sizes = [len(x) for x in args]
if not all([sizes[0] == x for x in sizes]):
raise ValueError("Lists are of different sizes. \n %s" % str(sizes))
return zip(*args)
|
a4492487995feced9349874af0c6303fda74ac58
| 578,748 |
import re
def unformat(recipe):
"""
Remove indentation, alignment from a Makefile recipe
"""
recipe = re.sub('[\\\\]\n', ' ', recipe)
    recipe = '\n'.join(re.sub(r'[\s]+', ' ', line).strip() for line in recipe.split('\n') if line != '')
return recipe
|
5c9ea4ad2e7f4a37ac66a77a76042729a6649dec
| 253,833 |
def find_n(list, needle, n):
"""
Devuelve True si en list hay n o más ocurrencias de needle
False si hay menos o si n < 0
"""
# si n >= 0...
if n >= 0:
# Incializamos el índice y el contador
index = 0
count = 0
# mientras no hayamos encontrado al elemento n veces o no hayamos terminado la lista...
while count < n and index < len(list):
# si lo encontramos, actualizamos el contador
if needle == list[index]:
count = count + 1
# avanzamos al siguiente elemento
index = index + 1
# devolvemos el resultado de comparar contador con n
return count >= n
else:
return False
|
026bae25bd9752071ad3efc3fb559f82ea3f9203
| 199,150 |
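A usage sketch for `find_n` above, assuming the function is in scope:

# Example usage, assuming find_n from the snippet above is in scope.
print(find_n([1, 2, 2, 3, 2], 2, 2))  # True  (2 occurs at least twice)
print(find_n([1, 2, 2, 3, 2], 2, 4))  # False (2 occurs only three times)
print(find_n([1, 2, 3], 9, 0))        # True  (zero occurrences are always found)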
def drop_cols(df, cols):
"""
Removes from the input dataframe the columns indicated in args argument.
Prints error message if columns can't be dropped.
Parameters:
_________________
    df: dataframe containing the columns to be removed
cols: (list) columns to be removed
Returns:
_________________
df_after_drop: dataframe with desired columns dropped, if no exception raised
"""
try:
for col in cols:
df = df.drop(col)
print('The DataFrame after dropping columns is:')
df.show()
return df
    except Exception:
print("Something went wrong... make sure column names provided are in dataframe")
return
|
acaf69d8b8ba0c4221afb2485916b715ec840862
| 272,058 |
def percent(value):
"""Percentage with % sign
>>> percent(1)
'100.0 %'
"""
return str(round(value * 100, 2)) + " %"
|
79863f221eba5c627cde93b3ca07d38f6b82ab73
| 147,938 |
def generate_cipher_response(cipher: str, key_size: int) -> str:
"""Generate a response message
:param cipher: chosen cipher
:param key_size: chosen key size
:return: (cipher, key_size) selection as a string
"""
return "ChosenCipher:{},{}".format(cipher, key_size)
|
207e692c15d7c2be6040a632ac7a3fa5d996564b
| 614,124 |
def ParsePointCoordinates(coordinates):
""" Parse <coordinates> for a Point
Permits a sloppy Point coordinates.
Arg:
coordinates: lon,lat,alt or lon,lat with spaces allowed
Returns:
None: if coordinates was not 2 or 3 comma separated numbers
(lon,lat): float tuple
(lon,lat,alt): float tuple
"""
p = coordinates.split(',')
if len(p) == 2:
return (float(p[0].strip()), float(p[1].strip()))
elif len(p) == 3:
return (float(p[0].strip()), float(p[1].strip()), float(p[2].strip()))
return None
|
6d8804f3aee3ba6589c37965353b5bc4e3d5d660
| 171,008 |
def fitness_func_sum3(vector):
""" returns the sum of the first 3 numbers"""
return vector[0]+vector[1]+vector[2]
|
fa23d1a9aad79ee7d3d339403e97aa40fb5d5355
| 382,475 |
from typing import Dict
from typing import List
from typing import Tuple
def invert_dict(orig: Dict[str, List[Tuple[int, str]]]) -> Dict[str, List[str]]:
"""Inverts the dict returned in convert: values to keys and keys to values.
The original values is a list of tuples, each of them transformed to a key
in the result. The key used is the second element of the tuple."""
dest = dict()
# For each element in the dictionary, collect all the keys where in
# which that element is present in the dict values
for elem in orig:
dest[elem] = [k for k, v in orig.items() if elem in [tup[1] for tup in v]]
return dest
|
34020467ba0d8508b642215446fea149315834d8
| 475,016 |
def substract(x, y):
"""Substract two numbers"""
return y-x
|
3b1b0e749b2be7661af37dc076711bae52ef81b1
| 289,269 |
from typing import List
from typing import Dict
import requests
def get_stalker(user: str, limit: int = 30) -> List[Dict]:
"""Gets messages from given user [Source: stocktwits]
Parameters
----------
user : str
User to get posts for
limit : int, optional
Number of posts to get, by default 30
"""
result = requests.get(f"https://api.stocktwits.com/api/2/streams/user/{user}.json")
if result.status_code == 200:
return list(result.json()["messages"][:limit])
return []
|
15c7347783f5367c607ee1eb952a94e2f6344ed1
| 525,490 |
def _parent(i):
"""Gets index of a parent of a node given the node's index."""
return (i - 1) >> 1
|
4d296413cf7bf30bf179649fd979acbfe721316a
| 52,861 |
def get_essential_properties(report, prop_keys):
"""get essential properties
This function returns a dictionary which contains keys as in
prop_keys and its values from the report.
:param report: SCCI report element
:prop_keys: a list of keys for essential properties
:returns: a dictionary which contains keys as in
prop_keys and its values.
"""
v = {}
v['memory_mb'] = int(report.find('./System/Memory/Installed').text)
v['local_gb'] = sum(
[int(int(size.text) / 1024)
for size in report.findall('.//PhysicalDrive/ConfigurableSize')])
v['cpus'] = sum([int(cpu.find('./CoreNumber').text)
for cpu in report.find('./System/Processor')
if cpu.find('./CoreNumber') is not None])
# v['cpus'] = sum([int(cpu.find('./LogicalCpuNumber').text)
# for cpu in report.find('./System/Processor')])
v['cpu_arch'] = 'x86_64'
return {k: v[k] for k in prop_keys}
|
5ec15b35dff6126ce9d7dea8b71afcd4cc76eb05
| 488,490 |
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
|
de4dda1dec6c32d27a36d4803830c7914f6a65ae
| 248,339 |
def get_height_levels(coord_data):
"""Gets height level values from coords nested dictionary and sets pressure
value based on whether heights or pressures key is used.
Args:
coord_data (Dict):
Dictionary containing values to use for either height or pressure levels.
Returns:
Tuple[List(float), bool]:
A tuple containing a list of values to use for the height/pressure dimension
and a bool specifying whether the coordinate should be created as height
levels or pressure levels.
"""
height_levels = None
pressure = False
if "heights" in coord_data:
height_levels = coord_data["heights"]
elif "pressures" in coord_data:
height_levels = coord_data["pressures"]
pressure = True
return height_levels, pressure
|
12c74423486c14f291798909e4b8f47210cd15f7
| 293,483 |
def count_vowels(phrase: str) -> int:
"""Count the number of vowels in the phrase.
:param phrase: text to be examined
:return: number of vowels in phrase
"""
return len([x for x in phrase.lower() if x in 'aeiou'])
|
0f099819dfa242f52ad560b88b280f9d3c38292b
| 72,653 |
def min_max(x,axis=None):
"""
    return min-max standardization
x = (x-x.min)/(x.max-x.min)
min=0 max=1
Parameters
-------------------
x : numpy.ndarray(x,y)
    axis : int 0 # calculate each column
1 # each row
Returns
--------------------
result : np.ndarray(x,y)
"""
xmin =x.min(axis=axis,keepdims=True)
xmax =x.max(axis=axis,keepdims=True)
result = (x-xmin)/(xmax-xmin)
return result
|
a7a31bfdda1d6a21a8ee0fbe5148d6cdd53aa60b
| 691,796 |
def top_five_byfuel(collection, fuel):
"""
Find the top five countries by fuel type capacity
"""
result = collection.aggregate([
{
'$match': {
'primary_fuel': fuel
}
},
{
# Accumulate capacity by fuel type
'$group':
{
'_id': '$country_long',
'totCapacity': {
'$sum': {'$toDecimal': '$capacity_mw'}
}
}
},
{
'$sort': {'totCapacity': -1}
},
{
'$limit': 5
}
])
return result
|
847bf73e638e0a85baea612fccd82a199131d26d
| 220,306 |
from typing import Dict
from typing import Union
def params_to_line(params: Dict[str, Union[str, int]]) -> str:
"""
Transforms a dictionary of parameters into a command line arguments.
:param params: A dictionary of parameter names to values.
:return: A executable python command.
"""
base = 'python train.py'
for param, value in params.items():
if value is None:
base += f' --{param}'
else:
base += f' --{param} {value}'
return base + '\n'
|
d9bb0bc8d46c543b1c4dfe9fea0753812edcaac6
| 584,223 |
from typing import Callable
from typing import Any
import inspect
def func_accepts_kwargs(func: Callable[..., Any]):
"""Return True if function 'func' accepts keyword arguments **kwargs."""
parameters = list(inspect.signature(func).parameters.values())
return any(p for p in parameters if p.kind == p.VAR_KEYWORD)
|
db959fde9450a4d4e2614218a90b90cdb2b3315e
| 257,471 |
import re
def dict_from_regex_match(pattern, input_, type_dispatcher=None):
"""Return a dict from matching `pattern` to `input_`.
If match failed, returns None.
Parameters
----------
pattern : str, `re.Pattern`
The regex that matches to the `input_`.
input_ : str
The string to be matched.
type_dispatcher : dict
This specifies how the matched group values are handled after being
extracted.
"""
if type_dispatcher is None:
type_dispatcher = dict()
m = re.match(pattern, input_)
if m is None:
return None
result = dict()
for k, v in m.groupdict().items():
if k in type_dispatcher:
result[k] = type_dispatcher[k](v)
else:
result[k] = v
return result
|
ffccfaf811bdbb586eace4e0bdb1bf082647b4f1
| 618,067 |
def join_name(*parts):
"""Joins a name. This is the inverse of split_name, but x == join_name(split_name(x)) does not necessarily hold.
Joining a name may also be subject to different schemes, but the most basic implementation is just joining all parts
with a space.
"""
return " ".join(parts)
|
03eaf4b0aafbc3469c3c46926d4735057b99c28f
| 259,169 |
def factorial_recur(n):
"""Nth number of factorial series by recursion.
- Time complexity: O(n)
- Space complexity: O(n).
"""
# Base case.
if n <= 1:
return 1
return n * factorial_recur(n - 1)
|
740c07b7fe00818918952a997cd2a14860bd7e71
| 157,890 |
def current(space, w_arr):
""" Return the current element in an array """
return w_arr._current(space)
|
0df4ea22712b0c70f95e4ba5a1484d75ea85a17e
| 395,832 |
def _range_intersection(a, b):
"""
Returns the range where the two given ranges intersect.
Parameters
----------
a, b: Each is a list of two coordinate values designating a min-max
range.
Returns
-------
A range (a list of two numbers) where the two given ranges intersect,
or an empty list if they do not. E.g., a = [0, 10], b = [5, 15] will
return [5, 10].
"""
if (b[0] <= a[0] <= b[1]) or \
(b[0] <= a[1] <= b[1]) or \
(a[0] <= b[0] <= a[1]) or \
(a[0] <= b[1] <= a[1]):
return [max(a[0], b[0]), min(a[1], b[1])]
else:
return []
|
7baf50aa162a6ef2e71a98e64d483d18fc45a0ac
| 659,420 |
def chunks(seq, n) :
"""
Description
----------
    Split list seq into n lists (round-robin)
    Parameters
    ----------
    seq : input list
    n : number of lists to split seq into
Returns
-------
list of n list
"""
return [seq[i::n] for i in range(n)]
|
561fccc0af4957dce1cee3cc51c2448879a12f26
| 156,838 |
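A usage sketch for `chunks` above; note the split is strided (round-robin), not contiguous. It assumes the function is in scope:

# Example usage, assuming chunks from the snippet above is in scope.
print(chunks(list(range(10)), 3))
# [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]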
def polyfill_filename(api):
"""Gets the filename associated with an API polyfill.
Args:
api: String name of API.
Returns:
Filename of API polyfill.
"""
return "{}.polyfill.js".format(api)
|
efb54fddafb846985c77e44837f7a24c57596581
| 37,204 |
def get_value(config, key):
"""Get value from (possibly nested) configuration dictionary by using a
single string key as used for the commandline interface (list of keys
delimited by '-' or '_').
"""
keys = key.replace('-', '_').split('_')
for key in keys[:-1]:
config = config[key]
return config[keys[-1]]
|
d0ef4c16cb3fb202956c0ab14d8ade843d68d7cc
| 34,222 |
def _join_names(names):
"""Join the names of a multi-level index with an underscore."""
levels = (str(name) for name in names if name != '')
return '_'.join(levels)
|
8dcc2fa21dd07bbcdb24648339b2e85e2e969e67
| 231,666 |
def _get_spacecraftid(spid):
"""
Normalizes Landsat SPACECRAFT_ID fields
'Landsat_8' -> 'L8', 'Landsat5' -> 'L5' etc
"""
if spid.upper().startswith("LANDSAT"):
return spid[0].upper() + spid[-1]
else:
return spid
|
ce1159ff3458e0ffaac6c154045c4558434ff9f9
| 520,244 |
def unescape_single_quotes(origin):
"""Strip the quotes and unescape a string inside single quotes."""
return origin[1:-1].replace("''", "'")
|
8ad88027f92f6b513caff143c10b5fd8c0adc912
| 204,838 |
def is_datasource(tool_xml):
"""Returns true if the tool is a datasource tool"""
return tool_xml.getroot().attrib.get('tool_type', '') == 'data_source'
|
4a3292a81994d85f4194d7799c957f520b111e07
| 214,284 |
import json
def GetError(error):
"""Returns a ready-to-print string representation from the http response.
Args:
error: the Http error response, whose content is a JSON-format string for
most cases (e.g. invalid test dimension), but can be just a string other
times (e.g. invalid URI for CLOUDSDK_TEST_ENDPOINT).
Returns:
A ready-to-print string representation of the error.
"""
try:
data = json.loads(error.content)
except ValueError: # message is not JSON
return error.content
code = data['error']['code']
message = data['error']['message']
return 'ResponseError {0}: {1}'.format(code, message)
|
e970f4c18151c82249587f9052390b928ad11e37
| 161,248 |
import yaml
def unmarshal_yaml(yaml_file, replacements={}):
"""
Unmarshals yaml into a python object.
`replacements` allow substituting values in the yaml file.
Ex:
replacements = {"NAMESPACE", "kubeflow"}
metadata:
- name: ...
- namespace: ${NAMESPACE}
will become
metadata:
- name: ...
- namespace: kubeflow
"""
with open(yaml_file) as file:
contents = file.read()
for r_key, r_value in replacements.items():
contents = contents.replace(f"${{{r_key}}}", r_value)
return yaml.safe_load(contents)
|
18aa154084054a537c52d19635490a4ee5f0e046
| 104,037 |
def _filter_direct_matching(key: dict, all_data: list, *, inverse: bool=False) -> tuple:
"""Filter through all data to return only the documents which match the key.
`inverse` keyword-only argument is for those cases when we want to retrieve all documents which do NOT match the `check`."""
if not inverse:
def check(element: dict) -> bool:
return all([True if i in element.items() else False for i in key.items()])
else:
def check(element: dict) -> bool:
return all([False if i in element.items() else True for i in key.items()])
results = filter(check, all_data)
return tuple(results)
|
af5118ebfb5a44b23557a8c44c7fee180bc2dc41
| 430,701 |
def split_name_length(name):
"""Split name and length from a name like CubeYF_8"""
split = name.split('_')
return split[0], int(split[1])
|
9b29a897082ed3af02301ad1970e1ba5b923d8a0
| 532,632 |
def genes_to_rwr_tsv(genes):
"""convert a list of genes to a tsv string for use with RWR tools"""
return "".join([f"report\t{gene}\n" for gene in genes])
|
92c36d31b67c73bc39bd40569c85916bcb8cb183
| 65,635 |
def get_instance_index(label):
""" gets the instance index from a label
the label is assumed to be of the form:
some-text-<num> e.g. 'copy-s3-data-0'
"""
return_val = None
try:
return_val = label.split('-').pop()
return_val = int(return_val)
except:
pass
return return_val
|
a28328ea940df057ff0c100a873c0b49c0e67931
| 306,481 |
def count_ngram(hyps_resp, n):
"""
Count the number of unique n-grams
:param hyps_resp: list, a list of responses
:param n: int, n-gram
:return: the number of unique n-grams in hyps_resp
"""
if len(hyps_resp) == 0:
print("ERROR, eval_distinct get empty input")
return
if type(hyps_resp[0]) != list:
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
type(hyps_resp[0])))
return
ngram = set()
for resp in hyps_resp:
if len(resp) < n:
continue
for i in range(len(resp) - n + 1):
ngram.add(' '.join(resp[i: i + n]))
return len(ngram)
|
4ab063744e48812c360dbb79421a51b003f79700
| 78,122 |
def bits_to_int(bits):
"""Converts a list of bits into an integer"""
val = 0
for bit in bits:
val = (val << 1) | bit
return val
|
fe2024817f3659b6304577e7c325168690e74392
| 638,295 |
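A usage sketch for `bits_to_int` above; the first bit in the list is the most significant. It assumes the function is in scope:

# Example usage, assuming bits_to_int from the snippet above is in scope.
print(bits_to_int([1, 0, 1, 1]))  # 11
print(bits_to_int([0, 0, 0, 1]))  # 1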
def clc_core_fn_name(dst, size='', mode='', sat=''):
"""
This helper function returns the correct clc core conversion function name
for a given source and destination type, with optional size, mode
and saturation arguments.
"""
return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode)
|
bb1f0a3d15916c371a200d51a0dc390aa174cc49
| 417,828 |
import torch
def compute_iou(boxes1, boxes2):
"""Compute IoU of two sets of boxes, each box is [x1, y1, x2, y2].
Arguments:
boxes1: a float tensor of shape [n, 4].
boxes2: a float tensor of shape [m, 4].
Returns:
a float tensor of shape [n, m].
"""
n = boxes1.size(0)
m = boxes2.size(0)
# left top
lt = torch.max(
boxes1[:, :2].unsqueeze(1).expand(n, m, 2),
boxes2[:, :2].unsqueeze(0).expand(n, m, 2),
)
# [n, 2] -> [n, 1, 2] -> [n, m, 2]
# [m, 2] -> [1, m, 2] -> [n, m, 2]
# right bottom
rb = torch.min(
boxes1[:, 2:].unsqueeze(1).expand(n, m, 2),
boxes2[:, 2:].unsqueeze(0).expand(n, m, 2),
)
# width height
wh = rb - lt # [n, m, 2]
wh[wh < 0.0] = 0.0
inter = wh[:, :, 0] * wh[:, :, 1] # [n, m]
area1 = (boxes1[:, 2] - boxes1[:, 0])*(boxes1[:, 3] - boxes1[:, 1]) # [n]
area2 = (boxes2[:, 2] - boxes2[:, 0])*(boxes2[:, 3] - boxes2[:, 1]) # [m]
area1 = area1.unsqueeze(1).expand_as(inter) # [n] -> [n, 1] -> [n, m]
area2 = area2.unsqueeze(0).expand_as(inter) # [m] -> [1, m] -> [n, m]
iou = inter/(area1 + area2 - inter)
return iou
|
1566c98d72a5d36376044272f8328273861a901b
| 358,833 |
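A usage sketch for `compute_iou` above with two hand-made boxes; the expected values follow from a 1x1 overlap of two 2x2 boxes and an identical pair. It assumes the function is in scope:

import torch

# Example usage, assuming compute_iou from the snippet above is in scope.
boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0],
                       [0.0, 0.0, 2.0, 2.0]])
print(compute_iou(boxes1, boxes2))  # tensor([[0.1429, 1.0000]])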
import pkg_resources
def get_model_path(path: str = 'models/knn.pkl') -> str:
"""Access a model included with the package
Returns:
Scikit-learn model picked with joblib
"""
return pkg_resources.resource_filename('persis', path)
|
6bdaf3bc92b13a5df5572f8140670b574b1e33bd
| 536,752 |
def get_stat( dance, player, stat ):
"""
Accepts a bin representing the player
and a string representing the stat
and returns the stat's value.
"""
return dance[ str(player) ][ str(stat) ]
|
fb31c3e8930d5d66a237516494aa70eae3bd6c8b
| 615,015 |
def pareto_front(moea):
"""
Method that allows to extract the values of the individuals from a multi-objective genetic algorithm
of the last generation.
Parameters
----------
:param moea: beagle.Algorithm
Multi-objective genetic algorithm.
Returns
-------
:return tuple
(Indices of individuals in the population, Values of individuals on the non-dominated front)
"""
last_generation = max(list(moea.report._report.keys()))
populations = list(moea.report._report[last_generation].keys())
front = {population: [] for population in populations}
indices = {population: [] for population in populations}
for population in populations:
for i, individual in enumerate(moea.report._report[last_generation][population]):
if individual.fitness[0].rank == 0 and not list(individual.values) in front[population]:
front[population].append(list(individual.values))
indices[population].append(i)
return indices, front
|
4d95d957b0928d4de1c754367f7b795d6f133a7c
| 284,490 |
import optparse
def check_key_value(option, opt, value):
"""Checks value is split in two by a ':', returns the parts in a tuple."""
    result = value.split(':')
if not len(result) == 2:
raise optparse.OptionValueError(
"option %s: invalid value: %s should be of the form '<key>:<value>'"
% (opt, value))
return tuple(result)
|
2444043d970bf74ac4849aa3155756e1a2900f5d
| 157,745 |
def xsd_datetime_str_from_dt(dt):
"""Format datetime to a xs:dateTime string.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format can be used as the date in xs:dateTime XML elements. It
will
be on the form ``YYYY-MM-DDTHH:MM:SS.mmm+00:00``.
"""
return dt.strftime("%Y-%m-%dT%H:%M:%S.%f+00:00")
|
c722a88a1eee276342066af2098d404f8302863a
| 637,413 |
def depth_to_space_shape(input_shape, options):
"""Depth to space input to output shape conversion."""
block_size = options["block_size"]
height = int(input_shape[1] * block_size)
width = int(input_shape[2] * block_size)
return (int(input_shape[0]), height, width, int(input_shape[3] // (block_size * block_size)))
|
63f051eb43b9dff6b10bbd2561f1c887e28e6925
| 308,129 |
def find_empty_cells(board):
"""Returns the empty cells of the board."""
return [x for x in board if x in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
|
b7a976f910710f7e10f1ad60804a6d7e22550da1
| 15,713 |
def complex_to_xy(complex_point):
"""turns complex point (x+yj) into cartesian point [x,y]"""
xy_point = [complex_point.real, complex_point.imag]
return xy_point
|
2984b70c3015cb69a0f7dfd62bd022bb26310852
| 571 |
from typing import Iterable
from pathlib import Path
def list_images(img_dir) -> Iterable[str]:
"""List all image files in img_dir.
Returns an iterator that lists the files to process. Subclasses may want to override this to return specific
image types or filter the results. By default, will list all images in self.img_dir if the file extension is in
the extensions list.
Returns
-------
Iterable[str]
The list of files to be used for generating the masks.
"""
extensions = (".png", ".jpg", ".jpeg", ".tif", ".tiff")
paths = Path(img_dir).glob("**/*")
paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)
return (str(p) for p in paths)
|
c7585c4fe737fb95af27a3fad578ebf3347e4f9c
| 36,282 |
def is_hero_type(page):
"""Method to check if page belongs to a hero or creep-hero(Warlock's Golem).
:param page: Page name as string.
:return: True if page belongs to hero else False
"""
return '/Responses' in page
|
468be5f911c72a84934df68e7e1bea9a1b95ef72
| 374,660 |
def get_rgba_from_color(rgba):
"""Return typle of R, G, B, A components from given color.
Arguments:
rgba - color
"""
r = (rgba & 0xFF000000) >> 24
g = (rgba & 0x00FF0000) >> 16
b = (rgba & 0x0000FF00) >> 8
a = (rgba & 0x000000FF)
return r, g, b, a
|
56d3e0dce01cfc4348ae115de81abb55ec85eb56
| 1,916 |
def find_replace_tuple(t, aliasDict):
"""
Replace elements of t according to rules in `aliasDict`.
Parameters
----------
t : tuple or list
The object to perform replacements upon.
aliasDict : dictionary
Dictionary whose keys are potential elements of `t` and whose values
are tuples corresponding to a sub-sequence that the given element should
be replaced with. If None, no replacement is performed.
Returns
-------
tuple
"""
t = tuple(t)
if aliasDict is None: return t
for label, expandedStr in aliasDict.items():
while label in tuple(t):
i = t.index(label)
t = t[:i] + tuple(expandedStr) + t[i + 1:]
return t
|
d9d01a399fc97e09e37bd098129e0283b1ab05fb
| 355,066 |
import random
def generate_commit_id(id_length: int = 40) -> str:
"""Creates a random string using 0-9a-f, of a certain length."""
chars = "1234567890abcdef"
return "".join(random.choice(chars) for _ in range(id_length))
|
b318a5bc06249a40a006498152e690efe8366da9
| 275,713 |
from datetime import datetime
def validate_certificate(cert):
"""
Function used to validate a given certificate.
:param cert: The ciphertext to decrypt
:return: True, if the current timestamp is between the limits of validity of the certificate. False, otherwise.
"""
today = datetime.now().timestamp()
return (
cert.not_valid_before.timestamp() <= today <= cert.not_valid_after.timestamp()
)
|
da648390e72cf02da415ead9b259507e2cee070a
| 308,047 |
def first_second_ids(dates):
"""
Returns a dictionary of 'date:unique ID' for each date in 'dates'.
IDs are ordered from oldest to newest, starting at 0.
:param list dates: List of dates
:return: unique dates IDs
:rtype: dict
"""
dset = sorted(set(dates))
return dict([(date_, i) for i, date_ in enumerate(dset)])
|
3742f7a9ae9113303a9febff48faba83dd2da7d3
| 62,554 |
def dict_contains_only(dct, allowed, allow_mpp=True):
"""Check whether a dictionary contains only allowed keys"""
for key in dct.keys():
if allow_mpp and key.startswith("mpp-"):
continue
if key in allowed:
continue
return False
return True
|
feae1cfb5724fe2355e0a7c7bff4ec5d59c59e11
| 304,176 |
def mine_remove(x, y):
"""Removes a mine
x: X coordinate
y: Y coordinate
"""
return "Removing mine at {0}, {1}".format(x, y)
|
963b479608ffb2d5e322a9fdf4d64c0100462f6c
| 534,115 |