content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
import pathlib
def load_gtdb_md5sums(md5sums_file):
    """Parse an MD5SUMS file into a ``{filename: md5}`` dictionary.

    Each line is expected to be ``<md5> <path>``; only the final path
    component of ``<path>`` is used as the key.
    """
    checksums = {}
    with open(md5sums_file, "r") as handle:
        for record in handle:
            parts = record.split()
            # key on the bare filename, not the full path in the file
            checksums[pathlib.Path(parts[1]).name] = parts[0]
    return checksums
|
bd59d7ea798ab2e3c5654eb25659566dbb77f9c2
| 445,768 |
def set_attributes(cls, obj, values, save=True):
    """Assign a dictionary of values to a document, only using those
    attributes that the document has a field for.

    Matched keys are popped from ``values`` (mutated in place); unmatched
    keys are left behind and returned together with the save result.

    :Parameters:
        - cls (type): The document class
        - obj (Document): The object to set the attributes on
        - values (dict): The values to assign (mutated in place)
        - save (bool): Save the document afterwards?
          NOTE(review): currently unused -- the document is always saved.

    :Returns: tuple of (result of ``obj.save()``, leftover ``values`` dict)
    """
    # Iterate over a snapshot of the keys: popping from `values` while
    # iterating its live keys() view raises RuntimeError in Python 3.
    for attr in list(values):
        if attr in cls._fields:
            setattr(obj, attr, values.pop(attr))
    return obj.save(), values
|
4ff37b03c992ab2681409acb0494de37edce1a7e
| 311,329 |
import hashlib
def url_keygen(prod_id: int) -> str:
    """
    Generates a key that MTGJSON will use for redirection
    :param prod_id: Seed
    :return: URL Key (first 16 hex chars of the SHA-256 of the seed)
    """
    digest = hashlib.sha256(str(prod_id).encode()).hexdigest()
    return digest[:16]
|
6f8e828683518b1ff05f325b9d203974139d68e0
| 106,103 |
def _between_symbols(string, c1, c2):
"""Grab characters between symbols in a string.
Will return empty string if nothing is between c1 and c2."""
for char in [c1, c2]:
if char not in string:
raise ValueError("Couldn't find character {} in string {}".format(
char, string))
return string[string.index(c1)+1:string.index(c2)]
|
26e2d84eaaedcc4ca7b0b0a890368b6864994da0
| 104,902 |
def construct_pandoc_command(
    input_file=None,
    lua_filter=None,
):
    """
    Construct the Pandoc command line.

    # Parameters
    input_file:pathlib.Path
        - The file that we want to apply the lua filter to.
    lua_filter:pathlib.Path
        - The path to the lua filter to use for the word counts.

    # Return
    A list of CLI elements that will be used by subprocess.
    """
    return ["pandoc", "--lua-filter", lua_filter, input_file]
|
357f4bf76aed2328b86b21f0b706348e5306d6bc
| 690,453 |
def fbeta_precision_recall(precision_val, recall_val, beta):
    """
    Fbeta score for a given precision and recall.
    :param precision_val: precision score of a certain class
    :param recall_val: recall score of a certain class
    :param beta: beta coefficient
    :return: fbeta score (0 when precision + recall is 0)
    """
    # degenerate case: avoid division by zero
    if precision_val + recall_val == 0:
        return 0
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * (precision_val * recall_val)
    denominator = beta_sq * precision_val + recall_val
    return numerator / denominator
|
942852910557ef9f880b91da8164bc5b511a9d33
| 82,510 |
def checkload(input):
    """
    Check if the load input is integer or not. If int return int, if not return string
    """
    try:
        value = int(input)
    except ValueError:
        # not parseable as an integer: hand back the original string
        return input
    return value
|
b3b7d9d1bff27b7fc55d5c05b00e4c35d19c8d7c
| 253,092 |
def nat(x):
    """Smart constructor for naturals.

    Arg
        x: any literal
    Returns:
        natural value
    Raises:
        ValueError if x cannot be converted into int
        or the result of this conversion is negative
    """
    try:
        value = int(x)
    except Exception:
        raise ValueError("invalid literal for nat()")
    if value < 0:
        raise ValueError("invalid literal for nat()")
    return value
|
ec30fd78c7d07320d7a1f5a2256e5e8573431283
| 294,372 |
def create_lookup_url(usernames, user_fields):
    """
    Build the Twitter v2 user-lookup URL.

    Parameters
    ----------
    usernames : str
        Pre-encoded query fragment, e.g. "usernames=TwitterDev".
    user_fields : str
        Pre-encoded fields fragment, e.g. "user.fields=description,created_at".
    """
    base = "https://api.twitter.com/2/users/by"
    return f"{base}?{usernames}&{user_fields}"
|
4cdf31b97c2ae8b987d87ca28a6740976d525df9
| 606,994 |
def parse_residue_spec(resspec):
    """
    Parse a residue specification: [<chain>-][<resname>][[#]<resid>] where
    resid is /[0-9]+/.
    If resname ends in a number and a resid is also specified, the # separator
    is required.
    Returns a dictionary with keys 'chain', 'resname', and 'resid' for the
    fields that are specified. Resid will be an int.
    Parameters
    ----------
    resspec: str
    Returns
    -------
    dict
    """
    # A-LYS2 or PO4#2
    # <chain>-<resname><resid>
    # Split at most once on '-': with no '-', `chain` is [] and res is the
    # whole spec; with one (or more) the first field becomes the chain.
    *chain, res = resspec.split('-', 1)
    # Split at most once on '#' from the right: `resid` is [] when absent.
    res, *resid = res.rsplit('#', 1)
    if resid: # [] if False
        # Explicit '#' separator: everything before it is the resname.
        resname = res
        resid = resid[0]
    else:
        # No '#': peel trailing digits off the end of `res` to find where
        # the resname stops and the (optional) resid begins.
        idx = 0
        for idx, char in reversed(list(enumerate(res))):
            if not char.isdigit():
                # first non-digit from the right: resid starts after it
                idx += 1
                break
        resname = res[:idx]
        resid = res[idx:]
    # Only include the fields that were actually present in the spec.
    out = {}
    if resid:
        resid = int(resid)
        out['resid'] = resid
    if resname:
        out['resname'] = resname
    if chain:
        out['chain'] = chain[0]
    return out
|
f8b3aa8ef287567d25ab74679acbd067e6b98ce0
| 88,240 |
def normalize_skin_tone(tone):
    """
    Converts from the more visual skin tone preferences string to a more
    machine-readable format.

    Returns '' for the default tone, a tone name for known tones, and
    None for anything unrecognised.
    """
    tone_names = {
        "👌 default": '',
        "👌🏻 light": 'light',
        "👌🏼 medium-light": 'medium-light',
        "👌🏽 medium": 'medium',
        "👌🏾 medium-dark": 'medium-dark',
        "👌🏿 dark": 'dark',
    }
    # dict.get already yields None for unknown inputs
    return tone_names.get(tone)
|
4f1825643a728ec125cc92f79a0a935bc09e78aa
| 410,360 |
def rtsip_to_tag(rel_type, rel_sense, rel_id, rel_part):
    """Convert relation type, sense, id, and part to a colon-joined tag."""
    return f"{rel_type}:{rel_sense}:{rel_id}:{rel_part}"
|
ea3ed2295e1af94e080d0de995e1ac3cbb29ca2d
| 381,573 |
def splitFilename(filename):
    """Split a NIfTI filename into basename and extension.

    :Parameters:
        filename: str
            Filename to be split.
    :Returns:
        tuple: (basename, extension)
    Recognised extensions are 'nii', 'hdr' and 'img', optionally followed
    by '.gz'. If no valid NIfTI extension is found, the whole string is
    returned as basename and the extension string will be empty.
    """
    nifti_exts = ('nii', 'hdr', 'img')
    pieces = filename.split('.')
    # how many trailing dot-separated pieces belong to the extension
    if pieces[-1] == 'gz':
        n_ext = 2 if pieces[-2] in nifti_exts else 0
    else:
        n_ext = 1 if pieces[-1] in nifti_exts else 0
    if n_ext == 0:
        return filename, ''
    return '.'.join(pieces[:-n_ext]), '.'.join(pieces[-n_ext:])
|
8b8ba158b0a5cfc31c1e975c56ff0b17b37c66d6
| 222,943 |
def convert_range_to_list(x):
    """
    Returns list of numbers from given range as string
    e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
    """
    # pylint: disable=C0103
    numbers = []
    for token in x.split(','):
        if '-' in token:
            lo, hi = token.split('-')
            numbers.extend(range(int(lo), int(hi) + 1))
        elif token:
            numbers.append(int(token))
    # drop duplicates while preserving first-seen order
    return list(dict.fromkeys(numbers))
|
ff7ae6b193e9bfc20a7934cb024f8048d56a2ee2
| 576,252 |
import copy
def deepmerge_dicts(dict1, dict2):
    """Deep merge dictionaries, second dict will take priority.

    Returns a new dict; neither input is modified. Nested dicts are merged
    recursively; any non-dict value in ``dict2`` overwrites the
    corresponding key in ``dict1``.
    """
    result = copy.deepcopy(dict1)
    for key, value in dict2.items():
        if isinstance(value, dict):
            # .get() avoids a KeyError when `key` only exists in dict2;
            # `or {}` also treats a falsy existing value as an empty dict.
            result[key] = deepmerge_dicts(result.get(key) or {}, value)
        else:
            result[key] = value
    return result
|
10c2d14ca5b98fd29e860595a811b93e0305fc3d
| 167,159 |
def name(obj):
    """Return the object's class name.

    Using in template:
        '{{ obj|name }}'
    """
    return type(obj).__name__
|
e35f26f13b04ead1c77444883c80ba55147a3937
| 111,494 |
def dayFormat(day: int) -> str:
    """
    Formats a (0-6) weekday number as a full week day name, according to the current locale.
    For example: dayFormat(0) -> "Sunday".
    """
    names = ("Sunday", "Monday", "Tuesday", "Wednesday",
             "Thursday", "Friday", "Saturday")
    # modulo keeps out-of-range inputs from raising IndexError
    return names[day % 7]
|
a154bc4b59635553d82166f515e7a6a17dd14a19
| 200,011 |
def getObjectCounts(meshes):
    """
    Count the total number of vertex groups and shapes required for all
    specified meshes.
    """
    # one vertex group per non-empty weights entry
    nVertexGroups = sum(
        1
        for mesh in meshes
        if mesh.vertexWeights is not None
        for weights in mesh.vertexWeights.data
        if weights)
    # one shape per truthy shape in each mesh's (key, shape) pairs
    nShapes = sum(
        1
        for mesh in meshes
        if getattr(mesh, 'shapes', None) is not None
        for _key, shape in mesh.shapes
        if shape)
    return nVertexGroups, nShapes
|
d4e50868044326cd8af583165dc7d3274a897187
| 605,125 |
def flatten_list(seq):
    """Flatten a list-of-lists into a single list. This does only one level of flattening."""
    # sum(seq, []) re-concatenates the accumulator on every step (O(n^2));
    # a comprehension flattens in one linear pass.
    return [item for sublist in seq for item in sublist]
|
cd036e44bbbbe9f3e6d2efbe827d6586a01ece49
| 321,643 |
def concat(a: str, b: str) -> str:
    """Concatenate two strings together."""
    # The existing 'add' filter isn't suitable for strings as it will try
    # to coerce them into integers, hence the explicit str() conversion.
    return "".join((str(a), str(b)))
|
d81b1846749cf66d77a88bda8620ead2dd52a80d
| 338,093 |
def to_list(ls):
    """
    Converts ``ls`` to list if it is a tuple, or wraps ``ls`` into a list if
    it is not a list already
    """
    return list(ls) if isinstance(ls, (list, tuple)) else [ls]
|
e518c59d792b214e6b822258ce7bd01260627b85
| 175,824 |
def load_moves(filename):
    """Load a list of moves from a file,
    ignoring whitespace-only lines and stripping each move."""
    moves = []
    with open(filename) as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped:
                moves.append(stripped)
    return moves
|
138f8d9be8429a460661db719ba42cd7490434f2
| 115,736 |
def cgi_decode(s):
    """Decode the CGI-encoded string `s`:
    * replace "+" by " "
    * replace "%xx" by the character with hex number xx.
    Return the decoded string. Raise `ValueError` for invalid inputs."""
    # Mapping of hex digits to their integer values
    hex_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
    }
    # accumulate pieces in a list to avoid quadratic string concatenation
    decoded = []
    i = 0
    while i < len(s):
        c = s[i]
        if c == '+':
            decoded.append(' ')
        elif c == '%':
            # A trailing "%" or "%x" is malformed: raise ValueError as the
            # docstring promises, instead of leaking an IndexError.
            if i + 2 >= len(s):
                raise ValueError("Invalid encoding")
            digit_high, digit_low = s[i + 1], s[i + 2]
            i += 2
            if digit_high in hex_values and digit_low in hex_values:
                decoded.append(chr(hex_values[digit_high] * 16 + hex_values[digit_low]))
            else:
                raise ValueError("Invalid encoding")
        else:
            decoded.append(c)
        i += 1
    return "".join(decoded)
|
310519ac6edd52dc2131a32695e29aa5ad610f43
| 683,286 |
def Vdiode(Icell, Vcell, Rs):
    r"""
    Calculate Vdiode from current, voltage and series resistance.
    :param Icell: cell current [A]
    :param Vcell: cell voltage [V]
    :param Rs: cell series resistance [:math:`\Omega`]
    :return: diode voltage [V]
    """
    series_drop = Icell * Rs
    return Vcell + series_drop
|
45f9f7c02be6172c5ed4e36cb804a0199347ef99
| 639,571 |
def prior_min_field(field_name, field_value):
    """
    Create the "min" variant of a prior field.

    :param field_name: prior name (field name initial)
    :param field_value: field initial properties (not modified)
    :return: name of the min field, updated field properties
    """
    # shallow-copy so the caller's dict is left untouched
    updated = field_value.copy()
    updated.update(label='Min', required=False)
    return field_name + '_min', updated
|
9f331ee58e699318e678d881c0028486b746c05c
| 706,521 |
def is_ortho_tier(tier_name):
    """Return True if the tier_name matches an ortho trans,
    i.e. contains either "ipu", "trans", "trs", "toe" or "ortho" in its name.

    :param tier_name: (str)
    :returns: (bool)
    """
    lowered = tier_name.lower()
    return any(marker in lowered
               for marker in ("trans", "trs", "toe", "ortho", "ipu"))
|
b530de9caa44a365fa24c16d2a21f146a0818486
| 407,809 |
def max_sequence_length_from_log(log):
    """
    Returns length of the longest sequence in the event log.
    :param log: event log (iterable of iterables).
    :return: max_seq_length (0 for an empty log).
    """
    # Count by iteration, since sequences may be iterables without len();
    # max(..., default=0) handles the empty-log case.
    return max((sum(1 for _ in sequence) for sequence in log), default=0)
|
626a045a344e37e3e99f864665e9990fb12d87a2
| 670,975 |
def read_paths(path):
    """Return a list of paths found in the file at path.

    Each line is stripped; blank lines yield empty strings.
    """
    with open(path, "r", encoding="utf-8") as in_file:
        return [line.strip() for line in in_file]
|
12bccf7448a0960d29bae24a25ac04c984799acb
| 129,410 |
def find_subtree(tree, node_id):
    """
    Walk ``tree`` following the digits of ``node_id`` and return the node
    reached.

    Args:
        tree: Node containing full tree
        node_id: string of digits; each digit indexes into ``tree.args``
    Returns:
        the node reached after consuming every digit of ``node_id``
    """
    # An empty id means we have arrived at the target node.
    if node_id == "":
        return tree
    branch = int(node_id[0])
    # Recurse into the chosen child with the remaining digits
    # (node_id[1:] is "" when only one digit was left).
    return find_subtree(tree.args[branch], node_id[1:])
|
a899d00d25eac865c29b2d0926fb5e9cec85deae
| 671,636 |
def parse_optionalString(value):
    """Parse an optional string: empty/falsy values become None."""
    return value or None
|
b3f45005b0cbffe7214f8d3240ecf79705fa9743
| 645,407 |
import re
def get_cough_type(cough_path: str) -> str:
    """Returns cough type {`cough_1`, `cough_2`, `cough_3`} from audio path.
    Note: this function is specific to wiai-facility/ dataset.
    """
    # match e.g. "cough_2.wav"; assumes exactly one such occurrence
    matches = re.findall(r'cough_[\d]\.wav', cough_path)
    stem, _dot, _ext = matches[0].partition('.')
    return stem
|
ace0935b971138a691b9328b9380560528849aaf
| 188,706 |
def _get_excel_function_name(cls, method_name):
"""return an Excel function name for a method"""
return f"og.{cls.name}.{method_name}"
|
4a7be1b36087dd72f51266b5c6c8db8092e5e23d
| 468,347 |
def comma_sep(value, precision=0):
    """Convert a number to #,###.## notation as `str`.

    `precision` is the number of decimal places (default 0).
    """
    return format(value, f",.{precision}f")
|
766821b2a735c923e32ce5cff6a4b01353e864e2
| 101,252 |
def exists_key_in_dicts_list(dict_list, key):
    """From a list of dicts, return the first dict in which ``key`` exists.

    See also https://stackoverflow.com/questions/14790980/how-can-i-check-if-key-exists-in-list-of-dicts-in-python

    Parameters
    ----------
    dict_list: list
        A list of dictionaries.
    key: obj
        The obj to be found in dict keys

    Returns
    -------
    Dict or None
    """
    # The previous enumerate() index was unused; iterate the dicts directly.
    return next((d for d in dict_list if key in d), None)
|
27ff74ea0258d394685b28560232628be96a70a4
| 612,166 |
def event_message() -> str:
    """Return the static message we use for all events."""
    message = "Automation for the people!"
    return message
|
4bfaa350b2fed2d6364ecfa2a22a730f25f42b06
| 238,631 |
def age_to_minutes(age: str) -> int:
    """ Convert job age to minutes.

    Parameters:
        age (str): age of job, e.g. "30m", "2h", "3d". Ages greater than
            7 days are formatted as "mmm d" and are capped at 7 days.
    Returns:
        length (int): age of job in minutes.
    """
    minutes_per_unit = {'m': 1, 'h': 60, 'd': 60 * 24}
    units = age[-1]
    if units in minutes_per_unit:
        return int(age[:-1]) * minutes_per_unit[units]
    # Ages greater than 7 days formatted as "mmm d": the prefix is not
    # numeric, so only call int() after recognising the unit (the previous
    # version converted first and raised ValueError on these inputs).
    return 60 * 24 * 7
|
1ce75c0907009a36b499a0ee9878e201b8b7c654
| 445,951 |
def _singlethread_iteration(selection_iterator, scoring_fn):
"""Handles a single pass of the abstract variable importance algorithm,
assuming a single worker thread
:param selection_iterator: an iterator which yields triples
``(variable, training_data, scoring_data)``. Typically a
:class:`PermutationImportance.selection_strategies.SelectionStrategy`
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> float``
:returns: a dict of ``{var: score}``
"""
result = dict()
for var, training_data, scoring_data in selection_iterator:
score = scoring_fn(training_data, scoring_data)
result[var] = score
return result
|
58ab64e4d4ece0c7629caf3d2c1582f2cc8b4133
| 607,812 |
def status(logger):
    """
    Creates a one-line summary on the actions that were logged by the given
    Logger.

    :type logger: Logger
    :param logger: The logger that recorded what happened in the queue.
    :rtype: string
    :return: A string summarizing the status.
    """
    failed = logger.get_aborted_actions()
    ok = logger.get_succeeded_actions()
    total = failed + ok
    if total == 0:
        return 'No actions done'
    if total == 1:
        return 'One action done (succeeded)' if ok == 1 else 'One action done (failed)'
    if ok == total:
        return '%d actions total (all succeeded)' % total
    if ok == 0:
        return '%d actions total (all failed)' % total
    return '%d actions total (%d failed, %d succeeded)' % (total, failed, ok)
|
944ae708c23db06b22d6cebe6df466d0388b56c5
| 515,887 |
def first_down(items):
    """Return True if the first item is down (marked '-')."""
    head = items[0]
    return head == '-'
|
e24afe79971572de01676bda608a317c83fb7792
| 683,080 |
def load_owm_api_key() -> str:
    """Load the Open-Weather-Map API key from the ``OWM_API_KEY`` file.

    :return: The api key as a string, with surrounding whitespace stripped.
    """
    with open("OWM_API_KEY") as infile:
        contents = infile.read()
    return contents.strip()
|
9b41fa7feb472945b34a6fa471863a6c6dd00a2a
| 570,987 |
def invert_dict(dictionary):
    """
    Invert a dict object, swapping keys and values.

    Values must be hashable; duplicate values keep the last key seen.
    """
    return dict(zip(dictionary.values(), dictionary.keys()))
|
c82fc7f8ba86a09cff371dec31da8853ec369fc7
| 296,837 |
def _value_and_precision(message) -> str:
"""Returns value +/- precision."""
txt = f"{message.value:.7g}"
if message.precision:
txt += f" ± {message.precision:.7g}"
return txt
|
63e89d4b61e43c4513c891ee886a9e7fe15dbb1b
| 259,105 |
def split_off_attrib(xpath):
    """
    Splits off attribute of the given xpath (part after @).

    :param xpath: str of the xpath to split up
    :returns: tuple of (path, attribute)
    """
    pieces = xpath.split('/@')
    # exactly one '/@' separator is expected
    assert len(pieces) == 2, f"Splitting off attribute failed for: '{pieces}'"
    return tuple(pieces)
|
6a4669de50fb78fa471310735a7b7c883d4931f8
| 680,629 |
def facility_name(hutch):
    """Return the facility name ("<HUTCH>_Instrument") for an instrument."""
    return f'{hutch.upper()}_Instrument'
|
fd9f5c9974fc24104390e0f15dd229fb7efa272f
| 175,086 |
def _generateSpecial_Volume3DOpts_clipPlane(
overlayList, displayCtx, source, longArg):
"""Generates arguemnts for the ``Volume3DOpts.clipPlane`` option. """
args = []
for i in range(source.numClipPlanes):
args += [longArg,
'{:0.3f}'.format(source.clipPosition[ i]),
'{:0.3f}'.format(source.clipAzimuth[ i]),
'{:0.3f}'.format(source.clipInclination[i])]
return args
|
9cb1b05b14639111be9933e9789c8c9277e1a635
| 339,734 |
def fnsMakeReadable(sCmd, lOut):
    """ Put HTML line breaks into command and its output lines. """
    joined_output = "<br/>".join(lOut)
    return f"{sCmd}<br/>{joined_output}"
|
b3d3aea35566b41e16b05ed59d3999f9c1efa2a8
| 549,336 |
def get_item(dictionary, key):
    """
    Very simple template filter to easily get an item from 'dictionary'
    using the given 'key' (None if absent).

    Usage:
        {{ mydict|get_item:'foo' }}
    """
    value = dictionary.get(key)
    return value
|
7739422264d68078364de41d3bef57caaa8c0b15
| 220,770 |
def get_diff(old: dict, new: dict, value: str, statistics=False):
    """ Get the difference between old and new osu! user data.

    Returns 0 when either snapshot is missing; falsy stored values are
    treated as 0.0.
    """
    if not old or not new:
        return 0

    def _as_float(data):
        # look inside the nested "statistics" dict when requested
        raw = data["statistics"][value] if statistics else data[value]
        return float(raw) if raw else 0.0

    return _as_float(new) - _as_float(old)
|
3354fc212916bea4596d1a78a46a6a2aff2d465d
| 106,245 |
def _extrapolate_point(point_1: tuple[float, float], point_2: tuple[float, float]) -> tuple[float, float]:
"""Create a point extrapoled in p1->p2 direction."""
# p1 = [p1.x, p1.y]
# p2 = [p2.x, p2.y]
extrap_ratio = 10
return (point_1[0]+extrap_ratio*(point_2[0]-point_1[0]), point_1[1]+extrap_ratio*(point_2[1]-point_1[1]))
|
f025c61ae50fd24c7393b218e1168ccbeed72154
| 516,086 |
from typing import List
from typing import Dict
def merge_results(results: List[Dict]) -> List[Dict]:
    """
    Merges procedure/file dicts corresponding to the same procedure/file
    after recursive filtering.

    :param results: list of procedure/file dictionaries to merge
    :return: list of merged procedure/file dictionaries, in first-seen order
    """
    merged = []
    for entry in results:
        # find an already-merged dict with the same identity triple
        target = next(
            (c for c in merged
             if c['name'] == entry['name']
             and c['path'] == entry['path']
             and c['schema'] == entry['schema']),
            None)
        if target is None:
            target = {
                'name': entry['name'],
                'path': entry['path'],
                'schema': entry['schema'],
                'statements': []
            }
            merged.append(target)
        target['statements'].extend(entry['statements'])
    return merged
|
23f8b7807ea9600908d83b5883f4bc386e7966fa
| 535,121 |
def getvalue(row, name, mapping=None):
    """If name in mapping, return row[mapping[name]], else return row[name].

    :param row: mapping (e.g. dict or DB row) to read from
    :param name: logical column name
    :param mapping: optional {name: actual_key} renaming table
    """
    # Default changed from a mutable `{}` (shared across calls) to None;
    # behavior for all callers is unchanged.
    if mapping and name in mapping:
        return row[mapping[name]]
    return row[name]
|
d3c0a5b6c8a9c28e59a220159b9a7b50437a886e
| 209,476 |
import zipfile
def listing(zip_path):
    """Get list of all the filepaths in a ZIP.

    Args:
        zip_path: path to the ZIP file
    Returns: a list of strings, the ZIP member filepaths
    Raises:
        any file i/o exceptions
    """
    with zipfile.ZipFile(zip_path) as archive:
        return archive.namelist()
|
702efd93a2ba6cd462678e493eccbea6829cb28f
| 689,996 |
import hashlib
def md5(string):
    """
    Hash a string with MD5, used for password and Gravatar.

    >>> md5('')
    'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5('admin')
    '21232f297a57a5a743894a0e4a801fc3'
    """
    digest = hashlib.md5(string.encode('UTF-8'))
    return digest.hexdigest()
|
9a2b1d391b8a89dc60f5129d0988e83abeba3241
| 496,647 |
import pytz
def add_utc_timezone_if_necessary(dt):
    """Makes the datetime timezone-aware, if necessary, by setting its timezone
    to UTC.

    Args:
        dt: a datetime
    Returns:
        a timezone-aware datetime (unchanged if already aware)
    """
    if dt.tzinfo is not None:
        return dt
    return dt.replace(tzinfo=pytz.utc)
|
624d68934bef5a1569853f0e6b6b314396c1c221
| 365,038 |
import torch
def gcn_reduce(nodes):
    """
    Compute the new 'h' features by summing received 'msg' in each node's mailbox.

    :param nodes: batch object exposing ``mailbox['msg']`` (a tensor)
    :return: dict with the aggregated 'h' feature
    """
    messages = nodes.mailbox['msg']
    # sum over the mailbox (neighbour) dimension
    return {'h': messages.sum(dim=1)}
|
787ae570cb269ab6e5693758d3d691b9c7bd97c9
| 307,418 |
def preconvert_snowflake(snowflake, name):
    """
    Converts the given `snowflake` to an acceptable value by the wrapper.

    Parameters
    ----------
    snowflake : `str` or `int`
        The snowflake to convert.
    name : `str`
        The name of the snowflake.

    Returns
    -------
    snowflake : `int`

    Raises
    ------
    TypeError
        - If `snowflake` was not passed neither as `int` or as `str` instance.
    ValueError
        - If `snowflake` was passed as `str` and cannot be converted to `int`.
        - If the converted `snowflake` is negative or it's bit length is over 64.
    """
    snowflake_type = snowflake.__class__
    if snowflake_type is int:
        # already an exact int: nothing to convert
        pass
    elif issubclass(snowflake_type, int):
        # int subclass (e.g. bool / IntEnum): normalise to a plain int
        snowflake = int(snowflake)
    # JSON uint64 is str
    elif issubclass(snowflake_type, str):
        if 6 < len(snowflake) < 21 and snowflake.isdigit():
            snowflake = int(snowflake)
        else:
            # fixed: previously rendered as "...not a validsnowflake..."
            raise ValueError(f'`{name}` can be passed as `int` or `str` instance, got `str` instance, but not a valid '
                f'snowflake (7-20 length, digit only), got {snowflake!r}.')
    else:
        raise TypeError(f'`{name}` can be passed as `int` or `str` instance, got {snowflake_type.__name__}.')
    if snowflake < 0 or snowflake > ((1<<64)-1):
        raise ValueError(f'`{name}` can be only uint64, got {snowflake!r}.')
    return snowflake
|
fa7db596998759a0c79a76139036cd2ef4d4fcb9
| 393,266 |
def neighbors(currentGen, row, column):
    """
    Count the live (== 1) neighbours of the cell at (row, column) in a
    2D array, excluding the target cell itself.
    """
    # clamp the window's top-left corner at the grid boundary; slicing
    # already clamps the bottom-right for us
    top = max(row - 1, 0)
    left = max(column - 1, 0)
    live = sum(
        cell == 1
        for cells in currentGen[top:row + 2]
        for cell in cells[left:column + 2])
    # the window includes the target cell: subtract it when alive
    if currentGen[row][column] == 1:
        live -= 1
    return live
|
35540a456e453db90716d8b41cdb9e73cfd11ad5
| 529,308 |
def adoc_with_preview_command(event=None, verbose=True):
    """Run the adoc command, then show the result in the browser."""
    commander = event.get('c') if event else None
    if not commander:
        return None
    return commander.markupCommands.adoc_command(event, preview=True, verbose=verbose)
|
2b25bff2683ba40d92529bd3152d6eb6f7191ed0
| 461,281 |
def make_linked_methods(parent, o):
    """
    Generate 'magic' methods to fetch linked objects so that
        org = Organization(pd, 123)
        o.deals() # return an iterable list of deals
        o.files() # return an iterable list of files
    """
    segment = o.RESOURCE_SEGMENT
    def linked_objects(**kw):
        # **kw expands to nothing when empty, so a single call covers both
        # the "with filters" and "no filters" cases (the old if/else was
        # redundant).
        return parent.list_linked_objects(o, **kw)
    linked_objects.__name__ = segment
    linked_objects.__doc__ = "Fetch %s linked to this %s" % (
        segment, parent.RESOURCE)
    return linked_objects
|
883cfa1710245a06bec546bbc4d76e82af5ddaa3
| 387,425 |
def parse_certificates(soup):
    """
    Parses the certificates specific to the United States.

    :param soup: Beautiful soup object for the certificates section
    :return: List of the ratings that were found
    """
    # the first anchor is not a rating entry, so drop it
    anchors = soup.find_all('a')[1:]
    codes = [anchor.string for anchor in anchors]
    # sorry international folks, only interested in the US ratings
    return [code for code in codes if code.startswith('United States')]
|
060cdf4f2e3707e2c544b7d325e45906d5135c41
| 229,272 |
def keypoint_scale(keypoint, scale_x, scale_y, **params):
    """Scale a keypoint's coordinates by scale_x / scale_y.

    The angle is unchanged; size scales by the larger factor.
    """
    x, y, angle, size = keypoint
    largest_scale = max(scale_x, scale_y)
    return [x * scale_x, y * scale_y, angle, size * largest_scale]
|
82582ee98a97d5b76fddaf6ac76d25ae7de9b95d
| 471,876 |
from typing import Union
def parse_bool(s: Union[str, bool]) -> bool:
    """
    Interpret a string (etc) representing a boolean value.

    Parameters
    ----------
    s
        A string (etc) representing a boolean value.

    Returns
    -------
    b
        True or False.
    """
    if isinstance(s, bool):
        return s
    lowered = s.lower()
    if lowered in {"false", "f", "no", "n", "off", "0"}:
        return False
    if lowered in {"true", "t", "yes", "y", "on", "1"}:
        return True
    # mirror the original dict-lookup failure mode
    raise KeyError(lowered)
|
dbf85920475601b143f8eea416788c93fa320f6a
| 145,665 |
def collapse(iter_flow_result: dict) -> list:
    """ Return iter_flow dict result as a single flat list"""
    # Iterate .values() directly (the old code indexed by keys) and flatten
    # in one comprehension instead of building the nested list first.
    return [item for sublist in iter_flow_result.values() for item in sublist]
|
ada0d654ed36df8168b4835fbde3b91b7f56fb72
| 11,431 |
def int_to_be_bytes(x):
    """Convert an integer to a list of four byte values (big-endian order)."""
    return [(x >> shift) & 0xff for shift in (24, 16, 8, 0)]
|
69c2464b56fcc93ea4b5e0bb28622bcfe0341f60
| 580,727 |
def headers(mime, length):
    """Return a list of HTTP headers given the MIME type and the length of the
    content, in bytes (in integer or string format)."""
    return [
        ('Content-Type', mime),
        ('Content-Length', str(length)),
    ]
|
da5f73591bf9d4bbc8b1d01f4d6babf0de54ce00
| 13,287 |
from datetime import datetime
def first_of_next_month(dte):
    """
    Return the first day of the month after the month containing the specified
    date, except when dte is already the first day of the month, in which case
    dte itself is returned.

    Argument:
        dte - the date used to determine the end of the month.
    """
    if dte.day == 1:
        return dte
    # roll over December into January of the next year
    if dte.month == 12:
        return datetime(dte.year + 1, 1, 1)
    return datetime(dte.year, dte.month + 1, 1)
|
20711116b1541b804ee975d8130a8391da6a24db
| 522,868 |
def create(collection, dict, preproc):
    """
    Create BoW corpus of documents collection in a streaming-way.

    Both the preprocessing and the bag-of-words conversion are lazy
    generator expressions, so nothing is materialised up front.
    """
    preprocessed = (preproc.proc(doc) for doc in collection)
    return (dict.doc2bow(doc) for doc in preprocessed)
|
fd79e491ffeddadd6959b4d5c48424beb81c3f54
| 412,025 |
def _pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
    """
    Generate an or-ed series of shifts for the first 'count' digits.
    Assumes that they fit into 'join_type'.
    (d[2] << 2*n) | (d[1] << 1*n) | d[0]

    Returns a string of C source text, not a Python value.
    """
    def shift(n):
        # Emits a C conditional so the shift amount is clamped to 0 when it
        # would exceed the width of join_type:
        # avoid compiler warnings for overly large shifts that will be discarded anyway
        return " << (%d * PyLong_SHIFT < 8 * sizeof(%s) ? %d * PyLong_SHIFT : 0)" % (n, join_type, n) if n else ''
    # Highest digit first (count-1 down to 0), each cast to join_type,
    # shifted into place and or-ed together; the whole thing parenthesised.
    return '(%s)' % ' | '.join(
        "(((%s)%s[%d])%s)" % (join_type, digits_ptr, i, shift(i))
        for i in range(count-1, -1, -1))
|
e2dd4c5190ccc1eb96b4052a1ab7772db4c77ffd
| 583,947 |
def deg2tenths_of_arcminute(deg):
    """
    Return *deg* converted to tenths of arcminutes (i.e., arcminutes *
    10).
    """
    # same operation order as before (10 * deg * 60) to keep float results
    # bit-identical
    tenths_scaled = 10 * deg
    return tenths_scaled * 60
|
2f8ab77642d91eb4e061ad30727ed0098257501c
| 76,686 |
from typing import Tuple
from typing import List
def analyzeInput(inputString: str) -> Tuple[List[str], List[str], List[str]]:
    """
    Split the input string so it can be read.

    Args:
        inputString (str): has to be like "elemA|elemB|... factor*vdw elemC|elemD|..."
    Returns:
        tuple: three lists, like (['C', 'N'], ['2', 'vdw'], ['C', 'O'])
    """
    tokens = inputString.split()
    elements_a = tokens[0].split("|")
    length_spec = tokens[1].split("*")
    elements_b = tokens[2].split("|")
    return (elements_a, length_spec, elements_b)
|
ca74593527fd2ced8bd351fcd7604497c2076a8a
| 604,048 |
import requests
def get_data_from_country(country_name: str) -> list:
    """
    Fetch COVID data from the api for a specific country.

    Parameters
    ----------
    country_name: str, required
        Name of country to get data about. It must be the slug value
        provided by the api site.

    Returns
    -------
    List with data from the beginning of the virus in the country.

    Example
    -------
    >>> poland_data = get_data_from_country('poland')
    """
    endpoint = f"https://api.covid19api.com/total/dayone/country/{country_name}"
    response = requests.request("GET", endpoint, headers={}, data={})
    return response.json()
|
24cc66c6d9801c26cec6d9aac91e886dfc54a8a9
| 176,121 |
def mse(a, p):
    """Calculate the squared error between actual `a` and predicted `p`."""
    error = p - a
    return abs(error) ** 2
|
eec526fae0e99c69bd43ab0e778e2a0e97b47908
| 398,108 |
def int_to_bytes(num, lens):
    """
    Convert an int to big-endian bytes.

    :param num: integer value
    :param lens: target byte length of the result
    :return: bytes object
    """
    return int(num).to_bytes(lens, byteorder='big')
|
2dee2d30ba5fb93cd9f8b74a0dc16e9c0ca20dad
| 45,245 |
def check_streams(streams='*'):
    """
    Checks that the streams given are a list containing only possible streams, or is all streams - '*'.
    """
    possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
    if not isinstance(streams, list):
        # '*' is the wildcard for "every stream"
        if streams == '*':
            return possible_streams
        raise ValueError(f"Streams could not be recognised, must be one of: {', '.join(possible_streams)}")
    unrecognised_streams = list(set(streams) - set(possible_streams))
    if unrecognised_streams:
        unrecognised_streams_to_print = ''.join(["'"+stream+"', " for stream in unrecognised_streams])[:-2]
        raise ValueError(f"Streams {unrecognised_streams_to_print} could not be recognised, must be one of: {', '.join(possible_streams)}")
    return streams
|
3d96377b7b519e438841aab66dbc1cbdd492a1a1
| 686,013 |
def uncurry_nested_dictionary(curried_dict):
    """
    Transform dictionary from (key_a -> key_b -> float) to
    (key_a, key_b) -> float
    """
    # dict comprehension replaces the manual nested-loop build
    return {
        (outer, inner): value
        for outer, inner_dict in curried_dict.items()
        for inner, value in inner_dict.items()
    }
|
7df8d770ebd643c9fc53f0e65a309431984f4d01
| 190,317 |
def _read_txt(file_path: str) -> str:
"""
Read specified file path's text.
Parameters
----------
file_path : str
Target file path to read.
Returns
-------
txt : str
Read txt.
"""
with open(file_path) as f:
txt: str = f.read()
return txt
|
5f0657ee223ca9f8d96bb612e35304a405d2339e
| 707,137 |
def add_old_flag(df):
    """
    Mark all mechanisms which were originally evaluated in StatDP [1].

    [1] Ding, Zeyu, Yuxin Wang, Guanhong Wang, Danfeng Zhang, and Daniel Kifer.
    "Detecting Violations of Differential Privacy." In Proceedings of the 2018
    ACM SIGSAC Conference on Computer and Communications Security - CCS '18.
    https://doi.org/10.1145/3243734.3243818.
    """
    old_mechanisms = (
        ['NoisyHist1', 'NoisyHist2']
        + [f'ReportNoisyMax{i}' for i in range(1, 5)]
        + [f'SVT{i}' for i in (1, 3, 4, 5, 6)]  # SVT2 was not evaluated
    )
    df['old'] = False
    df.loc[old_mechanisms, 'old'] = True
    return df.sort_values(by=['old', 'mechanism'])
|
65170264e46d917aeb69d331054e7bdc23f3f774
| 589,787 |
def label_vector_from_samples(samples):
    """
    Produce a vector of labels for the sample vector provided

    :param list(str) samples: List of samples to derive labels for
    :return: Label vector
    :rtype: list(str)
    """
    def _label(sample):
        # Non-TCGA samples are labeled as GTEx.
        if not sample.startswith('TCGA'):
            return 'gtex'
        # TCGA sample-type suffix: 11 -> normal tissue, 01 -> tumor.
        if sample.endswith('11'):
            return 'tcga-normal'
        if sample.endswith('01'):
            return 'tcga-tumor'
        return 'tcga-other'

    return [_label(sample) for sample in samples]
|
b6f21c72af4c66877ce5d1c28e145595549c6673
| 474,190 |
import re
def read_wordlist(filename, format='diceware'):
    """
    Return a list of the words in the wordlist `filename`.

    :param filename: path of the word-list file to read.
    :param format: 'diceware' (lines of "NNNNN<TAB>word") or 'simple'
        (one word per line).
    :return: list of words in file order; non-matching lines are skipped.
    :raises ValueError: if `format` is not a supported format.
    """
    if format == 'diceware':
        expression = re.compile(r"^\d{5}\t(?P<word>\S+)$")
    elif format == 'simple':
        expression = re.compile(r"^(?P<word>\S+)$")
    else:
        # Bug fix: the original message read '"{}"is not' — missing the
        # space between the quoted format name and the rest of the sentence.
        raise ValueError(
            '"{}" is not a supported word list format.'.format(format))
    words = []
    with open(filename, 'r') as file_:
        for line in file_:
            # `$` matches just before the trailing newline, so lines
            # need not be stripped first.
            match = expression.match(line)
            if match:
                words.append(match.group('word'))
    return words
|
ced3d7c8172d4daf107146c6fdcc059713d76a82
| 227,981 |
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
    """
    Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
    these may not be able to be used out of the box.
    :param current_tags_dict:
    :param new_tags_dict:
    :param purge_tags:
    :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
    :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
    """
    # Existing keys absent from the desired tags are removed (only when
    # purging is enabled).
    if purge_tags:
        tag_keys_to_unset = [key for key in current_tags_dict
                             if key not in new_tags_dict]
    else:
        tag_keys_to_unset = []
    # Desired keys whose value differs from (or is missing in) the current
    # tags must be (re)set.
    tag_key_value_pairs_to_set = {
        key: value for key, value in new_tags_dict.items()
        if current_tags_dict.get(key) != value
    }
    return tag_key_value_pairs_to_set, tag_keys_to_unset
|
1918bda2184b40cf7d24deafa341c1a4fb29ca99
| 448,577 |
def bytes_to_text(inbytes):
    """
    (bytes) -> (str)
    Format a bytes sequence as an upper-case hexadecimal string.

    Note: the original docstring documented a ``sep`` separator parameter
    that was never implemented; the docstring now matches the signature.

    :param inbytes: byte sequence to be represented as a string,
        or None (treated as empty).
    :return: upper-case hex representation of the bytes; '' for None.
    """
    if inbytes is None:
        return ""
    return inbytes.hex().upper()
|
6f7b89057e235cd2cf0d21f3d152cdc7ef9346d8
| 157,615 |
def get_filecontent(filename):
    """Get file data as text."""
    with open(filename) as handle:
        content = handle.read()
    return content
|
64c39c89daaaea946437890d0a59795ab600528b
| 485,967 |
from collections import OrderedDict
def _read_tokens(corpus_soup):
    """ Obtain all tokens in current document.

    Arguments
    ---------
    corpus_soup: bs4.element.Tag
        Wrapped XML element containing the document (<tc:TextCorpus ...> tag).

    Returns
    -------
    dict[str, str]:
        Mapping of token IDs to raw (whitespace-stripped) tokens,
        in document order.
    """
    # Fix: import OrderedDict from `collections`, not `typing` — the typing
    # alias is deprecated as a runtime constructor since Python 3.9.
    id_to_tok = OrderedDict()
    # The unused enumerate() index from the original has been dropped.
    for el in corpus_soup.findAll("tc:token"):
        id_to_tok[el["id"]] = el.text.strip()
    return id_to_tok
|
8ef23ac717dbcaa63fe037a7ff48dce4ccf22843
| 192,895 |
def DecodeMyPyString(s):
    # type: (str) -> str
    """Workaround for MyPy's weird escaping.

    Used below and in cppgen_pass.py.
    """
    # Round-trip through bytes so escape sequences such as '\\t' in the
    # MyPy-provided string become real control characters ('\t'), e.g. in
    # Python 3: b'\\t'.decode('unicode_escape') == '\t'.
    encoded = s.encode('utf-8')
    return encoded.decode('unicode_escape')
|
a05c06c8ccc8f5a3d650fabb01cac1d78ef1062f
| 179,092 |
def intfloat2int(x):
    """convert floats like 1.0, 100.0, etc. to int, if possible"""
    # Exact `type` check (not isinstance) deliberately excludes float
    # subclasses; only whole-valued plain floats are converted.
    is_whole_float = type(x) is float and x % 1 == 0
    return int(x) if is_whole_float else x
|
5570ec8d247f66037ad9dfba13a1e07a33faf0be
| 284,632 |
def fibonacci(n: int) -> int:
    """
    The Fibonacci sequence is a list of numbers where the subsequent number
    is the sum of the previous two.

    :param n: The 1-based position in the Fibonacci sequence.
    :return: The Fibonacci number at position n
        (fibonacci(1) == 0, fibonacci(2) == 1, ...).
    :raises ValueError: if n is not positive.
    """
    if n <= 0:
        # Bug fix: the original followed this raise with an unreachable
        # `exit(code=1)` call; raising is the correct way to reject input.
        raise ValueError("The number must be greater than zero.")
    # Iterative computation: O(n) time instead of the exponential
    # double recursion of the naive definition; values are unchanged.
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return prev
|
eca0fad95fa13bc3884f5458303171dbbda22505
| 347,160 |
import re
def _replace_type_with_say(text: str) -> str:
    """Replace every whole word TYPE (any letter case) with SAY.

    >>> _replace_type_with_say("TYPED")
    'TYPED'
    >>> _replace_type_with_say("TYPE")
    'SAY'
    >>> _replace_type_with_say("TYPE W RATHER THAN WEST")
    'SAY W RATHER THAN WEST'
    >>> _replace_type_with_say('WELCOME TO ADVENTURE!! WOULD YOU LIKE INSTRUCTIONS? - - - (TYPE "RESTART" TO RESTART THE GAME.)') #doctest: +ELLIPSIS
    'WELCOME...SAY "RESTART"...'
    """
    # Word boundaries keep words like "TYPED" intact.
    pattern = re.compile(r"\bTYPE\b", re.IGNORECASE)
    return pattern.sub("SAY", text)
|
34b5486cb120465b5d28c415fba21309618b971c
| 469,611 |
def paths_only(path):
    """
    Return a list containing only the pathnames of the given path list, not the types.
    """
    # Each entry is a (pathname, type) pair; keep only the pathname.
    return [entry[0] for entry in path]
|
31638fbaa6713c6fbed7d382eaf15d9caf0a2a58
| 534,287 |
import json
def create_settings(settings_path):
    """Create settings file.

    Writes a default (disabled, no themes) configuration as pretty-printed
    JSON to `settings_path`.

    :param settings_path: destination file path.
    :return: True if the write failed, False on success (best-effort:
        any write error is swallowed and reported via the flag).
    """
    default_theme = {
        "enabled": False,
        "themes": [],
    }
    serialized = json.dumps(
        default_theme, sort_keys=True, indent=4, separators=(',', ': ')
    )
    try:
        with open(settings_path, 'w') as handle:
            handle.write(serialized + "\n")
    except Exception:
        return True
    return False
|
ac75dd050c9c1c7b14944aa1e9ae04d2b05d5fa4
| 427,332 |
import math
def chars_from_nbytes(nbytes):
    """To how many characters do n-bytes encode with Base58-ISCC?"""
    # Base-58 digits needed to represent any nbytes-byte value:
    # ceil(log58(256 ** nbytes)).
    value_space = 256 ** nbytes
    return math.ceil(math.log(value_space, 58))
|
01dd6b6fbf37ae8e823473a0bbb7957ac9d5ad27
| 260,847 |
def is_not_object(x):
    """Helper function for selecting only object cols"""
    # x is a (name, dtype) pair; True when the dtype is not 'object'.
    dtype_name = str(x[1])
    return dtype_name != 'object'
|
70748353549026e9a6d8750d69c9789baf5d963a
| 184,335 |
from typing import Iterable
from typing import Any
def filter_remove_type(iterable: Iterable[Any], types: Iterable[type]) -> Iterable[Any]:
    """Filter certain types out of an iterable.

    Parameters
    ----------
    iterable : Iterable[Any]
        The iterable to filter.
    types : Iterable[Any]
        The types to filter out of the iterable.

    Returns
    -------
    list
        A list of filtered types from the iterable.
    """
    # Exact type match (not isinstance): subclasses of an excluded type
    # are kept, matching the original behavior.
    return [item for item in iterable if type(item) not in types]
|
29b6216844c1ae9e80b2cb13754edff5a7457ec2
| 529,574 |
def check_status(results):
    """Make sure list of results succeeded

    Parameters
    ----------
    results : response object
        HTTP response whose JSON body is a result dict or a list of them.

    Returns
    -------
    The parsed JSON payload, if every result reported SUCCESS.

    Raises
    ------
    RuntimeError if any result's 'status' is not 'SUCCESS';
    also propagates HTTP errors via raise_for_status().
    """
    results.raise_for_status()
    payload = results.json()

    def _assert_success(entry):
        """Raise if a single result did not report SUCCESS."""
        if entry['status'] != 'SUCCESS':
            raise RuntimeError(str(entry['object']))

    if isinstance(payload, list):
        for entry in payload:
            _assert_success(entry)
    else:
        _assert_success(payload)
    return payload
|
98382c8676f5987336f1c5b34f478d9839babe6f
| 532,971 |
def ithor_scene_names(scene_type="kitchen", levels=None):
    """
    Returns a list of scene names.

    Args:
        scene_type (str): type of scene e.g. kitchen (case-insensitive).
        levels (enumerable): the levels you want to include.
            Note that this should always contain numbers greater than
            or equal to 1 and less than or equal to 30,
            regardless of scene_type.

    Raises:
        ValueError: if a level is out of range or scene_type is unknown.
    """
    if levels is not None:
        if max(levels) > 30 or min(levels) < 1:
            raise ValueError("Invalid levels. Must be >= 1 and < 31")
    # FloorPlan numbering offset for each scene type in iTHOR.
    offsets = {
        "kitchen": 0,
        "living_room": 200,
        "bedroom": 300,
        "bathroom": 400,
    }
    # Bug fix: the original checked `scene_type.lower() in scenes` but then
    # indexed with the raw `scene_type`, so mixed-case input like "Kitchen"
    # raised KeyError instead of returning scenes. Normalize once and reuse.
    key = scene_type.lower()
    if key not in offsets:
        raise ValueError("Unknown scene type {}".format(scene_type))
    offset = offsets[key]
    if levels is None:
        levels = range(1, 31)
    return [f"FloorPlan{offset + i}" for i in levels]
|
19533f80ee493172052f3bf136e4b00817ff9664
| 302,782 |
def subscript(text: str) -> str:
    """
    Return the *text* surrounded by subscript HTML tags.

    Subscript text appears half a character below the normal line,
    and is sometimes rendered in a smaller font.
    Subscript text can be used for chemical formulas.

    >>> subscript("foo")
    '<sub>foo</sub>'
    """
    return "<sub>" + text + "</sub>"
|
48cfc245c863b569aef83743ca7d39a1e02878da
| 42,272 |
def deltatime_format(a, b):
    """ Compute and format the time elapsed between two points in time.

    Args:
        a Earlier point-in-time
        b Later point-in-time
    Returns:
        Elapsed time integer (in s),
        Formatted elapsed time string (human-readable way)
    """
    elapsed = b - a
    # Decompose the elapsed seconds into days / hours / minutes / seconds.
    remainder = elapsed
    seconds = remainder % 60
    remainder //= 60
    minutes = remainder % 60
    remainder //= 60
    hours = remainder % 24
    days = remainder // 24
    formatted = f"{days} day(s), {hours} hour(s), {minutes} min(s), {seconds} sec(s)"
    return elapsed, formatted
|
0478dd50d7d8e4673058b4096cb0247352a80f6f
| 39,299 |
def gen_derived(data):
    """
    Generate derived information
    Should be called last
    """
    # '_kb_layout' is a uint8_t matrix with 6*14 bytes per layer, so the
    # layer count is its total byte length divided by 6*14.
    layout_bytes = data['layout-matrices']['_kb_layout']['length']
    bytes_per_layer = 6 * 14
    return {
        'miscellaneous': {
            'number-of-layers': int(layout_bytes / bytes_per_layer),
        },
    }
|
1243fa174a964e3c62d3071c4b9389a22b6cbc4a
| 148,656 |
def end_phase_previous_exon(data_frame,
                            exon_pos,
                            prev_row_index,
                            end_phase_column='EndPhase'):
    """
    Return the end phase of the previous exon.

    It returns 0 if the actual exon is the first in the transcript.
    """
    # The first exon has no predecessor: phase is 0 by convention.
    first_exon = (exon_pos == 0)
    if first_exon:
        return 0
    return data_frame.loc[prev_row_index, end_phase_column]
|
cc1a3fcad9163eec93071e0dc0fcf19f16a6799f
| 339,717 |
import json
import requests
def do_github_graphql_request(query, token, variables=None):
    """performs a requests.post with the correct headers for a GitHub request.
    (The original docstring said requests.put, but the code issues a POST.)
    see: https://developer.github.com/v4/
    Args:
        query: the graphQL query string (excluding the variables section)
        token: a GitHub API token string
        variables: a dict of key=value variables that will be sent with the query
    Returns:
        A requests.Response object
    """
    url = "https://api.github.com/graphql"
    headers = {
        # Bearer-token authorization header, as the GitHub GraphQL API expects.
        "Authorization": "bearer " + token,
    }
    # NOTE: `variables` is JSON-encoded twice (inner dumps, then the outer
    # one), so the server receives it as a JSON *string* value.
    data = json.dumps({"query": query, "variables": json.dumps(variables)})
    return requests.post(url, headers=headers, data=data)
|
a17cb2bde79905c23835a90a9d39e72dddf5233f
| 173,351 |
def get_priority_elem_in_set(obj_set, priority_list):
    """Returns the highest priority element in a set.

    The set will be searched for objects in the order they appear in the
    priority list, and the first one to be found will be returned. None is
    returned if no such object is found.

    Parameters
    ---------
    obj_set : set, list
        A set or list of objects.
    priority_list : list
        A list of objects in descending order of priority.

    Returns
    -------
    object
        The highest priority object in the given set.

    Example:
    --------
    >>> obj_set = set([3, 2, 7, 8])
    >>> priority_list = [4, 8, 1, 3]
    >>> print(get_priority_elem_in_set(obj_set, priority_list))
    8
    """
    # First priority-list entry that is also in obj_set, or None.
    return next(
        (candidate for candidate in priority_list if candidate in obj_set),
        None,
    )
|
3a5775d0feab15bae33cbb79212f9b049d2abfce
| 54,839 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.