content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def guard(f, *args, **kwargs):
    """
    Call ``f(*args, **kwargs)`` and capture any exception it raises.

    Returns:
        tuple: ``(is_error, result)`` — ``is_error`` is True when the call
        raised, in which case ``result`` is the exception instance;
        otherwise ``result`` is the function's return value.
    """
    try:
        outcome = f(*args, **kwargs)
    except Exception as exc:
        return (True, exc)
    return (False, outcome)
|
7b612dbc88a098c50a5f3b9cc2d2e8eeb617b160
| 34,919 |
def get_kind_and_id(thing_id):
    """
    Split a reddit "thing id" into its kind prefix and id.

    Args:
        thing_id (str): a reddit thing id in the form t#_#+
    Returns:
        (str, str): a tuple of kind and id
    """
    # str.split returns a list; convert so the return type matches the
    # documented (str, str) tuple. Split on the first "_" only, since the
    # id portion may itself contain underscores.
    return tuple(thing_id.split("_", 1))
|
7b487b3a79c92104a938dfb9370c4092265c36f3
| 34,925 |
def rescale_eigenfunctions(eigenfunctions, scale_factor=1.0):
    """Return ``eigenfunctions`` multiplied by ``scale_factor``."""
    scaled = scale_factor * eigenfunctions
    return scaled
|
5a15aeb0eecd5eb161a66c4c0844191d0426f26f
| 34,927 |
def sample_cov(r, **kwargs):
    """
    Return the sample covariance matrix of the supplied returns.

    ``r`` must expose a pandas-style ``.cov()`` method; extra keyword
    arguments are accepted for interface compatibility but are unused.
    """
    covariance = r.cov()
    return covariance
|
971b98d9f951ed4c0dc3c9fca33098441f93cbf3
| 34,930 |
def street_not_in_use(street, items):
    """
    Check that no element of ``street`` is already in use.

    For example [[(2, 'black'), (3, 'black'), (4, 'black'), (5, 'black')],
    [(3, 'black'), (4, 'black'), (5, 'black')]] would return False, because
    3, 4 and 5 are already used.

    :param street: elements that will be checked
    :param items: items already planned to be played
    :return: True if the new street can be played as well, otherwise False
    """
    return not any(element in items for element in street)
|
6ba82838ca0b49c59c20cb6b47ec593c1fe43454
| 34,944 |
def both_set_and_different(first, second):
    """
    Compare two optional values.

    Returns:
        bool: True only when neither argument is ``None`` and the two
        values are unequal; False in every other case.
    """
    if first is None or second is None:
        return False
    return first != second
|
e5313d79ec434ef79c2b054fbdf0f80458f223a8
| 34,945 |
def CtoF(t):
    """Convert a temperature from Celsius to Fahrenheit."""
    fahrenheit = (t * 9) / 5 + 32
    return fahrenheit
|
7453fb09a8cda84a1ae32748e3ffc36cccfdf59d
| 34,947 |
def l2_norm_sqr(w):
    """
    Return the squared L2 norm of the matrix ``w``, i.e. the sum of the
    squares of its elements.

    @input: w, a theano shared variable (anything supporting ``**`` and ``.sum()``)
    @output: squared L2 norm of w
    """
    squared = w ** 2
    return squared.sum()
|
3573d004e4e545f4f6fba4bf44ed8bb39badaba7
| 34,948 |
def load_external_ids(path):
    """Read one integer id per line from ``path`` and return them as a list."""
    ids = []
    with open(path) as handle:
        for line in handle:
            ids.append(int(line.strip()))
    return ids
|
d430ed6541f390e03928beea1f4caa04cb244bd3
| 34,953 |
def second_to_human(time_in_sec):  # {{{
    """
    Return a humanized string for a duration given in seconds.

    The output is broken into days, hours, minutes and seconds, omitting
    zero components, e.g. '4 days 5 hours' or '4 hours 3 mins'.
    A zero duration yields '0 sec'; returns None only if nothing at all
    was rendered.
    """
    # Floor division throughout: under Python 3 the original true
    # division ("/") produced fractional counts such as "0.5 days".
    days = int(time_in_sec) // (3600 * 24)
    hours = int(time_in_sec - 3600 * 24 * days) // 3600
    minutes = int(time_in_sec - 3600 * 24 * days - 3600 * hours) % 3600 // 60
    # Seconds may be fractional; they are rendered with %g below.
    seconds = time_in_sec % 3600 % 60
    ss = ""
    if days > 0:
        ss += " %s %s" % (days, "day" if days == 1 else "days")
    if hours > 0:
        ss += " %s %s" % (hours, "hour" if hours == 1 else "hours")
    if minutes > 0:
        ss += " %s %s" % (minutes, "min" if minutes == 1 else "mins")
    if seconds > 0 or (seconds == 0 and days == 0 and hours == 0 and minutes == 0):
        ss += " %g %s" % (seconds, "sec" if seconds <= 1 else "secs")
    ss = ss.strip()
    return ss if ss else None
|
f1f480f5ccbd05f3413f562f33beb7865e1a6d86
| 34,955 |
import warnings
def is_equal(v1, v2):
    """
    Loosely compare two values for equality.

    Parameters
    ----------
    v1: Any
        first value
    v2: Any
        second value

    Returns
    -------
    result : bool
        True when ``v1 == v2``; False when they differ or when the
        comparison itself raised (a UserWarning is emitted in that case).
    """
    try:
        outcome = bool(v1 == v2)
    except Exception as e:
        warnings.warn(
            "Comparison method failed. Returned False. "
            f"There may be need to define custom compare methods in __equality_checks__ dictionary. Exception {e}"
        )
        return False
    return outcome
|
3955602d046e16c0488c288bc7d942e69042ab46
| 34,958 |
def rows_to_dict(
    rows,
    main_key_position=0,
    null_value="delete",
    header_line=0,
    contains_open_ends=False,
):
    """
    Convert a row of rows (e.g. csv) to dictionary
    Parameters
    ----------
    rows : list
        the row based data to convert to `dict`
    main_key_position : int, optional
        if the main_key is not on the top left, its position can be specified
    null_value : any, optional
        if an emtpy field in the lists shall be represented somehow in the dictionary
    header_line : int, optional
        if the header_line is not the first one, its position can be specified
    contains_open_ends : bool, optional
        if each row is not in the same length (due to last set entry as last element in row),
        a length check for corrupted data can be ignored
    Returns
    -------
    dict
        dictionary containing the information from row-based data
    """
    data = dict()
    header = rows[header_line]
    # Every row below the header becomes one entry keyed by its main-key cell.
    for row in rows[header_line + 1 :]:
        sub_data = dict()
        for i in range(len(header)):
            if i == main_key_position:
                # The main key becomes the outer dict key, not a sub-entry.
                continue
            elif i >= len(row):
                # Row shorter than the header: tolerate it only when
                # contains_open_ends is set, otherwise treat as corrupted.
                if not contains_open_ends:
                    raise IndexError("not all elements are the same length")
                elif null_value != "delete":
                    sub_data[header[i]] = null_value
            elif not row[i] and null_value != "delete":
                # Empty cell: represent it with the configured null_value.
                sub_data[header[i]] = null_value
            elif not row[i]:
                # Empty cell with null_value == "delete": drop the field.
                continue
            else:
                sub_data[header[i]] = row[i]
        data[row[main_key_position]] = sub_data
    # Wrap everything under the header name of the main-key column.
    data = {header[main_key_position]: data}
    return data
|
346f759464c1e4b9cd8f7b54387c10b4dc46453d
| 34,961 |
def trimHighscores(arr):
    """Return the five records of 'arr' with the greatest scores (record[1])."""
    ranked = sorted(arr, key=lambda record: int(record[1]), reverse=True)
    return ranked[:5]
|
91f3ba4318104ec6a265b336cd2959fb78978864
| 34,963 |
def moeda(preco=0, moeda="R$"):
    """
    Format a price with a currency symbol, using a comma as the decimal
    separator (Brazilian convention).

    :param preco: the price to format.
    :param moeda: the currency symbol to prepend.
    :return: the formatted value as a string.
    """
    formatted = f'{moeda} {preco:.2f}'
    return formatted.replace('.', ',')
|
c9567ac6a205d8f37001188ade6ac3281d6f870f
| 34,965 |
def set_transformer_in_out(transformer, inputCol, outputCol):
    """Set input and output column(s) on a copy of a transformer instance.

    Supports both single-column (``setInputCol``/``setOutputCol``) and
    multi-column (``setInputCols``/``setOutputCols``) transformer APIs.
    Raises ValueError when the transformer accepts no input column at all.
    A missing output-column setter is tolerated (e.g. ColumnDropper).
    """
    transformer = transformer.copy()
    try:
        transformer.setInputCol(inputCol)
    except AttributeError:
        try:
            transformer.setInputCols([inputCol])
        except AttributeError:
            raise ValueError((
                "Invalid transformer (doesn't have setInputCol or setInputCols): ",
                str(transformer.__class__)
            ))
    try:
        transformer.setOutputCol(outputCol)
    except AttributeError:
        try:
            transformer.setOutputCols([outputCol])
        except AttributeError:
            # No output column on this transformer type; that is fine.
            pass
    return transformer
|
4bf9bd4abeaa6435df5316c9201d62dd97645cd5
| 34,970 |
def join_bpe(lst: list, s: str):
    """Fold BPE word pieces: a token prefixed with '##' is glued onto the
    previous token. Intended as the function argument of ``reduce``."""
    if not s.startswith("##"):
        return lst + [s]
    # Continuation piece: merge it into the most recent token.
    base = lst.pop()
    return lst + [base + s.strip("#")]
|
53d8891adcbced80bbded95bcd32c1aaa292fd25
| 34,971 |
def convert_date(date):
    """
    Convert a date from DD/MM/YYYY format to YYYY-MM-DD.

    >>> convert_date("13/04/2018")
    '2018-04-13'
    """
    tokens = date.split("/")
    return "-".join((tokens[2], tokens[1], tokens[0]))
|
dd7eb20f73412565285887fa8f9ec19bcde2b3dc
| 34,973 |
import functools
import logging
import time
def RetryOnException(exc_type, retries):
    """Decorator to retry running a function if an exception is raised.
    Implements exponential backoff to wait between each retry attempt, starting
    with 1 second.
    Note: the default number of retries is defined on the decorator, the decorated
    function *must* also receive a "retries" argument (although its assigned
    default value is ignored), and clients of the funtion may override the actual
    number of retries at the call site.
    The "unused" retries argument on the decorated function must be given to
    keep pylint happy and to avoid breaking the Principle of Least Astonishment
    if the decorator were to change the signature of the function.
    For example:
    @retry_util.RetryOnException(OSError, retries=3)  # default no. of retries
    def ProcessSomething(thing, retries=None):  # this default value is ignored
    del retries  # Unused. Handled by the decorator.
    # Do your thing processing here, maybe sometimes raising exeptions.
    ProcessSomething(a_thing)  # retries 3 times.
    ProcessSomething(b_thing, retries=5)  # retries 5 times.
    Args:
    exc_type: An exception type (or a tuple of them), on which to retry.
    retries: Default number of extra attempts to try, the caller may also
    override this number. If an exception is raised during the last try,
    then the exception is not caught and passed back to the caller.
    """
    def Decorator(f):
        @functools.wraps(f)
        def Wrapper(*args, **kwargs):
            # Backoff starts at 1 second and doubles after each failure.
            wait = 1
            # A caller-supplied retries value wins over the decorator default.
            kwargs.setdefault('retries', retries)
            for _ in range(kwargs['retries']):
                try:
                    return f(*args, **kwargs)
                except exc_type as exc:
                    logging.warning(
                        '%s raised %s, will retry in %d second%s ...',
                        f.__name__, type(exc).__name__, wait, '' if wait == 1 else 's')
                    time.sleep(wait)
                    wait *= 2
            # Last try with no exception catching.
            return f(*args, **kwargs)
        return Wrapper
    return Decorator
|
ea87117cd202cbc6ce5bc4fc9fb25a5b8a324e1f
| 34,974 |
import csv
def read_csv(csv_path):
    """Parse a two-column CSV formatted as::

        name1,label1
        name2,label2
        ...

    Returns a list of ``(name, int(label))`` tuples.
    """
    with open(csv_path) as handle:
        rows = csv.reader(handle)
        return [(name, int(label)) for name, label in rows]
|
182afc58b826079adee5e74ab07e4aa77febdb03
| 34,975 |
from typing import Any
from typing import Set
def get_all_subclasses(cls: Any) -> Set[Any]:
    """Return every direct and indirect subclass of ``cls`` as a set."""
    found = set(cls.__subclasses__())
    for child in cls.__subclasses__():
        found |= get_all_subclasses(child)
    return found
|
0f1c93b3db6834b1596df4d403a438b1c5597219
| 34,976 |
def gcd(x: int, y: int) -> int:
    """Greatest common divisor of ``x`` and ``y`` (always non-negative).

    Euclidean algorithm. Unlike the original loop, ``y == 0`` is handled
    (returning ``abs(x)``) instead of raising ZeroDivisionError; results
    for all other inputs are unchanged.
    """
    while y:
        x, y = y, x % y
    return abs(x)
|
e076707c26a63af1ce8e993163b4c165b361d074
| 34,980 |
def _resnames_match(resnames, allowed_resnames):
    """
    Return True if any element of ``resnames`` is present in
    ``allowed_resnames``.

    Parameters
    ----------
    resnames: `abc.iterable`
    allowed_resnames: `abc.iterable`
    """
    return any(resname in allowed_resnames for resname in resnames)
|
4078b7baae89a7c8c0d5edbba78235704825ee29
| 34,983 |
def is_pandigital(x):
    """
    Check whether ``x`` contains each digit 1..9 exactly once (and nothing
    else).
    """
    digits = sorted(int(ch) for ch in str(x))
    return digits == list(range(1, 10))
|
2024b596888e93d2ec91e2be60c0cde5ac30fce7
| 34,985 |
def __edge_exists__(i, j, g):
    """
    Checks if the edge i --> j exists in the graph, g.
    :param i: Index of a node.
    :param j: Index of a node.
    :param g: Graph.
    :return: A boolean indicating if j is a successor of i.
    """
    # Test membership directly on the successors iterator; materializing
    # the whole successor list first (as before) wasted time and memory.
    return j in g.successors(i)
|
00cb1fb0bb6f2fffb1f6359c9a8fdd2afd939652
| 34,987 |
def iteritems(d):
    """Return the items of a mapping, preferring the Python 3 ``items()``
    and falling back to Python 2's ``iteritems()`` (2/3 compatibility)."""
    try:
        return d.items()
    except AttributeError:
        # Python 2 dict-like object.
        return d.iteritems()
|
8848c1efbddb1f4a1a404b8fa3ae800aeb5a0770
| 34,990 |
from typing import Callable
from typing import Dict
import uuid
def _guid_replacer() -> Callable[[str], str]:
    """
    Build a GUID-anonymizing closure.

    Returns
    -------
    Callable[[str], str]
        A function that consistently maps each input GUID string to a
        freshly generated random UUID (same input -> same output).
    """
    mapping: Dict[str, str] = {}

    def _replace_guid(guid: str) -> str:
        """
        Replace GUID/UUID with its (possibly newly generated) random UUID.

        Parameters
        ----------
        guid : str
            Input UUID.

        Returns
        -------
        str
            Mapped UUID
        """
        # Pass empty / non-string values through untouched.
        if not guid or not isinstance(guid, str):
            return guid
        if guid not in mapping:
            mapping[guid] = str(uuid.uuid4())
        return mapping[guid]

    return _replace_guid
|
87c4b6553fb4dee1b588ce3f2eefde9723c9b28f
| 34,996 |
def query_phantom(view, pid):
    """Delegate a phantom query for phantom id ``pid`` to ``view``."""
    result = view.query_phantom(pid)
    return result
|
50d7c921264d924a38145af7b37086f8e750bd66
| 35,000 |
from pathlib import Path
def filename(filepath):
    """Return the final path component of ``filepath``.

    Args:
        filepath (str): A path to a file
    Returns:
        str: The filename
    """
    path_obj = Path(filepath)
    return path_obj.name
|
49aa2e76f7ce4749796d1870d55ff58a2a435b65
| 35,002 |
def clean_labels(_labels, keep=1):
    """Split each 'prefix-value' label on '-' and keep one part.

    Args:
        _labels (list): strings of the form ``prefix-value``.
        keep (int, optional): index of the part to keep. Defaults to 1.
            When truthy, the kept part is cast to int (the value side).

    Returns:
        list: cleaned-up strings or integers.
    """
    parts = (label.split('-')[keep] for label in _labels)
    if keep:
        return [int(part) for part in parts]
    return list(parts)
|
806da913dfdef53a04bb3b8d601feb9027be00a0
| 35,004 |
def getTypesWithName(types, names):
    """Return all types whose ``name`` attribute appears in ``names``.

    Keyword arguments:
    types -- list of model.Type instances
    names -- list of strings with names
    """
    # Set membership replaces the original O(len(types) * len(names))
    # nested loop; the inner loop's break-on-first-match is exactly a
    # membership test, so result order and content are unchanged.
    wanted = set(names)
    return [type for type in types if type.name in wanted]
|
8219b1df233d1aea21c5efe80c3c621d30a8d595
| 35,009 |
def getCenter(x, y, blockSize):
    """
    Return the (x, y) center of a square block whose top-left corner is
    (x, y) and whose side length is blockSize.
    """
    half = blockSize / 2
    return (int(x + half), int(y + half))
|
9398aa6438b5921c625381898a7666b4203d2e73
| 35,010 |
import csv
def read_objects_csv(filename):
    """Load a headed CSV into a list of dicts, one per row, with the
    heading as key and the column as value. Useful for populating
    create_network().

    :param filename: Full filename of the CSV
    :type filename: str
    :return: list of row dicts
    :rtype: list
    """
    with open(filename) as objects_csv:
        return list(csv.DictReader(objects_csv))
|
2782bb833e39c361338976f264b796f5231fc584
| 35,012 |
def threshold(pred, param):
    """
    Binarize ``pred`` in place: entries >= ``param`` become 1, all others 0.

    Parameters
    ----------
    pred : np.ndarray
        Prediction image (modified in place).
    param : float
        Threshold value.

    Returns
    -------
    np.ndarray
        The binarized array (the same object as ``pred``).
    """
    # Compute the mask before writing anything: the original wrote the 1s
    # first, so whenever param > 1 those freshly written 1s satisfied
    # "pred < param" and were immediately zeroed again.
    above = pred >= param
    pred[above] = 1
    pred[~above] = 0
    return pred
|
e38ab2683d48fc069782d0a18ffad51cce944aec
| 35,015 |
from functools import reduce
def PostUploadHook(cl, change, output_api):
    """git cl upload will call this hook after the issue is created/modified.
    This will add extra trybot coverage for non-default Android architectures
    that have a history of breaking with Seccomp changes.
    """
    def affects_seccomp(f):
        # Path fragments whose presence marks a file as Seccomp-related.
        seccomp_paths = [
            'bpf_dsl/',
            'seccomp-bpf/',
            'seccomp-bpf-helpers/',
            'system_headers/',
            'tests/'
        ]
        # If the file path contains any of the above fragments, it affects
        # the Seccomp implementation.
        affected_any = map(lambda sp: sp in f.LocalPath(), seccomp_paths)
        # reduce(...) ORs the per-fragment booleans together.
        return reduce(lambda a, b: a or b, affected_any)
    # No Seccomp-related files touched: add no extra bots.
    if not change.AffectedFiles(file_filter=affects_seccomp):
        return []
    return output_api.EnsureCQIncludeTrybotsAreAdded(
        cl,
        [
            'master.tryserver.chromium.android:android_arm64_dbg_recipe',
            'master.tryserver.chromium.android:android_compile_x64_dbg',
            'master.tryserver.chromium.android:android_compile_x86_dbg',
        ],
        'Automatically added Android multi-arch compile bots to run on CQ.')
|
1923453354990d037a8c0b5f7af9026974905a7b
| 35,017 |
async def read_until(socket, messages):
    """
    Receive JSON payloads from ``socket`` until every message in
    ``messages`` has been seen at least once.

    Returns the full list of payloads read (including non-matching ones).
    """
    pending = messages.copy()
    received = []
    while pending:
        payload = await socket.receive_json()
        received.append(payload)
        if payload in pending:
            pending.remove(payload)
    return received
|
0dc807b23b6b65fc2952da56abef442a742fc85d
| 35,018 |
def _DefaultRunnable(test_runner):
    """A default runnable for a PythonTestRunner: just run its tests.

    Args:
        test_runner: A PythonTestRunner which will run tests.
    Returns:
        The test results.
    """
    results = test_runner.RunTests()
    return results
|
7b86682211924848c7ebb9bdc70a36c08bc63cbe
| 35,020 |
def rekey_dict(d, key_map):
    """
    Renames the keys in `d` based on `key_map`.
    `d` is a dictionary whose keys are a superset of the keys in `key_map`.
    `key_map` is a dictionary whose keys match at least some of the keys in `d`
    and whose values are the new key names for `d`.
    For example:
        rekey_dict({'a': 1, 'b': 2}, {'a': 'b', 'b': 'c'}) =
        {'b': 1, 'c': 2}
    """
    # Create a new dictionary containing only the remapped key names.
    new_dict = {new_key: d[old_key]
                for old_key, new_key in key_map.items()
                if old_key in d}
    # Copy over the keys that were not remapped. The original tested only
    # "key not in new_dict", which re-added a remapped key under its OLD
    # name whenever that name did not collide with any new name (e.g. 'a'
    # in the docstring example ended up in the result as {'a': 1, ...}).
    for key in d:
        if key not in key_map and key not in new_dict:
            new_dict[key] = d[key]
    return new_dict
|
1e88b6ca14d94f3fbbc73a2760833a5514264667
| 35,022 |
def markdown_paragraph(text: str) -> str:
    """Wrap ``text`` as a standalone Markdown paragraph."""
    return "\n" + text + "\n\n"
|
38f2a790f565e6417d73a2b2430cc0c5efabc27a
| 35,027 |
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import GroupNorm, LayerNorm
def assert_is_norm_layer(module) -> bool:
    """Check if the module is a norm layer.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the module is a norm layer.
    """
    return isinstance(module, (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
|
2698640b36b6b08068b4276fc725aa84d950ace7
| 35,036 |
import six
def is_integer(obj):
    """Return True when ``obj`` is an integer type but not a bool.

    bool subclasses int, so a plain isinstance check against the integer
    types would wrongly accept True/False.
    """
    if isinstance(obj, bool):
        return False
    return isinstance(obj, six.integer_types)
|
4b17b11fd0a86b900f21191522b636b1f63e5015
| 35,037 |
def confirm(msg):
    """Ask a yes/no question on stdin until 'y' or 'n' is entered.

    msg(str): Message expecting a yes or no answer
    Returns True if answer is "yes" and False otherwise.
    """
    prompt = f'{msg} (y/n) '
    reply = ''
    while reply not in ('y', 'n'):
        reply = input(prompt).lower()
    return reply == 'y'
|
50913505396ce8df87084da70c6886d90a0b1714
| 35,039 |
def MakePartition(block_dev, part):
    """Build the Linux device path for partition ``part`` of ``block_dev``.

    Devices whose name ends in a digit (e.g. nvme0n1, mmcblk0) need a 'p'
    separator before the partition number.
    """
    separator = 'p' if block_dev[-1].isdigit() else ''
    return '%s%s%s' % (block_dev, separator, part)
|
3e21c773a69b7c49bb4aad4210b9d168099565ef
| 35,041 |
def apl_singleton(ip: str) -> str:
    """
    Convert a single IP or net/mask to APL item form.

    IPv6 addresses get the "2:" prefix and a /128 default mask, IPv4 get
    "1:" and /32. An explicit "/mask" in the input is preserved.
    """
    # Address family: any ":" marks IPv6. (str.find never raises, so the
    # original try/except around it was dead code.)
    prefix = "2:" if ":" in ip else "1:"
    # Append the default host mask only when none was supplied; the
    # original appended it unconditionally, turning "1.2.3.0/24" into the
    # malformed "1:1.2.3.0/24/32".
    if "/" not in ip:
        ip += "/128" if prefix == "2:" else "/32"
    return prefix + ip
|
32f4886bf7e55f7d15b916e6321454db7948f536
| 35,042 |
def getDigit(num, n, base=10):
    """
    Return the nth least-significant digit of integer ``num`` (n=0 is the
    ones place) in the specified ``base``.
    """
    # Integer floor division keeps the arithmetic exact; the original
    # int(num / base**n) went through a float and silently corrupted
    # digits of numbers larger than 2**53. (For negative num this floors
    # rather than truncates; digits of non-negative inputs are unchanged.)
    return num // base ** n % base
|
1f63850ffb5f138056aa3111e3f472753edaf822
| 35,049 |
import requests
def query_bulk_games_endpoint(username, year, month):
    """
    Get data from the chess.com bulk game API endpoint.
    Args:
        username (str): A valid chess.com username.
        year (str): Year in a YYYY format.
        month (str): Month in a MM format.
    Returns:
        requests.response: A ``requests.response`` object from the
        chess.com bulk download API.
    """
    # NOTE(review): the f-string below formats {month:02d}, which requires
    # an *int* month — a "MM"-format string (as documented above) would
    # raise ValueError here. Confirm what callers actually pass.
    url = f"https://api.chess.com/pub/player/{username}/games/{year}/{month:02d}/pgn"
    return requests.get(
        url=url,
        headers={
            "Content-Type": "application/x-chess-pgn",
            "Content-Disposition": 'attachment; filename="ChessCom_username_YYYYMM.pgn"',
        },
    )
|
ece7d261ef574b45d4fe6f9ac0170362ac44ae6f
| 35,058 |
def cols_by_type(df, dtype):
    """Return the names of all columns in ``df`` whose dtype equals ``dtype``.
    """
    # The original comprehension reused the name "dtype" for its loop
    # variable, shadowing the parameter — the filter compared each
    # column's dtype with itself and therefore returned every column.
    return [col for col, col_dtype in zip(df.columns, df.dtypes)
            if col_dtype == dtype]
|
8f33bb8624895c3cfd3793ee72554587ca202adb
| 35,065 |
def ceilsius_to_kelvin(t: float) -> float:
    """Converts the temperature from Celsius to Kelvin.

    Args:
        t (float): Air temperature [°C].
    Returns:
        float: Air temperature [K].
    """
    # 0 °C corresponds to 273.15 K; the previous constant 273.16 is the
    # triple point of water in kelvin, not the Celsius-scale offset.
    return t + 273.15
|
a6310e213bfa699ce31d96782075de4fcc28ff20
| 35,066 |
import struct
def _pack_uint32(val):
    """Pack ``val`` as 4 little-endian bytes (unsigned 32-bit)."""
    packed = struct.pack("<I", val)
    return packed
|
32f4e6d74d572a716d723af4f7b7a6911a38b17c
| 35,069 |
def indent(s, indentation="  "):
    """
    Prefix every line with an indentation string.

    @param s the lines to indent (an iterable of strings)
    @param indentation the desired indentation prefix
    @return list of indented lines
    """
    return [indentation + line for line in s]
|
59ec5d6751f906b84c2d46a51777f0831116082a
| 35,074 |
def insert(base_seq, positions, sub_seq):
    """
    Overwrite characters of ``base_seq`` at the given 1-based positions
    with the corresponding characters of ``sub_seq``; return the result.
    """
    chars = list(base_seq)
    for idx, pos in enumerate(positions):
        chars[pos - 1] = sub_seq[idx]
    return "".join(chars)
|
6234b55e82edf69efe9e31bf9fa35b46c51019a2
| 35,079 |
import json
def open_file(file: str) -> dict:
    """Load JSON from ``file``; return an empty dict when it is missing."""
    try:
        handle = open(file)
    except FileNotFoundError:
        return {}
    with handle:
        return json.load(handle)
|
435462a2a7a4d40d7980dce80562f9ab231c8ac0
| 35,081 |
def get_node_text(nodes):
    """Recursively collect the text of a DOM node list, rendering
    literal/strong/emphasis elements with Markdown-style wrappers."""
    wrappers = {"literal": "`", "strong": "**", "emphasis": "*"}
    parts = []
    for node in nodes:
        # Inline formatting elements are wrapped in their Markdown marker.
        if node.nodeType == node.ELEMENT_NODE and node.tagName in wrappers:
            mark = wrappers[node.tagName]
            parts.append(mark + get_node_text(node.childNodes) + mark)
        elif node.nodeType == node.TEXT_NODE:
            parts.append(node.data)
        elif node.nodeType == node.ELEMENT_NODE:
            parts.append(get_node_text(node.childNodes))
    return "".join(parts)
|
fdd0345472c4a01069f1c05cbeb9cd946405f552
| 35,088 |
def normalized(z):
    """Return the complex number with the same argument/phase as ``z``
    but with magnitude 1. Raises ZeroDivisionError for z == 0."""
    magnitude = abs(z)
    if magnitude == 0:
        raise ZeroDivisionError("Cannot normalize 0.")
    return z / magnitude
|
441cbe83fbd88319830231d62822b85fcffc6ce3
| 35,089 |
def hamming(list_sequences):
    """Compute the Hamming distance of two DNA strings (a two-element
    sequence of equal-length strings)."""
    string_a, string_b = list_sequences
    return sum(1 for nt_a, nt_b in zip(string_a, string_b) if nt_a != nt_b)
|
df80a89927a604d8d88879165bf41b3bf9a1096e
| 35,091 |
def reformat_acs_vars(col):
    """Convert a variable name like ``B01001e1`` to the Census Detailed
    Tables API format ``B01001_001E``.

    See <https://api.census.gov/data/2019/acs/acs5/variables.html> for
    variable descriptions.

    Parameters
    ----------
    col : str
        column name to adjust

    Returns
    -------
    str
        reformatted column name
    """
    pieces = col.split("e")
    table, suffix = pieces[0], pieces[1]
    return f"{table}_{suffix.rjust(3, '0')}E"
|
535a42bef9da5ade2afe87abab59578cf60e0ba6
| 35,095 |
from typing import Union
from datetime import datetime
def _new_name(seq_type: str) -> Union[str, None]:
    """
    Build a timestamped output file name for a DNA or RNA sequence.

    Prints a notice and returns the string "Failed!" for any other type.
    """
    stamp = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p")
    kind = seq_type.lower()
    if kind in ("dna", "rna"):
        return f"{kind}_sequence_{stamp}.txt"
    print("Invalid sequence type. Choose RNA or DNA.")
    return "Failed!"
|
2ed107900bc936472894cb40ee44cc887e9f8201
| 35,097 |
def parse_and_validate_latitude(request):
    """Extract and check the validity of the ``lat`` query parameter.

    Args:
        request: HTTP request.
    Returns:
        float: latitude, if valid.
    Raises:
        ValueError: if latitude is not a float or outside [-90, 90].
    """
    lat = float(request.rel_url.query.get("lat"))
    if not -90 <= lat <= 90:
        raise ValueError
    return lat
|
3bde916112e0c24cd989275dba389826e56a4e5f
| 35,103 |
def get_key(my_dict, val):
    """
    Find the first key in a dictionary mapped to a given value.

    :param my_dict: the dictionary to search
    :param val: the value of the target key
    :return: the first key whose value equals ``val``, or the string
        "key doesn't exist" when no such key is found
    """
    matches = (key for key, value in my_dict.items() if value == val)
    return next(matches, "key doesn't exist")
|
3ad9ed2a0ee65aaf232fc941a6ee94739b980880
| 35,105 |
from typing import Optional
from pathlib import Path
def find_user_library() -> Optional[Path]:
    """Return the user's default music folder (``~/Music``) if it exists,
    otherwise None."""
    music_dir = Path('~/Music').expanduser()
    return music_dir if music_dir.exists() else None
|
d94e4cfbb48a06a4e870633c7be2fcea4597437e
| 35,109 |
def translate_key(key):
    """
    Map a lowercase ssh-config-style key to its canonical spelling.

    Unknown keys are returned unchanged. Always returns a string.
    """
    canonical = {
        'user': 'User',
        'identityfile': 'IdentityFile',
        'proxycommand': 'ProxyCommand',
        'ip': 'Hostname',
        'hostname': 'Hostname',
        'port': 'Port',
    }
    return str(canonical.get(key, key))
|
9c7b9b32d1b341946a9c66120c7b32c9c304c754
| 35,110 |
import hashlib
def file_md5(file_content):
    """Return the hex MD5 checksum of ``file_content`` (bytes)."""
    return hashlib.md5(file_content).hexdigest()
|
8c68b2ab87dccf1605593942b3abc1e495a0c8f7
| 35,112 |
def get_target_ids(node_field_values):
    """Collect the ``target_id`` of every entity reference in a field."""
    return [target['target_id'] for target in node_field_values]
|
68e811b86d246cc0070934ce2a6dc940603e39f8
| 35,113 |
def match_rules(txt, rules):
    """Find the rule whose begin tag occurs earliest in ``txt``.

    Returns ``(rule, position)``; when no begin tag occurs at all, rule is
    None and position is the sentinel 10e100.
    """
    best_loc = 10e100
    best_rule = None
    for rule in rules:
        begin_tag, _end_tag, _func = rule
        loc = txt.find(begin_tag)
        if -1 < loc < best_loc:
            best_loc = loc
            best_rule = rule
    return (best_rule, best_loc)
|
35a89b274d06e21bc3d03dd5a7c2ece532030303
| 35,114 |
def get_embedding(word, model):
    """
    Look up the embedding vector of a word in a gensim-style model.

    parameters
    -----------
    :param word: str
    :param model: object
        Word embedding model exposing ``wv``
    :return: vector
    :raises KeyError: when the word is out of vocabulary
    """
    if word not in model.wv.vocab:
        raise KeyError
    return model.wv[word]
|
d77fd6b03bea62859bbce8388bc0961c9fe6d449
| 35,115 |
def estimate_evacuate_timeout(session, host):
    """Roughly estimate the evacuation time limit from live VM memory use.

    Conservative estimate assuming a 1000 Mbps link; Dom0's memory (which
    is not transferred) acts as an intentional safety surplus.
    """
    metrics_ref = session.xenapi.host.get_metrics(host)
    record = session.xenapi.host_metrics.get_record(metrics_ref)
    in_use = int(record['memory_total']) - int(record['memory_free'])
    return in_use * 8. / (1000. * 1024 * 1024)
|
508b1c4df6549b30366c623640fce258163adc8b
| 35,117 |
def parse_group_name(group_name):
    """ Return an ((int, int), prefix) tuple from group name.
    Expects group to be in the form {prefix}/{x}_{y}
    raises ValueError if group_name is not in the expected format.
    """
    prefix, sep, tail = group_name.rpartition('/')
    if not sep:
        raise ValueError('Bad group name: ' + group_name)
    parts = tail.split('_')
    if len(parts) != 2:
        raise ValueError('Bad group name: ' + group_name)
    return (int(parts[0]), int(parts[1])), prefix
|
b3151a19cd3b6cdd5028ded0967a637e592764a5
| 35,118 |
import json
def getConfig(configFileName='config.json'):
    """Read application configuration from an external JSON file.

    Parameters
    ----------
    configFileName: str
        name of the file to be read

    Returns
    -------
    dict
        parsed configuration data
    """
    with open(configFileName, 'r') as config:
        raw = config.read()
    return json.loads(raw)
|
c430b50f4c5cc342e328b8555fa48c8cd2a7fe17
| 35,120 |
def get_ca_id_from_ref(ca_ref):
    """Parse a CA reference and return the CA ID (its last path segment).

    :param ca_ref: HTTP reference of the CA
    :return: a string containing the ID of the CA
    """
    return ca_ref.rsplit('/', 1)[1]
|
ece01d4566d6317e70c320eb86a52c2a7766b415
| 35,125 |
def cache_master_key(config):
    """Build a cache-key string from every cache-invalidating config field."""
    template = (
        "str{use_string}_"
        "vocab{vocab_size}_"
        "pg{use_page}_"
        "geom{use_geom}_"
        "amt{use_amount}_"
        "pad{pad_windows}_"
        "len{window_len}"
    )
    return template.format(**config)
|
adf39851298b0c15afdb2e3645ef91df6df11376
| 35,126 |
def clean_multiple_coordsets(protein):
    """
    Delete all but the first coordinate set of ``protein`` (in place) and
    return it.
    """
    extra = len(protein.getCoordsets()) - 1
    # Drop the trailing coordinate sets one by one, keeping index 0.
    for _ in range(extra):
        protein.delCoordset(-1)
    return protein
|
70e0c9355394b78331b802c40369b98c74ed75f1
| 35,127 |
def hash_series(series):
    """Fast, content-based hash of a pandas Series.

    Hashes the raw bytes of the values and of the index — ~82 µs, versus
    ~1.8-2.2 ms for hashing the items tuple and ~112 ms for to_string().
    """
    index_hash = hash(series.index.values.data.tobytes())
    values_hash = hash(series.values.data.tobytes())
    return hash((index_hash, values_hash))
|
252fe3ea429cecef88e376a19a189e3b3d45d3cf
| 35,133 |
def format_string(x: int, frame: str) -> str:
    """Render one time-frame component, pluralizing as needed.

    :param x: count for the time frame
    :param frame: name of the time frame (e.g. "hour")
    :return: '' for zero, ' 1 frame ' for one, ' x frames ' otherwise
    """
    if x == 0:
        return ''
    if x == 1:
        return f' {x} {frame} '
    return f' {x} {frame}s '
|
adccee40ba89d012c7ae75d07a6912cac0c8d7d0
| 35,134 |
import time
def datetime_to_seconds_since_epoch(dt):
    """Convert a naive local-time datetime to seconds since the epoch."""
    timetuple = dt.timetuple()
    return time.mktime(timetuple)
|
7f9b48591c0199aa1c1882fe84ecea3a7bd9526f
| 35,138 |
def luma(col):
    """ITU-R 601-2 luma of an (r, g, b) color — the same formula
    `PIL.Image.Image.convert` uses when converting RGB to L."""
    red, green, blue = col
    luminance = red * 299 / 1000
    luminance += green * 587 / 1000
    luminance += blue * 114 / 1000
    return luminance
|
e57a11476205a0b9dc287c1239e8064762161544
| 35,140 |
def build_ranking(teams):
    """
    Assign ranks to teams based on their win counts; tied teams share the
    same rank (standard competition ranking).

    :param teams: list whose elements start with (name, wins, ...), already
        sorted by wins in descending order.
    :type teams: list
    :return: list of [rank, name, wins] entries in ascending rank order.
    """
    ranking = []
    prev_wins = float("inf")
    rank = -1
    ties = 0
    for name, wins, *_ in teams:
        ties += 1
        if wins < prev_wins:
            # New (lower) win count: advance the rank past the tied group.
            rank += ties
            prev_wins = wins
            ties = 0
        ranking.append([rank, name, wins])
    return ranking
|
7046e385bda56fd995fd261c253035b1ebb049a9
| 35,141 |
def net_income(ebt, tax):
    """
    Compute net income: earnings before tax minus tax expense.

    Parameters
    ----------
    ebt : int or float
        Earnings before tax
    tax : int or float
        Tax expense

    Returns
    -------
    out : int or float
        Net income
    """
    out = ebt - tax
    return out
|
7d72f10d98d3646837ad3f5eccb6c19d2900ea38
| 35,142 |
def sanity_check_iob(naive_tokens, tag_texts):
    """
    Check if the IOB tags are valid.
    * Args:
        naive_tokens: tokens split by .split()
        tag_texts: list of tags in IOB format
    """
    def prefix(tag):
        # "B"/"I" part of a tag like "B-PER"; "O" has no prefix.
        if tag == "O":
            return tag
        return tag.split("-")[0]
    def body(tag):
        # Entity type of a tag like "B-PER"; None for "O".
        if tag == "O":
            return None
        return tag.split("-")[1]
    # same number check
    assert len(naive_tokens) == len(tag_texts), \
        f"""Number of tokens and tags doest not match.
        original tokens: {naive_tokens}
        tags: {tag_texts}"""
    # IOB format check
    prev_tag = None
    for tag_text in tag_texts:
        curr_tag = tag_text
        if prev_tag is None:  # first tag
            # A sequence may not open with a continuation ("I-") tag.
            assert prefix(curr_tag) in ["B", "O"], \
                f"""Wrong tag: first tag starts with I.
                tag: {curr_tag}"""""
        else:  # following tags
            if prefix(prev_tag) in ["B", "I"]:
                # After B/I: either continue the same entity (I- with the
                # same body), start a new one (B-), or close it (O).
                assert (
                    (prefix(curr_tag) == "I" and body(curr_tag) == body(prev_tag))
                    or (prefix(curr_tag) == "B")
                    or (prefix(curr_tag) == "O")
                ), f"""Wrong tag: following tag mismatch.
                previous tag: {prev_tag}
                current tag: {curr_tag}"""
            elif prefix(prev_tag) in ["O"]:
                # After O only a new entity (B-) or another O is legal.
                assert prefix(curr_tag) in ["B", "O"], \
                    f"""Wrong tag: following tag mismatch.
                    previous tag: {prev_tag}
                    current tag: {curr_tag}"""
            else:
                raise RuntimeError(f"Encountered unknown tag: {prev_tag}.")
        prev_tag = curr_tag
|
4fb16ed2bd7a623a7dad331d8b7c8a5033a382ea
| 35,148 |
def rank_permutation(r, n):
    """Given r and n find the permutation of {0,..,n-1} with rank according
    to lexicographical order equal to r

    :param r n: integers with 0 ≤ r < n!
    :returns: permutation p as a list of n integers
    :beware: computation with big numbers
    :complexity: `O(n^2)`
    """
    weight = 1
    for k in range(2, n):
        weight *= k  # weight starts at (n-1)!
    remaining = list(range(n))  # digits not placed yet
    perm = []
    for i in range(n):
        # Decompose r = pos * weight + remainder.
        pos, r = divmod(r, weight)
        perm.append(remaining.pop(pos))
        if i != n - 1:
            weight //= (n - 1 - i)  # weight of the next digit
    return perm
|
6a8ec6e3a2165796a17b69f2f8cb26bc4e0d7489
| 35,150 |
def isclassattr(a, cls):
    """ Return True when *a* is defined directly in the __dict__ of *cls*
    or of any class on its MRO. """
    return any(a in base.__dict__ for base in cls.__mro__)
|
114c84f575d1b59a78cfa4d32b8c04da7006b7ae
| 35,151 |
def list_s3_keys_in_bucket(s3client,
                           bucket,
                           prefix=''):
    """
    Returns a list of the keys situated at the given prefix in the given bucket

    :s3client: boto3.session.Session.client that represents a connection with s3
    :bucket: string representing the s3 bucket's name
    :prefix: string representing the base filepath to search at in the s3 bucket, default: ''

    Returns an empty list when no object matches the prefix.
    """
    # 'Contents' is omitted from the response when there are no matching
    # objects, so default to [] instead of raising KeyError.
    response = s3client.list_objects(Bucket=bucket, Prefix=prefix)
    # NOTE(review): list_objects returns at most 1000 keys per call; a
    # paginator would be needed for larger buckets -- confirm with callers.
    return [obj['Key'] for obj in response.get('Contents', [])]
|
dd39b7f7074315458a200b34f8bf4acf385442e6
| 35,153 |
def split_name(a_name):
    """
    Split a full name into a (first, last) pair.

    The final whitespace-separated word is the last name; everything
    before it is the first name. A single word counts as last name only.

    examples = {
        'ok simple': ('ok', 'simple'),
        'solo': ('', 'solo'),
        'three part name': ('three part', 'name'),
        'name with-hyphen': ('name', 'with-hyphen'),
        '': ('', '')
    }
    :param a_name: str
    :return: ('first', 'last')
    """
    words = a_name.split()
    if not words:
        return '', ''
    return ' '.join(words[:-1]), words[-1]
|
c4b735c723152bde677c9f85d5a6b0fe90deac1f
| 35,156 |
def fitsum(list):
    """Sum of fitnesses in the list, needed to build
    wheel of fortune.

    Returns the total as a float (0.0 for an empty list).
    """
    # NOTE: the parameter name shadows the builtin `list`; kept for
    # backward compatibility with existing callers.
    # Starting from 0.0 preserves the original float result even for
    # int inputs or an empty sequence.
    return sum(list, 0.0)
|
ced8f524f16f7174417eb3f9ce9effcab84a65ae
| 35,162 |
from datetime import datetime
def get_timestamp_id(year: bool = True, month: bool = True, date: "datetime | None" = None) -> str:
    """
    Returns timestamp id (tp_id) in format '2021-01', '2021' or '01'.

    Args:
        year: include the year component.
        month: include the month component.
        date: datetime to format; defaults to the current UTC time.

    Note:
        The previous default ``date=datetime.utcnow()`` was evaluated once
        at import time, permanently freezing "now"; it is now resolved on
        every call.
    """
    if date is None:
        date = datetime.utcnow()
    if not year:
        return date.strftime('%m')
    elif not month:
        return date.strftime('%Y')
    return date.strftime('%Y-%m')
|
ccf9fbaae3b93b239b422e15f633cf1347fc47b9
| 35,164 |
def _select_best_indel(indels):
"""Select the highest quality indel, based on the quality,
prefering low earlier positions above later positions in
case of ties."""
def _indel_by_quality_and_position(indel):
# The negative position is used to select the first
# of equally quality indels
return (float(indel.qual), -indel.pos)
return max(indels, key = _indel_by_quality_and_position)
|
fcee293103c86d7683d54ea030f5b6d0c4b31a21
| 35,170 |
def temporal_affine_backward(dout, cache):
    """
    Backward pass for temporal affine layer.

    Input:
    - dout: Upstream gradients of shape (N, T, M)
    - cache: Values from forward pass

    Returns a tuple of:
    - dx: Gradient of input, of shape (N, T, D)
    - dw: Gradient of weights, of shape (D, M)
    - db: Gradient of biases, of shape (M,)
    """
    x, w, b, out = cache
    N, T, D = x.shape
    M = b.shape[0]

    # Collapse batch and time axes: the layer is then a plain affine
    # backward pass over N*T rows.
    dout_flat = dout.reshape(N * T, M)
    x_flat = x.reshape(N * T, D)

    dx = dout_flat.dot(w.T).reshape(N, T, D)
    dw = x_flat.T.dot(dout_flat)
    db = dout.sum(axis=(0, 1))
    return dx, dw, db
|
e43b1ada3db1cac4189f1c4dbc9aa4e7a9c40b64
| 35,173 |
from typing import Tuple
from typing import List
from typing import Dict
def selection(triple: Tuple[str, str, str], variables: List[str]) -> Dict[str, str]:
    """Apply a selection on a RDF triple, producing a set of solution mappings.

    Args:
      * triple: RDF triple on which the selection is applied.
      * variables: Input variables of the selection (None = not selected).

    Returns:
      A set of solution mappings built from the selection results.

    Example:
      >>> triple = (":Ann", "foaf:knows", ":Bob")
      >>> variables = ["?s", None, "?knows"]
      >>> selection(triple, variables)
      { "?s": ":Ann", "?knows": ":Bob" }
    """
    # Pair each variable with its triple component; None marks an
    # unselected position and is skipped.
    return {var: term for var, term in zip(variables, triple) if var is not None}
|
fee52583e62d589863214e74e99fc427a0b6577d
| 35,175 |
import re
def parse_firewall_rule(rule_str):
    """
    Parse a ';'-separated firewall-rule string into a list of dicts.

    parameter: (string) rules
        A firewall rule in the specified project
    Return firewall rules as dictionary list

    Raises ValueError on any field that does not match the expected
    ``ipprotocol=...,ports=...`` shape.
    """
    regex = re.compile(r'ipprotocol=([\w\d_:.-]+),ports=([ /\w\d@_,.\*-]+)', flags=re.I)
    parsed = []
    for field in rule_str.split(';'):
        match = regex.match(field)
        if not match:
            raise ValueError('Could not parse field: %s' % (field,))
        protocol, ports = match.groups()
        parsed.append({'IPProtocol': protocol, 'ports': ports.split(',')})
    return parsed
|
4b3f364f9d102b664ca6f7dab4956ad96be810ae
| 35,178 |
def recvall(sock, size: int):
    """Receive exactly *size* bytes from *sock*.

    Returns the accumulated bytes, or None if the peer closes the
    connection before the full amount has arrived.
    """
    chunks = []
    remaining = size
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:
            # connection closed mid-message
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)
|
6a0f6814cdaf6847d467f4c5620c3897b1ff2ac8
| 35,183 |
def is_IPv4(ip_string):
    """Returns true if the string is an IPv4: 4 numbers in 0..255, separated by dots.

    Returns False (instead of raising) for non-numeric or negative parts.
    """
    digit_list = ip_string.split(".")
    if len(digit_list) != 4:
        return False
    for d in digit_list:
        # isdigit() rejects signs, blanks and non-numeric text, which the
        # previous bare int() call crashed on with ValueError; it also
        # rejects negative parts such as "-1", which were wrongly accepted.
        if not d.isdigit():
            return False
        if int(d) > 255:
            return False
    return True
|
1a0b20b4b366e8f4e19e225d32b7a887aed3fe17
| 35,189 |
def extractValue(indexString, content):
    """
    Extracts an integer value after the indexString from the given content.

    Searches for the given string, then moves over that string + 1 pos
    (new line), then reads digits -- skipping ',' thousands separators --
    until a '<' is found.

    indexString - The string to search for.
    content - The content to search in.

    Returns:
        The parsed integer, or 0 when the value field is empty.

    Raises:
        ValueError: if indexString does not occur in content.
    """
    index = content.find(indexString)
    if index == -1:
        raise ValueError('String not found!', indexString)
    index += len(indexString) + 1
    digits = []
    while content[index] != '<':
        if content[index] != ',':
            digits.append(content[index])
        index += 1
    # Convert once after the scan: the previous version called
    # int(numberStr) on every iteration and crashed with ValueError
    # whenever the first character was a ','.
    return int(''.join(digits)) if digits else 0
|
30daae4a53a4d898e5d8a9a1272cf7f0c3e170d2
| 35,191 |
import time
def wait_for_ego_vehicle(world):
    """Block until a hero vehicle is spawned by the scenario runner, then return it.

    Note: The loop ticks the simulation between scans.
    """
    while True:
        time.sleep(1)
        actors = world.get_actors().filter('vehicle.*')
        hero = next(
            (a for a in actors if a.attributes['role_name'] == 'hero'),
            None,
        )
        if hero is not None:
            return hero
        world.tick()
|
22b584ef02a4db6f6c856ef7e69cf1a17e7bb150
| 35,192 |
def parse_context_table_records_list(records_list: list, fmt: str, is_delete: bool = False):
    """ Parses records list given as an argument in context tables management commands.

    Args:
        records_list: The list of records
        fmt: The format of each record, e.g. id:key:value
        is_delete: Whether or not it is a delete request

    Returns:
        (list) The records, in request payload format.

    Raises:
        ValueError: when a record has a different number of fields than fmt.
    """
    field_names = fmt.split(':')
    parsed = []
    for raw_record in records_list:
        values = raw_record.split(':')
        if len(values) != len(field_names):
            raise ValueError('records argument is malformed.')
        record = dict(zip(field_names, values))
        if is_delete:
            # delete requests are keyed only by id; blank out the key
            record['key'] = ''
        value = record.get('value')
        if value:
            record['value'] = value.split(';')
        elif value == '':
            record['value'] = []
        parsed.append(record)
    return parsed
|
f03b077cfa421688a7e4e58058c0c4ef7d9e619b
| 35,193 |
def thr_half(tensor):
    """Return the per-position minimum over the batch dimension together
    with half of the min-to-max range (batch dim kept with size 1)."""
    lo = tensor.min(0, keepdim=True).values
    hi = tensor.max(0, keepdim=True).values
    return lo, (hi - lo) / 2.0
|
7d403d048f9c4f914474e4639be7ec1a1a012020
| 35,194 |
def _get_all_shortcuts(directories):
"""
Args:
directories (deque of str objects): All directories up to but excluding the function folder
Returns:
list: All possible shortcuts to the mcfunction path
str: The mcfunction path as specified by minecraft
Examples:
>>> i = InFileConfig()
>>> directories = deque(["ego", "floo_network", "init"])
>>> i._get_all_shortcuts(directories)
(['ego:floo_network/init', 'floo_network/init', 'init'], 'ego:floo_network/init')
"""
# gets the mcfunction path
mcfunction_path = directories.popleft() + ":"
mcfunction_path += "/".join(directories)
# shortcuts also has the mcfunction path to map to itself to pass the FunctionBuilder containment test
shortcuts = []
shortcuts.append(mcfunction_path)
# gets all shortcuts to the full name
while directories:
shortcut = "/".join(directories)
shortcuts.append(shortcut)
directories.popleft()
return shortcuts, mcfunction_path
|
ad8912b1f5ea58ae14ec8fd139b009d745f38bf5
| 35,195 |
def quote_path(path: str) -> str:
    """
    Wrap a file path in double quotes when it contains a space or tab.
    """
    needs_quoting = any(ws in path for ws in (" ", "\t"))
    return f'"{path}"' if needs_quoting else path
|
fae1dc338fe672871c08ef4b5aa2160dacbba650
| 35,197 |
import re
def remove_comments(tex_source):
    """Delete latex comments from TeX source.

    Parameters
    ----------
    tex_source : str
        TeX source content.

    Returns
    -------
    tex_source : str
        TeX source without comments.
    """
    # '%' starts a comment unless escaped as '\%'; strip to end of line.
    # Expression via http://stackoverflow.com/a/13365453
    comment = re.compile(r'(?<!\\)%.*$', re.M)
    return comment.sub('', tex_source)
|
efad4eba12e93af92ca55b6c926d0acf14b653c4
| 35,198 |
from datetime import datetime
import pytz
def from_unixtime(unixtime_, timezone_="UTC"):
    """
    Convert a unixtime int, *unixtime_*, into python datetime object

    Parameters
    ----------
    `unixtime_` : int
        Unixtime i.e. seconds since epoch
    `timezone_` : string
        The timezone of the output date from Olson timezone database. Defaults to utc.

    Returns
    -------
    datetime.datetime
        Python datetime object (timezone aware)

    Notes
    -----
    unixtime == seconds since epoch (Jan 01 1970 00:00:00 UTC)\n
    pytz http://pythonhosted.org/pytz/\n
    Unit test: UKPVLiveTestCase.test_to_unixtime
    """
    tz = pytz.timezone(timezone_)
    return datetime.fromtimestamp(unixtime_, tz=tz)
|
e23c795dd83c45eae376d70af7ea8632b844af43
| 35,199 |
def add_dimention(tensor):
    """Prepend a batch dimension and cast to float64 so a single sample
    can be fed through networks built with ``nn.Sequential``.

    Args:
        tensor: a single tensor without the batch dimension.

    Returns:
        The same data with shape (1, *original_shape) and dtype float64.
    """
    # .double() is equivalent to .to(torch.float64)
    return tensor.unsqueeze(0).double()
|
03191db56a073f0a06b1358fe05719e57c8d9bec
| 35,205 |
def get_number_rows(ai_settings, ship_height, alien_height):
    """Calculate how many rows of aliens the screen can hold."""
    # vertical space left after reserving 3 alien heights at the top
    # and the ship height at the bottom
    available_space_y = (ai_settings.screen_height
                         - 3 * alien_height
                         - ship_height)
    # each row occupies two alien heights
    return int(available_space_y / (2 * alien_height))
|
4cd769a162bc47447293d0ac34ff86298e9beb65
| 35,206 |
def valid_pt(pt, shape):
    """
    Determine if a point (indices) is valid for a given shape:
    every index must satisfy 0 <= index < extent.
    """
    return all(0 <= index < extent for index, extent in zip(pt, shape))
|
8d2ff4de6666bf60f128493b4a41859ba0a79a32
| 35,207 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.