content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def get_recursively(search_dict, field):
    """
    Recursively search a dict (with nested dicts and lists) for every value
    stored under key *field*.

    :param search_dict: dict to search; values may be nested dicts or lists.
    :param field: key name to look for.
    :return: tuple ``(fields_found, keys)`` where ``fields_found`` is the list
        of matching values and ``keys`` is a parallel list of key paths; each
        path is a list of dict keys (and, for list entries, the list index).
    """
    fields_found = []
    keys = []
    for key, value in search_dict.items():
        if key == field:
            fields_found.append(value)
            keys.append([key])
        elif isinstance(value, dict):
            results, sub_keys = get_recursively(value, field)
            fields_found.extend(results)
            # Prefix each nested key path with the current dict key.
            keys.extend([key] + sub_key for sub_key in sub_keys)
        elif isinstance(value, list):
            for index, item in enumerate(value):
                if isinstance(item, dict):
                    results, sub_keys = get_recursively(item, field)
                    fields_found.extend(results)
                    # List entries are addressed by index only (the list's own
                    # key is not recorded — preserved from the original logic).
                    keys.extend([index] + sub_key for sub_key in sub_keys)
    return fields_found, keys
|
1e7f9f7fba5eb96225b05a888e467a78999873c6
| 466,135 |
import copy
def remove_items(data, items, action='r'):
    """Removes identified items from a list and returns updated list.
    Only works on lists of items, not on nested lists. If action flag is set to
    'k' then the items in items are kept and all items not in items are
    removed.
    Args:
        data (list): List of items to be processed.
        items (list): List of items to be checked against.
        action (str): Action to take on items in items:
            - 'r' Items in items are removed if found.
            - 'k' Items not in items are removed if found.
    Returns:
        updated_data (list): Data after processing.
    """
    # Bug fix: the original removed elements from the list it was iterating,
    # which silently skips the element after each removal (e.g. consecutive
    # duplicates survived). Build a new list instead.
    print('\nProcessing items')
    num_items = len(data)  # For calculating % complete
    updated_data = []
    for n, item in enumerate(data, start=1):
        # Display progress
        progress = round((n / num_items) * 100)
        print("\rProgress: {}{}".format(progress, '%'), end="", flush=True)
        if action == 'r':
            if item not in items:
                updated_data.append(item)
        elif action == 'k':
            if item in items:
                updated_data.append(item)
        else:
            # Unknown action: keep everything (mirrors the original fall-through).
            updated_data.append(item)
    print('\rFinished processing items')
    return updated_data
|
ed7b867ed5e8ea83d04371198632dce56779bdfc
| 170,393 |
def fileGroupName(filePath):
    """
    Return the group name of a file path, i.e. everything before the first
    dot: ``foo.0080`` -> ``foo``.
    """
    name, _, _ = filePath.partition('.')
    return name
|
be43b8ce497b46ad79b153e686c1ed3b78ea6509
| 655,243 |
def fizzbuzz(n):
    """Return the FizzBuzz string for *n*: 'Fizz' for multiples of 3,
    'Buzz' for multiples of 5, 'FizzBuzz' for both, else str(n)."""
    words = ('Fizz' if n % 3 == 0 else '') + ('Buzz' if n % 5 == 0 else '')
    return words or str(n)
|
eb1a1b2ddc62d6008d40534d794e04d5369a6542
| 491,212 |
import math
def log(x):
    """
    Standard logistic (sigmoid) function. Output in (0, 1).

    Uses the numerically stable two-branch form: the naive
    ``1 / (1 + exp(-x))`` raises OverflowError for x << 0 because
    ``math.exp`` overflows around exp(710).
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # For x < 0, exp(x) <= 1, so this branch cannot overflow.
    z = math.exp(x)
    return z / (1 + z)
|
4fa304e0093455c53a58fc78eaa5ab01cfa3078d
| 344,584 |
def intervals(nms  # list of indices
              ):
    """
    Return the intervals (consecutive pairs) of a sequence of numbers.
    ::
        >>> intervals([1, 2, 3]) == [(1, 2), (2, 3)]
        True
    """
    # zip stops at the shorter argument, so the original's nms[:] copy was
    # unnecessary work.
    return list(zip(nms, nms[1:]))
|
5cedb77bb582caa1ec757068c465797cde717f24
| 578,972 |
def compute_lwa(price):
    """Linearly weighted average: weight i+1 on price[i], so recent data
    (higher indices) counts more.

    :param price: non-empty sequence of prices, most recent last.
    :return: the weighted average as a float.
    :raises ZeroDivisionError: if price is empty.
    """
    n = len(price)
    denom = n * (n + 1) / 2  # sum of weights 1..n
    num = sum((i + 1) * p for i, p in enumerate(price))
    return num / denom
|
7898465d927c00c7e59a3aac95928a446212dd1c
| 532,923 |
import io
import zipfile
def create_zip(files):
    """
    Write all given files (a dict of name -> contents) into an in-memory
    ZIP archive and return the raw archive bytes.

    :param files: mapping of archive member name to its data (str or bytes).
    :return: bytes of the complete ZIP archive.
    """
    byte_file = io.BytesIO()
    # `with` guarantees the central directory is flushed even on error.
    with zipfile.ZipFile(byte_file, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        # Bug fix: dict.iteritems() is Python 2 only; use .items().
        for file_name, file_data in files.items():
            zip_file.writestr(file_name, file_data)
    return byte_file.getvalue()
|
0f097d3200ae20401276027af63a3b80c2ce44d4
| 70,119 |
def task_list_item_list_description(data, singular, plural):
    """
    Build a "<count> <noun> added" description for a task list item based
    on how many entries it contains; returns None for an empty collection.
    """
    count = len(data)
    if count == 0:
        return None
    noun = singular if count == 1 else plural
    return f"{count} {noun} added"
|
6fe2230e40690377be8377c332d970fc649c2782
| 661,843 |
def get_class_text(soup, class_name):
    """
    Return the text of the first element with the given HTML class,
    or None if no element matches.
    """
    matches = soup.find_all(class_=class_name)
    return matches[0].text if matches else None
|
cb039a34b88b40a59fd45df1a028547cd6d2659a
| 146,006 |
def float_sum(iterable):
    """Sum the elements of *iterable* and return the total as a float."""
    total = sum(iterable)
    return float(total)
|
3bec19ab174a325c5deb57a722f3f89bfd65751a
| 569,303 |
def get_offset2DIE(dwarfinfo):
    """
    Build a nested mapping of DIEs keyed by Compilation Unit offset.

    The outer dict maps each CU offset to an inner dict; the inner dict
    maps CU-relative DIE offsets to the DIE objects themselves.
    """
    result = {}
    for compile_unit in dwarfinfo.iter_CUs():
        cu_offset = compile_unit.cu_offset
        die_map = {}
        for die in compile_unit.iter_DIEs():
            # DIE offsets are absolute; subtract to make them CU-relative.
            die_map[die.offset - cu_offset] = die
        result[cu_offset] = die_map
    return result
|
0c57996fc7aa9b66b1463c815bd3b8035d4faeea
| 462,500 |
def url_string(it):
    """Build the full URL used to download the regional COVID-19 data for
    the given date string *it*."""
    base = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni-"
    return f"{base}{it}.csv"
|
f717cf841e757255e2d334f428f5ce44ad260edd
| 71,101 |
def comxyz(x, y, z):
    """Centre of mass given x, y and z vectors (all same size).

    (x[i], y[i]) is a position whose mass/value is z[i].

    :return: tuple (com_x, com_y).
    :raises ZeroDivisionError: if the total mass sum(z) is zero.
    """
    moment_x = sum(xi * zi for xi, zi in zip(x, z))
    moment_y = sum(yi * zi for yi, zi in zip(y, z))
    mass = sum(z)
    return (moment_x / mass, moment_y / mass)
|
014c52de3359e6e2b376c93e9ce7f5644b59b3d1
| 52,775 |
import typing
def get_ctffind_4_1_0_header_names() -> typing.List[str]:
    """
    Header names for the ctffind4 input file, in file order.

    Returns:
        List of column names.
    """
    names = [
        'DefocusU',
        'DefocusV',
        'DefocusAngle',
        'PhaseShift',
        'CtfFigureOfMerit',
        'CtfMaxResolution',
    ]
    return names
|
8e8ea28cc1a66690b67c5fe1d5f4f11aed79a49d
| 631,413 |
def is_pretty_name_line(line):
    """Return True if a symbol-table line defines a simple symbol name
    (a pretty name)."""
    prefix = 'Pretty name'
    return line.startswith(prefix)
|
196dd548ddf54c2b51bf21a071d2b8b0123a872f
| 229,339 |
def find_file_with_string(flist, orb):
    """
    Return the first element of *flist* that contains the substring *orb*.

    Parameters
    ==========
    flist: list[str]
        List of file names.
    orb: str
        Substring to look for.

    Returns
    =======
    The first matching element.

    Raises
    ======
    IndexError if no element contains *orb*.
    """
    matches = [entry for entry in flist if orb in entry]
    return matches[0]
|
1d7b07c84fd1f8ecd6a1e08e81aab45cc10cee70
| 168,880 |
def cumulative_sum(array):
    """Return the cumulative sums of a list of numbers: the i-th output
    element is the sum of the first i + 1 input elements.

    Example: cumulative_sum([1, 2, 3]) == [1, 3, 6]
    """
    from itertools import accumulate  # stdlib C-speed running sum
    return list(accumulate(array))
|
a9d486338ef731700e1aab7e95bc5256623988bf
| 452,544 |
def parser_network_name_Descriptor(data, i, length, end):
    """\
    parser_network_name_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
    Parse a descriptor carrying the name of the 'network' this multiplex
    belongs to (e.g. a transmitter name such as "Crystal Palace" for UK
    Freeview). Returns:
        { "type" : "network_name",
          "network_name" : string name of the network,
        }
    (Defined in ETSI EN 300 468 specification)
    """
    # The name payload starts 2 bytes past the descriptor tag/length header.
    return {
        "type": "network_name",
        "network_name": data[i + 2:end],
    }
|
0c2acc714795e66704085e95ba4ca8123a5a9b88
| 573,583 |
def is_point_data(columns):
    """
    Report whether a CSV column list describes point data, which is
    identified by a 'latitude' or 'easting' column.

    Args:
        columns: list of dataframe columns (may be None).
    Return:
        True if the columns indicate point data, else False.
    """
    if columns is None:
        return False
    return 'latitude' in columns or 'easting' in columns
|
77d7e4f612f23ab3cd704ea840b0d194efe2cc07
| 254,637 |
def func(f, xmin, xmax, step=None):
    """Sample points from the single-parameter function *f* (e.g. math.sin).

    Samples are taken at xmin, xmin+step, ... while the x value stays below
    xmax. When *step* is falsy, 1/100th of the span is used.
    Returns a list of (x, f(x)) tuples.
    """
    if not step:
        step = (xmax - xmin) / 100.0
    samples = []
    x = xmin
    # Deliberate float accumulation: matches the original sampling points.
    while x < xmax:
        samples.append((x, f(x)))
        x += step
    return samples
|
6422af2f85286411728169cc77a4b781429b9061
| 572,281 |
def _find_party_idx(party_id, endpoint_list):
"""
return the index of the given party id in the endpoint list
:param party_id: party id
:param endpoint_list: list of endpoints
:return: the index of endpoint with the party_id, or -1 if not found
"""
for idx in range(0, len(endpoint_list)):
if party_id == int(endpoint_list[idx].split(":")[0]):
return idx
return -1
|
7a49335fb2522bc7c19e87a112b768353c82bed2
| 67,606 |
def get_terms(p):
    """
    Split an n-variable polynomial into its individual terms.

    :param p: polynomial exposing monomials() and coefficients().
    :return: list of monomial*coefficient terms whose sum equals p.
    """
    terms = []
    for monomial, coefficient in zip(p.monomials(), p.coefficients()):
        terms.append(monomial * coefficient)
    return terms
|
0163dca9f7203a79ad10cc8d3d1ef179bc730179
| 468,303 |
from typing import Any
import textwrap
def short_str(obj: Any) -> str:
    """Return a shortened string form of *obj*, suitable for error messages.

    Strings are quoted verbatim; anything else is repr()'d and truncated.
    """
    if isinstance(obj, str):
        return f'"{obj}"'
    return textwrap.shorten(repr(obj), width=30, placeholder="...")
|
acb1b3c6c7b91514670c47786af327b03a89ecf1
| 490,130 |
def in_time_frame(sim_start_epoch, sim_end_epoch,
                  rec_start_epoch, rec_end_epoch):
    """
    Check whether a record overlaps the simulation time frame.
    :param sim_start_epoch: simulation start.
    :param sim_end_epoch: simulation end.
    :param rec_start_epoch: record start.
    :param rec_end_epoch: record end.
    :return: True if the record overlaps the simulation time frame.
    """
    # De Morgan of "not (record ends before sim or starts after sim)".
    overlaps = (rec_end_epoch > sim_start_epoch and
                rec_start_epoch < sim_end_epoch)
    return overlaps
|
2f33b4ac944f1be3224a259c218dd4c87f8d990a
| 578,136 |
import json
def transform_trusted_origins(data):
    """
    Transform trusted-origin data returned by the Okta server.

    :param data: JSON response text (a list of trusted-origin objects).
    :return: list of dicts containing the trusted-origin properties.
    """
    transformed = []
    for origin_data in json.loads(data):
        # https://developer.okta.com/docs/reference/api/trusted-origins/#scope-object
        props = {
            "id": origin_data["id"],
            "name": origin_data["name"],
            "origin": origin_data["origin"],
            "scopes": [scope["type"] for scope in origin_data.get("scopes", [])],
            "status": origin_data["status"],
            "created": origin_data.get("created", None),
            "created_by": origin_data.get("createdBy", None),
            "okta_last_updated": origin_data.get("lastUpdated", None),
            "okta_last_updated_by": origin_data.get("lastUpdatedBy", None),
        }
        transformed.append(props)
    return transformed
|
ea0f38afa9b894df5dcef8e0e99a6bfa8036de8e
| 310,709 |
def get_dct_subset(dct, keys):
    """Return the subset of *dct* restricted to *keys*; missing keys are
    silently skipped."""
    return {key: dct[key] for key in keys if key in dct}
|
cb4095266b7ae93cc1bc975e7928d9b65b55e4e2
| 375,609 |
import time
def validate_counter(counter):
    """
    Validate that *counter* lies inside a sliding window of +/- 12 hours
    (43200 seconds) around the current time.
    """
    window = 43200  # 12 hours in seconds
    now = int(time.time())
    return now - window <= counter <= now + window
|
7f7866c3f1f038bbe87ffdce4f05bb385f72d030
| 82,009 |
import string
def whitelisted(
    s, whitelist="_-" + string.ascii_letters + string.digits, substitute="_"
):
    """
    Replace every character of *s* not in *whitelist* with *substitute*.
    >>> whitelisted("ab/cd#ef(gh")
    'ab_cd_ef_gh'
    >>> whitelisted("ab/cd#ef(gh", substitute='')
    'abcdefgh'
    """
    kept = [c if c in whitelist else substitute for c in s]
    return "".join(kept)
|
b70236d523416da367d3e5d372b6dfd94635b3b6
| 114,894 |
def parse_spec(spec, default_module):
    """Parse a spec of the form ``module.class:kw1=val,kw2=val``.

    Returns a tuple of module, classname, positional-argument list and
    keyword dict. When the name has no dot, *default_module* (if truthy)
    is used as the module; otherwise the name itself is the module and
    the class is None.
    """
    name, _, args = spec.partition(':')
    if '.' in name:
        module, klass = name.rsplit('.', 1)
    elif default_module:
        module, klass = default_module, name
    else:
        module, klass = name, None
    pieces = args.split(',')
    al = [piece for piece in pieces if piece and '=' not in piece]
    kw = dict(piece.split('=', 1) for piece in pieces if '=' in piece)
    return module, klass, al, kw
|
5ea1c05488e77e1c7dd76ed2ae332dbea460f0ff
| 686,851 |
def config(tmpdir):
    """
    Build a test config containing:
    - a unique temporary sqlite database location
    - a unique nested dict instance that the test can mutate
    """
    authentication = {
        "secret_keys": ["SECRET"],
        "providers": [
            {
                "provider": "toy",
                "authenticator": "tiled.authenticators:DictionaryAuthenticator",
                "args": {
                    "users_to_passwords": {"alice": "secret1", "bob": "secret2"}
                },
            }
        ],
    }
    trees = [
        {
            "tree": f"{__name__}:tree",
            "path": "/",
        },
    ]
    return {
        "authentication": authentication,
        "database_uri": f"sqlite:///{tmpdir}/tiled.sqlite",
        "trees": trees,
    }
|
4ba29ecb64d00ee0433110a10eab3381e8de57c7
| 290,899 |
import json
def dejsonize(data):
    """
    Return the JSON-deserialized form of *data*.
    >>> dejsonize('{"foo": "bar"}')
    {'foo': 'bar'}
    """
    parsed = json.loads(data)
    return parsed
|
dc7bf3e44e936b1222752efed693889268d9d322
| 587,356 |
def get_header(fn):
    """
    Read the header columns of a tab-separated file.

    :param fn: filename whose first row holds the column names.
    :return: list of column names.
    """
    with open(fn) as handle:
        first_line = handle.readline()
    return first_line.strip().split('\t')
|
27cef8956dca79d274698675466577264db766d9
| 223,081 |
import requests
def find_anaconda_versions(name, anaconda_channel='bioconda'):
    """
    Find the available anaconda download URLs for a given container name.

    :param name: package/container name on anaconda.org.
    :param anaconda_channel: channel to search (default 'bioconda').
    :return: list of linux download URLs scraped from the files page.
    """
    response = requests.get(f"https://anaconda.org/{anaconda_channel}/{name}/files")
    # Each linux build line embeds its URL as the first quoted attribute.
    return [
        line.split('"')[1]
        for line in response.text.split('\n')
        if 'download/linux' in line
    ]
|
f174ec55163914220de9745acd00f9263ef26c1d
| 106,327 |
def FloorLog10Pow2(e):
    """Return floor(log10(2**e)) for -1650 <= e <= 1650.

    Fixed-point trick: 78913 ~= log10(2) * 2**18, so e*78913 >> 18
    floors the product (arithmetic shift == floor division for ints).
    """
    assert -1650 <= e <= 1650
    return (int(e) * 78913) >> 18
|
328a9af331685a008ec53b0fc12b7de5d393c6b9
| 451,894 |
from datetime import datetime
def _append_current_datetime(file_path: str):
"""
Insert date and time into file path and return it.
"""
now = datetime.now().strftime("%Y_%m_%d_%H%M%S")
if file_path.endswith('.log'):
return file_path.replace(file_path, f'_{now}.log')
return f'{file_path}_{now}.log'
|
4983326b0c2a142b8ae1f18e3759c6f7e93d5428
| 578,646 |
def is_urn(s: str) -> bool:
    """Return True if *s* is a string in uuid URN format ("urn:uuid:...").

    Non-string inputs return False rather than raising.
    """
    # isinstance instead of type(s) == str: idiomatic, and accepts str
    # subclasses as well.
    return isinstance(s, str) and s.startswith("urn:uuid:")
|
7f5bbf7dad8e86a687230c29a50f3218198a8286
| 20,366 |
import time
def epoch_to_date(arg_epoch):
""" Function to convert epoch to DateTime """
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(arg_epoch)))
|
4f64cd1b0e9ccbc5eb0bf4d141925da85d1963c2
| 323,346 |
def _is_bare_mapping(o):
"""Whether the object supports __getitem__ and __contains__"""
return (
hasattr(o, '__contains__') and callable(o.__contains__) and
hasattr(o, '__getitem__') and callable(o.__getitem__)
)
|
d8d9e65558c8434dede437caea201a10022fc15c
| 511,480 |
def batch(tensor, batch_size=50):
    """Split *tensor* along its first axis into consecutive batches of
    *batch_size* samples; the final batch holds whatever remains (and is
    returned even when the input is empty)."""
    length = tensor.shape[0]
    batches = []
    start = 0
    while start + batch_size < length:
        batches.append(tensor[start:start + batch_size])
        start += batch_size
    # Remainder (possibly empty or full-size) batch, exactly like the original.
    batches.append(tensor[start:length])
    return batches
|
2b4b10520fd72b90ebe1239b7f52e61ba442484d
| 41,528 |
def add_type_restriction(step):
    """
    Build a SPARQL fragment restricting the graph to objects of the step's
    declared type.
    :param step: step whose 'object' dict may carry 'name' and 'type'.
    :return: the SPARQL restriction fragment, or "" when no type is given.
    """
    obj = step['object']
    if 'type' not in obj:
        return ""
    return '?' + obj['name'] + ' a <' + str(obj['type']) + '> . '
|
542048428c8eb38792d3604b57d420775cc86b60
| 136,539 |
def in_labelset(xmrs, nodeids, label=None):
    """
    Test whether all *nodeids* share a label in *xmrs*.

    Args:
        nodeids: iterable of nodeids.
        label: if given, all nodeids must share this label; otherwise the
            label of an arbitrary member of the set is used as reference.
    Returns:
        True if all nodeids share the label, otherwise False.
    """
    id_set = set(nodeids)
    if label is None:
        # Take the label of an arbitrary member as the reference label.
        label = xmrs.ep(next(iter(id_set))).label
    return id_set.issubset(xmrs._vars[label]['refs']['LBL'])
|
1db07cd4f4a24a16ac5f06d09dbe65f9473da4b6
| 73,643 |
def parse_month(month) -> int:
    """Parse a month given as a name or a number into an int 1-12.

    Accepts full English month names and common abbreviations
    (case-insensitive); anything unrecognised falls back to int(month).

    :raises ValueError: if month is neither a known name nor int-parsable.
    """
    # Renamed from `map`: the original shadowed the builtin.
    month_numbers = {
        'jan': 1, 'january': 1,
        'feb': 2, 'february': 2,
        'mar': 3, 'march': 3,
        'apr': 4, 'april': 4,
        'may': 5,
        'jun': 6, 'june': 6,
        'jul': 7, 'july': 7,
        'aug': 8, 'august': 8,
        'sep': 9, 'sept': 9, 'september': 9,
        'oct': 10, 'october': 10,
        'nov': 11, 'november': 11,
        'dec': 12, 'december': 12,
    }
    key = str(month).lower()
    if key in month_numbers:
        return month_numbers[key]
    return int(month)
|
114180d15b6937d85c30f8d42363215df0df5bb9
| 492,558 |
def transform_dotted_decimal_to_mac(dotted_decimal_mac):
    """
    Method to transform dotted decimals to a MAC address.
    Args:
        dotted_decimal_mac(string): dotted decimal string, e.g.
            "1.2.3.170.187.204"
    Returns:
        mac_address(string): colon-separated upper-case hex, e.g.
            "01:02:03:AA:BB:CC"
    """
    octets = dotted_decimal_mac.split(u'.')
    # format(..., '02x') zero-pads directly; the previous
    # hex()/lstrip('0x') approach was fragile because lstrip strips ANY
    # leading '0'/'x' characters, not the '0x' prefix specifically.
    return u':'.join(format(int(octet), '02x') for octet in octets).upper()
|
57acbe5e34ca35685866e4000dbc301046a60e95
| 457,145 |
def Numbad(Index, Offset: int, Tr_Length: int):
    """ Returns the number of Label instances that finish after the end of the Epoch (Prevents Index errors propagating)
    Parameters:
    ----------
    Index: list
        A list of lists containing the start times of only one type of Label
        in each Clipping, at 1 kHz sampling frequency.
    Offset: int
    Tr_Length: int
    return:
    -------
    count: int
        number of bad indexes (start times below Offset + Tr_Length)
    """
    limit = Offset + Tr_Length
    # Flatten all clippings and count the start times below the limit.
    return sum(
        1
        for starts in Index
        for start in starts
        if start < limit
    )
|
5a5c5a5b6af8fadb25d689ff19ad5e2827fc90c0
| 256,894 |
import six
def byte_to_char(bin_input):
    """
    Convert a byte, given as a binary string or an int, to a character.
    >>> byte_to_char("0110010")
    '2'
    >>> byte_to_char(100)
    'd'
    """
    # six.string_types is just (str,) on Python 3, so the six dependency
    # is unnecessary. (The old doctest "0010010" -> '2' was also wrong:
    # int("0010010", 2) == 18, and chr(18) is a control character.)
    if isinstance(bin_input, str):
        return chr(int(bin_input, base=2))
    return chr(bin_input)
|
686f28ce95aee1ecb409afecb7dca4d890a4d45c
| 591,016 |
def count_string_diff(a, b):
    """Count the positions (up to the shorter string's length) at which
    *a* and *b* hold different characters."""
    # zip truncates at the shorter input, matching min(len(a), len(b)).
    return sum(1 for ca, cb in zip(a, b) if ca != cb)
|
bed0fd617ad09f0a49ce64b3e12ed0791d81ebea
| 499,983 |
def require_login(f):
    """Decorator for methods that require login.

    Ensures ``instance.login()`` has been called (and records success in
    ``instance.logged_in``) before delegating to the wrapped method.
    """
    import functools

    @functools.wraps(f)  # preserve the wrapped method's name and docstring
    def wrapper(instance, *args, **kwargs):
        if not instance.logged_in:
            instance.login()
            instance.logged_in = True
        return f(instance, *args, **kwargs)
    return wrapper
|
109e1fec5ab06fc09b254d2a194eb23b35edbf4d
| 511,594 |
def get_shape(coordinates):
    """
    Return the shape of regular-grid coordinates.

    Parameters
    ----------
    coordinates : :class:`xarray.DataArrayCoordinates`
        Coordinates located on a regular grid.

    Return
    ------
    shape : tuple
        Size of each dimension, in ``coordinates.dims`` order.
    """
    sizes = [coordinates[dim].size for dim in coordinates.dims]
    return tuple(sizes)
|
a3e261b401c54951ff2ccf78050f50faf0bbe772
| 58,907 |
def _calculate_singles(row):
"""
Returns:
The number of singles by a player as an int
based on the following formula:
1B = H - 2B - 3B - HR
"""
singles = row['H'] - row['2B'] - row['3B'] - row['HR']
return singles
|
4fc05c781863d682050a2bd5a411a54d3bd1a183
| 203,704 |
def _longest_variable(string: str) -> int:
"""Computes the longest variable in the input.
Example: _longest_variable("test[a]") == 1
Args:
string (str): the string to analyze
Returns:
int: the length of the longest variable
"""
thislen = 0
maxlen = 0
inside = False
for s in string:
if inside and s != "]":
thislen += 1
elif s == "]":
inside = False
maxlen = max(thislen, maxlen)
elif s == "[":
inside = True
thislen = 0
return maxlen
|
b1f4d0c80966aa94d41b09a5a7b2bb8d419b8e04
| 184,874 |
import re
def standardize_annotator(annotation, annotator="annotator"):
    """Replace the coder name in an Anvil annotation with *annotator*.

    An Anvil coder line looks like:
        <info key="coder" type="String">TMW</info>
    Group 1 captures everything before the coder name, group 2 the name
    itself, and group 3 the closing tag; group 2 is substituted.
    """
    pattern = r"(<.+key=\"coder\".+>)(.+)(</info>)"
    replacement = r"\1" + annotator + r"\3"
    return re.sub(pattern, replacement, annotation)
|
15136b0dfb60e6dcb317facadf997baaf40c7eec
| 566,964 |
def preceding(iterable, item):
    """The item which comes in the series immediately before the specified item.
    Args:
        iterable: The iterable series in which to search for item.
        item: The item to search for in iterable.
    Returns:
        The previous item.
    Raises:
        ValueError: If item is not present in iterable beyond the first item.
    """
    iterator = iter(iterable)
    try:
        previous = next(iterator)
    except StopIteration:
        raise ValueError("Iterable series is empty")
    if previous == item:
        # The very first element has nothing before it.
        raise ValueError(f"No item preceding {item!r} in iterable series")
    for current in iterator:
        if current == item:
            return previous
        previous = current
    raise ValueError(f"No item {item!r} in iterable series for which to return the preceding item")
|
5e40c74fe900ae56d5a403fd80da6471b3767785
| 258,080 |
def get_long_words(tg, threshold=127):
    """
    Return all parsed terms longer than a given threshold.

    :param tg: (TermGenerator) object exposing a ``terms`` iterable.
    :param threshold: (int) minimum length in characters (exclusive).
    :return: (List) all terms longer than the threshold.
    """
    long_words = []
    for term in tg.terms:
        if len(term) > threshold:
            long_words.append(term)
    return long_words
|
120325e748c5a943c9681fa73c15f44eef53e93a
| 98,174 |
import logging
def _has_level_handler(logger: logging.Logger) -> bool:
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
|
11a593339dd2761e973eea0d1b983b1c9795fb4f
| 342,431 |
from datetime import datetime
import pytz
def to_naive_utc_dt(dt):
    """
    Converts a datetime to a naive datetime (no tzinfo)
    as follows:
    - if the inbound dt is already naive, it is returned unchanged;
    - if it is timezone aware, it is converted to UTC and the tzinfo
      is stripped.

    :raises TypeError: if dt is not a datetime instance.
    """
    # stdlib datetime.timezone.utc replaces the pytz.utc dependency;
    # astimezone() to UTC behaves identically for any aware datetime.
    from datetime import timezone
    if not isinstance(dt, datetime):
        raise TypeError('Arg must be type datetime')
    if dt.tzinfo is None:
        return dt
    return dt.astimezone(timezone.utc).replace(tzinfo=None)
|
218f4ebeffec0399422c275f8af1a00a36015d81
| 436,350 |
import itertools
def node_and_primary_descendants(node):
    """Iterate over *node* followed by all of its primary (visible)
    descendants.

    :param node Node: target Node
    """
    primary_descendants = node.get_descendants_recursive(lambda n: n.primary)
    return itertools.chain([node], primary_descendants)
|
d041127cba2d86cde9617470e3bd388ffff802f7
| 547,182 |
def luminance_to_contrast_ratio(luminance1, luminance2):
    """Calculate the contrast ratio of a pair of relative luminances.

    :param luminance1: Relative luminance
    :type luminance1: float
    :param luminance2: Relative luminance
    :type luminance2: float
    :return: Contrast ratio (lighter + 0.05) / (darker + 0.05)
    :rtype: float
    """
    lighter = max(luminance1, luminance2)
    darker = min(luminance1, luminance2)
    return (lighter + 0.05) / (darker + 0.05)
|
bdd0adb7a01dc822a517f7472aa678749d994030
| 222,856 |
def split_learning_verification(dataframe, verification_ids):
    """
    Split a Pandas dataframe into training and testing sets: rows whose id
    is in *verification_ids* form the testing set, the rest the training set.

    Parameters
    ----------
    dataframe : Pandas dataframe
        The whole set to be split; must have an ``id`` column.
    verification_ids : list
        The id(s) whose rows constitute the testing set.

    Returns
    -------
    test_df
        Rows whose id is in verification_ids.
    train_df
        All remaining rows.
    """
    ids = list(map(int, dataframe.id.unique()))
    # Idiom: comprehension instead of the manual append loop.
    learn_ids = [e for e in ids if e not in verification_ids]
    test_df = dataframe[dataframe['id'].isin(verification_ids)]
    train_df = dataframe[dataframe['id'].isin(learn_ids)]
    return test_df, train_df
|
68c9ac2c1920a8a731760dffd75673543d7968c9
| 237,451 |
import re
def format_phone_number(raw_phone_number):
    """
    Normalize a phone number like "(919) 666-6666" to the Yelp API format
    "+19196666666": strip every non-digit, keep at most 10 digits, and
    prefix with "+1".
    """
    digits = re.sub(r'\D', '', raw_phone_number)[:10]
    return "+1" + digits
|
e3491a19b0f21f3eef8f7bfae063078905ed5422
| 289,362 |
import socket
import struct
def int2ip(number):
    """
    Convert an integer to a dotted-quad IP address string.
    :type number: int
    :param number: an IP as a number.
    :rtype: string
    :return: the IP address.
    """
    # Mask to 32 bits so out-of-range values wrap instead of failing pack.
    packed = struct.pack('!L', number & 0xFFFFFFFF)
    return socket.inet_ntoa(packed)
|
21b56396adba43555b2d493fd2a25e72bffbb535
| 153,007 |
def _get_social_page_image_url(file_attachments):
"""Return the URL to an image used for social media sharing.
This will look for the first attachment in a list of attachments that can
be used to represent the review request on social media sites and chat
services. If a suitable attachment is found, its URL will be returned.
Args:
file_attachments (list of reviewboard.attachments.models.FileAttachment):
A list of file attachments used on a review request.
Returns:
unicode:
The URL to the first image file attachment, if found, or ``None``
if no suitable attachments were found.
"""
for file_attachment in file_attachments:
if file_attachment.mimetype.startswith('image/'):
return file_attachment.get_absolute_url()
return None
|
51529abedc9228598bc783b7cef1c724cc32309d
| 495,138 |
def _byte_str(num, unit='auto', precision=2):
"""
Automatically chooses relevant unit (KB, MB, or GB) for displaying some
number of bytes.
Args:
num (int): number of bytes
unit (str): which unit to use, can be auto, B, KB, MB, GB, or TB
References:
https://en.wikipedia.org/wiki/Orders_of_magnitude_(data)
Returns:
str: string representing the number of bytes with appropriate units
Example:
>>> import ubelt as ub
>>> num_list = [1, 100, 1024, 1048576, 1073741824, 1099511627776]
>>> result = ub.repr2(list(map(_byte_str, num_list)), nl=0)
>>> print(result)
['0.00KB', '0.10KB', '1.00KB', '1.00MB', '1.00GB', '1.00TB']
>>> _byte_str(10, unit='B')
10.00B
"""
abs_num = abs(num)
if unit == 'auto':
if abs_num < 2.0 ** 10:
unit = 'KB'
elif abs_num < 2.0 ** 20:
unit = 'KB'
elif abs_num < 2.0 ** 30:
unit = 'MB'
elif abs_num < 2.0 ** 40:
unit = 'GB'
else:
unit = 'TB'
if unit.lower().startswith('b'):
num_unit = num
elif unit.lower().startswith('k'):
num_unit = num / (2.0 ** 10)
elif unit.lower().startswith('m'):
num_unit = num / (2.0 ** 20)
elif unit.lower().startswith('g'):
num_unit = num / (2.0 ** 30)
elif unit.lower().startswith('t'):
num_unit = num / (2.0 ** 40)
else:
raise ValueError('unknown num={!r} unit={!r}'.format(num, unit))
fmtstr = ('{:.' + str(precision) + 'f}{}')
res = fmtstr.format(num_unit, unit)
return res
|
0926ebb4549e51355893dac99bd25d4b545dab31
| 273,996 |
def get_size(image_array):
    """ Return the size of an image stored as a numpy array.
    :param image_array: the image as a numpy array.
    :return: a tuple (width, height).
    """
    # shape is (rows, cols, ...) == (height, width, ...).
    height, width = image_array.shape[0], image_array.shape[1]
    return width, height
|
fd6c1df15f08718e5b9750b2e6aa26f02d14465f
| 143,456 |
def get_euler_number(facet_list, vertex_dict, edge_dict):
    """
    Compute the Euler characteristic of the surface: V - E + F.
    """
    v = len(vertex_dict)
    e = len(edge_dict)
    f = len(facet_list)
    return v - e + f
|
f87ca380376ffa6c2638185bd9acac8805a2f0f2
| 208,830 |
def convert_to_list_of_words(lyrics):
    """Return the lowercase words of *lyrics*, with commas removed.

    Parameters:
        lyrics (string)
    Returns:
        list: a list of words
    """
    cleaned = lyrics.replace(',', '')
    return cleaned.lower().strip().split()
|
2e64bc89cd22dd9b0447c7f4f7dd421825cc4d32
| 60,315 |
def inflect(word: str,
            count: int):
    """
    Minimal inflection helper.
    :param word: The word to inflect.
    :param count: The count.
    :return: The word, perhaps inflected for number.
    """
    if word == 'was':
        return 'was' if count == 1 else 'were'
    if word in ('time', 'sentence'):
        return word if count == 1 else word + 's'
    # Unknown words get an explicit "(s)" marker.
    return word + '(s)'
|
7293e076ead3a76ac5e1c8af14e058d10756681e
| 332,858 |
def get_service_name(pod_name):
    """Derive the service name from a pod name by stripping the trailing
    6-character suffix (e.g. "-ab1cd")."""
    suffix_len = 6
    return pod_name[:-suffix_len]
|
42b5d489efb530d278d57049f3236fc0bd8cfd99
| 230,091 |
def get_group_role_assignments(client, group, project):
    """List the role assignments of *group* on *project*.

    :param client:
    :type client: keystoneclient.v3.client.Client
    :param group:
    :type group: keystoneclient.v3.groups.Group
    :param project:
    :type project: keystoneclient.v3.projects.Project
    :return:
    :rtype: list[keystoneclient.v3.roles.Role]
    """
    assignments = client.role_assignments.list(group=group, project=project)
    return assignments
|
ca7428723f6ea08f3e2fa0f9118179a678bc2e8a
| 456,254 |
import json
def format_source(src):
    """Format a JSON source query as commented text: pretty-print it with
    2-space indentation and prefix every line with '#'.

    :param src: JSON text of the source query.
    :return: the '#'-prefixed pretty-printed text (newline-terminated).
    """
    parsed = json.loads(src)
    lines = json.dumps(parsed, indent=2).split("\n")
    # join instead of repeated += — avoids quadratic string building.
    return "".join("#%s\n" % line for line in lines)
|
bc2866fb3cd82be4c820242649608549cff0d2d4
| 369,291 |
import re
def split_coeff(ions):
    """Splits coefficients from adduct atoms so they can be processed by
    molmass.

    Parameters
    ----------
    ions : list of strings

    Returns
    -------
    list of lists of strings
        each internal list represents one atom and its multiplier in
        the adduct
    """
    all_coeff = []
    for ion in ions:
        if ion[0].isdigit():
            # Raw string: '\d'/'\D' in a plain string is an invalid escape
            # sequence (DeprecationWarning, future SyntaxError).
            coeff = re.findall(r'\d+|\D+', ion)
        else:
            coeff = [ion]
        all_coeff.append(coeff)
    return all_coeff
|
6716007c3bc15e9f293bbb74ce8e1dd6eaf17adb
| 564,945 |
def get_estimator_name(clf):
    """
    Extract the estimator's class name from the estimator object *clf*.

    :param clf: estimator object
    :return: string name
    """
    qualified = str(type(clf))
    return qualified.split('.')[-1].replace("'>", "")
|
25af24e382485c72734b2096ebe15e68551f71e4
| 405,492 |
def cleanPath(path):
    """
    Reduce a raw PDF path to the bare file name (no directories, no
    extension).
    :path: path to pdf
    :returns: the PDF name with all directory/extension parts stripped
    """
    base = path.split("/")[-1]
    return base.split(".")[0]
|
0cafdf13cbdc784abaedb49b9b757054c3ff25f6
| 696,595 |
def contains_at_depth(haystack, needle, n):
    """Is the needle in haystack at depth n?

    Return True if the needle is present in one of the sub-iterables of
    haystack at depth n. Haystack must be iterable for any depth > 0.
    """
    # hasattr check kept from the original intent of accepting lists.
    # NOTE(review): on Python 3, str also has __iter__, so strings pass
    # this guard and are descended into character-by-character — confirm
    # whether callers rely on the Python-2 "reject strings" behavior.
    if not hasattr(haystack, '__iter__'):
        return False
    if n == 0:
        return needle in haystack
    return any(contains_at_depth(item, needle, n - 1) for item in haystack)
|
470c8b76d59f7ff9cf37cf53431c62853d576852
| 344,347 |
import itertools
def take(iterable, amount):
    """Return the first n items of the iterable as a tuple.
    Taken from the Python documentation. Under the PSF license.
    :param amount:
        The number of items to obtain.
    :param iterable:
        Iterable sequence.
    :returns:
        First n items of the iterable as a tuple.
    """
    first_items = itertools.islice(iterable, amount)
    return tuple(first_items)
|
8af21cae7b0caf4de1266d3c97bc508f550eba3f
| 298,783 |
import socket
def connect(target):
    """
    Open a TCP connection to *target* and return the connected socket.

    Args:
        target (tuple): (host: str, port: int).
    Returns:
        The connected TCP socket object.
    """
    sock = socket.socket()
    sock.connect(target)
    return sock
|
aad78d09691ec8c37d4308209f83884180f7c23c
| 202,496 |
def solve(a, m, k):
    """Solve the linear congruence a*x = k (mod m).

    Returns the least non-negative x satisfying the congruence, using a
    recursive descent on the coefficient (Euclidean-style reduction).
    Raises ValueError when a == 0 and k != 0 (no solution).
    """
    # Degenerate coefficient: 0*x = k only solvable when k is 0.
    if a == 0:
        if k == 0:
            return 0
        raise ValueError(f"{a}x%{m}={k} - No SOLUTIONS")
    # Trivial coefficient: x = k reduced mod m.
    if a == 1:
        return k % m
    # Exact division: x = k / a works directly.
    if k % a == 0:
        return k // a
    # Reduce to a smaller congruence modulo a, then lift the answer back.
    sub = solve(
        a=m % a,
        m=a,
        k=(a - k) % a
    )
    return (k + sub * m) // a
|
a5ac98fd80d11623066b8071edd36ea74410b06d
| 434,776 |
import requests
import re
def download_song(url):
    """Download a lyrics page from *url* and return the cleaned lyric text.

    Extracts the lyric section between the site's marker comments, strips
    markup/punctuation via regex, and lower-cases the result.
    """
    request_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    page = requests.get(url, headers=request_headers)
    # First capture group between the two HTML marker comments is the lyrics.
    lyrics = re.findall("Sorry about that. -->(.+)<!-- MxM banner -->", page.text, re.DOTALL)[0]
    # Blank out inline tags, line breaks and punctuation in one pass.
    lyrics = re.sub("\<i\>.{1,50}\<\/i\>|\<br\>|\n|\r|\<\/div\>|\(|\)|-|\.|\;|\:|\!|\,|'|\?|\"", " ", lyrics)
    return lyrics.lower()
|
20399be47f86fe4f45ab30975dae51e30e3af914
| 574,458 |
from pathlib import Path
import importlib
def rtb_path_to_datafile(*filename, local=True):
    """
    Get absolute path to datafile
    :param filename: pathname of datafile
    :type filename: str
    :param local: search for file locally first, default True
    :type local: bool
    :raises ValueError: file does not exist locally or inside ``rtbdata``
    :return: Absolute path
    :rtype: Path
    The positional arguments are joined, like ``os.path.join``.
    If ``local`` is True then ``~`` is expanded and if the file exists, the
    path is made absolute, and symlinks resolved.
    Otherwise, the file is sought within the ``rtbdata`` package and if found,
    return that absolute path.
    Example::
        loadmat('data/map1.mat') # read rtbdata/data/map1.mat
        loadmat('foo.dat') # read ./foo.dat
        loadmat('~/foo.dat') # read $HOME/foo.dat
    """
    filename = Path(*filename)
    if local:
        # check if file is in user's local filesystem
        p = filename.expanduser()
        p = p.resolve()
        if p.exists():
            return p
    # otherwise, look for it in rtbdata
    rtbdata = importlib.import_module("rtbdata")
    root = Path(rtbdata.__path__[0])
    path = root / filename
    if path.exists():
        return path.resolve()
    else:
        # BUG FIX: the original f-string contained no placeholder, so the
        # error never said which file was missing.  ValueError is kept
        # (not FileNotFoundError) so existing callers' except clauses work.
        raise ValueError(f"file {filename} not found locally or in rtbdata")
|
f486bd8b803eb4d9269dafd330d9be8cbe6d2bcd
| 293,709 |
def valid_exception(error):
    """Return True when *error* actually indicates successful authorization.

    Certain service exception codes are raised even though the caller was
    authorized; this checks the error text for any of those codes.
    """
    VALID_EXCEPTIONS = (
        'DryRunOperation',
        # S3
        'NoSuchCORSConfiguration',
        'ServerSideEncryptionConfigurationNotFoundError',
        'NoSuchConfiguration',
        'NoSuchLifecycleConfiguration',
        'ReplicationConfigurationNotFoundError',
        'NoSuchTagSet',
        'NoSuchWebsiteConfiguration',
        'NoSuchKey',
        # EC2
        'InvalidTargetArn.Unknown',
    )
    message = str(error)
    return any(code in message for code in VALID_EXCEPTIONS)
|
9bea8b4b6db21f582e35742a6f2d76a6fb2e583e
| 349,594 |
def list_encoder_factory(type_callable):
    """Build an encoder that applies *type_callable* to every list element.

    :param type_callable: callable applied to each element
    :return: function mapping a list of data to a list of converted values
    """
    def encode_list(data):
        return list(map(type_callable, data))
    return encode_list
|
6a892956d94e88e24ad738de6a19e2394aa34842
| 21,470 |
def fn_exec_args(opts) -> str:
    """Return a formatted, length-capped preview of ``opts.exec``.

    Shows ``-`` when no exec command is set.  BUG FIX: the original
    appended ``...`` unconditionally, so even a 2-character command was
    rendered as truncated; the ellipsis is now added only when the
    command really exceeds 30 characters.
    """
    cmd = opts.exec
    if not cmd:
        return '[gray][dim]-'
    preview = cmd[:30] + "..." if len(cmd) > 30 else cmd
    return f'[gray][dim]{preview}'
|
4a9489195a8b785a8a114cda1a6f683935701b7c
| 182,394 |
def parse_params_bool(params, p):
    """Pop key *p* from request params and interpret it as a boolean.

    Missing or empty values are False; otherwise only the (case-insensitive)
    strings "yes", "1" and "true" count as True.  Note: *p* is removed
    from *params* as a side effect.
    """
    raw = params.pop(p, None)
    return bool(raw) and raw.lower() in ("yes", "1", "true")
|
dc4464c89087c2a45a5548f67a6fa18aa1276b6b
| 577,612 |
def _permissions(annotation):
"""
Return a permissions dict for the given annotation.
Converts our simple internal annotation storage format into the legacy
complex permissions dict format that is still used in some places.
"""
read = annotation.userid
if annotation.shared:
read = 'group:{}'.format(annotation.groupid)
return {'read': [read],
'admin': [annotation.userid],
'update': [annotation.userid],
'delete': [annotation.userid]}
|
5fb4b0a28023a1ec5b74cc8edaf680a27432a43f
| 633,416 |
import math
def mutate(value, mode='NONE'):
    """Mutate *value* according to the provided *mode*.

    'NUMBOOL' maps nonzero to 1.0 and zero to 0.0, 'FLOOR'/'CEIL' round
    down/up; any other mode returns the value unchanged.
    """
    transforms = {
        'NUMBOOL': lambda v: float(v != 0.0),
        'FLOOR': math.floor,
        'CEIL': math.ceil,
    }
    transform = transforms.get(mode)
    return transform(value) if transform is not None else value
|
cc0f2f6e2d8c6c89aae51e057ff263a0d011587b
| 348,444 |
from typing import List
def string_wrap(text: str, wrap_length: int) -> List[str]:
    """
    Split *text* into consecutive chunks of at most *wrap_length* chars.

    :param text: original text
    :param wrap_length: maximum length of each chunk (must be positive)
    :return: list of chunks; the last one may be shorter
    """
    return [text[start:start + wrap_length]
            for start in range(0, len(text), wrap_length)]
|
92e5dc0be6be621e3ce3ee121fc72de9b1670a60
| 619,831 |
def format_seconds(n: int) -> str:
    """Format a duration in seconds into a compact human-readable string.

    Leading zero units are dropped; sub-minute durations keep two
    decimal places.
    """
    days, remainder = divmod(n, 24 * 3600)
    days = int(days)
    hours, remainder = divmod(remainder, 3600)
    hours = int(hours)
    minutes, seconds = divmod(remainder, 60)
    minutes = int(minutes)
    if days > 0:
        return f'{days}d {(hours)}h:{minutes}m:{int(seconds)}s'
    if hours > 0:
        return f'{(hours)}h:{minutes}m:{int(seconds)}s'
    if minutes > 0:
        return f'{minutes}m:{int(seconds)}s'
    return f'{seconds:.2f}s'
|
7ed0a835100d13d3123ab2077a7700cd6e48a126
| 493,181 |
def _edge_is_between_selections(edge, selection_a, selection_b):
"""
Returns ``True`` is the edge has one end in each selection.
Parameters
----------
edge: tuple[int, int]
selection_a: collections.abc.Container[collections.abc.Hashable]
selection_b: collections.abc.Container[collections.abc.Hashable]
Returns
-------
bool
"""
return (
(edge[0] in selection_a and edge[1] in selection_b)
or (edge[1] in selection_a and edge[0] in selection_b)
)
|
808ee767b44a05fb8258a2bef5621d22131e6467
| 20,050 |
import socket
import json
def get_json_data(stats_sock):
    """Return uwsgi stats read from the UNIX socket file as a dict.

    Reads until EOF, then parses the payload as JSON.  Best-effort: any
    failure (missing socket, connection error, invalid JSON) prints the
    error and returns an empty dict, as before.

    Fixes vs. original: removed the redundant ``s.close()`` (the ``with``
    block already closes the socket), removed the dead ``pass`` after
    ``print``, and replaced the quadratic ``str +=`` accumulation with a
    join over chunks.
    """
    data_dict = {}
    chunks = []
    try:
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(stats_sock)
            while True:
                chunk = s.recv(4096)
                if len(chunk) < 1:
                    break
                # Decode per-chunk with errors ignored, as the original did.
                chunks.append(chunk.decode("utf8", "ignore"))
        data_dict = json.loads("".join(chunks))
    except Exception as e:
        # Deliberately broad: callers expect {} on any failure.
        print(e)
    return data_dict
|
67b6687fabecfeb84c4c460d133a93da21cc8f5e
| 32,518 |
import random
def paretoint(avg, alpha):
    """Return a random integer that averages *avg*, following a power law.

    *alpha* (> 1) sets the shape of the Pareto curve: values closer to 1
    produce wider variation in the returned numbers.
    """
    draw = random.paretovariate(alpha)
    # Divide by the Pareto mean (alpha / (alpha - 1)) so the result
    # averages avg; expression kept in the original order for identical
    # float rounding.
    return int(draw * avg / (alpha / (alpha - 1)))
|
9d9835a36255b13a04f55ef2e5bdf3bfb3fe1b1b
| 536,053 |
def fgs_range(fg_plays, start, end):
    """
    Given a list of field goal `nfldb.PlayPlayer` objects, return only
    the field goals (made or missed) whose distance lies in `[start, end]`.
    Returns a lazy ``filter`` object, like the original.
    """
    def in_range(play):
        # Made kicks record distance in kicking_fgm_yds, misses in
        # kicking_fgmissed_yds.
        if play.kicking_fgm == 1:
            yards = play.kicking_fgm_yds
        else:
            yards = play.kicking_fgmissed_yds
        return start <= yards <= end
    return filter(in_range, fg_plays)
|
0bfeb9fc39453a839dc9a409e981c379479d945c
| 515,494 |
def rc_list(mod):
    """
    Return the sorted list of response coefficient names for a model.

    Includes both species ('rc<species>_<param>') and flux
    ('rcJ<reaction>_<param>') response coefficients for every parameter.

    Parameters
    ----------
    mod : PysMod
        Pysces model supplying ``reactions``, ``species`` and ``parameters``.

    Returns
    -------
    list of str
        Alphabetically sorted coefficient names.

    See Also
    --------
    cc_list, ec_list, prc_list
    """
    names = []
    for param in mod.parameters:
        names.extend('rc%s_%s' % (species, param) for species in mod.species)
        names.extend('rcJ%s_%s' % (reaction, param) for reaction in mod.reactions)
    return sorted(names)
|
7d4af00cbf87b2b8332e6bf741bc35bb1f1d3dbe
| 342,019 |
def take_away_from(y):
    """take_away_from(y)(x) = y - x

    Be careful: subtraction is not commutative — the flipped version of
    this is 'take_away'.

    >>> list(map(take_away_from(2), [1,2,3,4,5]))
    [1, 0, -1, -2, -3]
    """
    def subtract_from_y(x):
        return y - x
    return subtract_from_y
|
0f9d44f298f498696f6e05c5ab7ded0b509d8ec3
| 393,632 |
def digest_lines(digest_input):
    """
    Return the lines of the Digest output file as a list.

    Line terminators are preserved, matching file iteration semantics.
    """
    with open(digest_input, 'r') as infile:
        return list(infile)
|
fe2627af2a15d51f399364bcfd0c0ef68e4973df
| 34,714 |
from typing import Union
import difflib
def find_similar_word(word : str, template_word : Union[list,set]) -> str :
    """
    Return the word in *template_word* most similar to *word* (via difflib).

    Falls back to *word* itself when no close match is found.
    """
    matches = difflib.get_close_matches(word, template_word)
    return matches[0] if matches else word
|
2d0e449c2fd6027ab87afe742e8a5e91a972634e
| 611,960 |
def compare_dicts(dict1, dict2):
    """
    Compare two dictionaries key-by-key.

    Returns True when both arguments are dicts with identical key sets and
    equal values for every key; False otherwise (including non-dict input).
    """
    if not (isinstance(dict1, dict) and isinstance(dict2, dict)):
        return False
    if set(dict1) != set(dict2):
        return False
    # Values compared with != exactly as the original did (no identity
    # shortcut, so e.g. NaN values still compare unequal).
    return not any(dict1[k] != dict2[k] for k in dict1)
|
8fd1aaea5d04a5787d3a593dcddc14d66d31d86c
| 564,027 |
def make_num(a, s, num_base=10):
    """
    Convert string *s* to an integer using symbol-value mapping *a*.

    :param a: dict mapping each symbol to its digit value
    :param s: string representation of the number
    :param num_base: numeric base used for positional weighting
    :return: the decoded integer
    """
    total = 0
    for symbol in s:
        # Shift accumulated value one position left, then add this digit.
        total = total * num_base + a[symbol]
    return total
|
9b9c06fcbdfc3b3d5d0ad00b8503b115e3b49c09
| 440,589 |
def get_top_15_marketcap(data):
    """
    Return the top 15 companies ranked by market cap, highest first.

    Parameters:
        (list) data: compiled list of company rows; ``row[0]`` is the
                     company tag and ``row[3]`` its market cap.

    Returns:
        list of ``[marketcap, tag]`` pairs sorted descending.  BUG FIX:
        when fewer than 15 companies are supplied, all of them are
        returned instead of raising IndexError as the original did.
    """
    all_company_marketcap = [[line[3], line[0]] for line in data]
    # reverse=True on [marketcap, tag] pairs reproduces the original
    # list.sort(reverse=True) ordering, including tie-breaking on tag.
    all_company_marketcap.sort(reverse=True)
    return all_company_marketcap[:15]
|
aec72d1e90bf2bd153089eba72a84a547ffe971c
| 293,692 |
def is_odd(n: int) -> bool:
    """
    Check whether *n* is odd.

    :param n: the number to test
    :return: True when n is odd, False when it is even
    """
    return n % 2 != 0
|
fa4d1f0ad7fa153f72e509d5240f620c7c47c685
| 519,617 |
def gen_compare_cmd(single_tree, bootstrapped_trees):
    """
    Build the argv list for Morgan Price's "CompareToBootstrap" perl script.

    Input:
        single_tree <str> -- path to reference tree file (Newick)
        bootstrapped_trees <str> -- path to bootstrapped trees file (Newick)
    Returns:
        command list suitable for subprocess execution
    """
    script = '/home/alexh/bin/MOTreeComparison/CompareToBootstrap.pl'
    return ['perl', script, '-tree', single_tree, '-boot', bootstrapped_trees]
|
df9d2d4c21ca28012107b8af69706d45804e5637
| 24,953 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.