content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
import math
def _tree_depth(num_leaves: int, arity: int):
"""Returns the depth of the tree given the number of leaf nodes and arity."""
return math.ceil(math.log(num_leaves) / math.log(arity)) + 1
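A quick sanity check of the formula (illustrative values; the root is counted as level 1):
>>> _tree_depth(5, 2)
4
>>> _tree_depth(9, 2)
5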
|
8633da9e1079cd9338126d1174c0e632d49662bd
| 42,965 |
from typing import Any
def _rebase_entity_dict(
entity_dict: str, group: dict[str, Any], base_channel_no: int
) -> dict[int, Any]:
"""Rebase entity_dict with base_channel_no."""
new_fields = {}
if fields := group.get(entity_dict):
for channel_no, field in fields.items():
new_fields[channel_no + base_channel_no] = field
return new_fields
|
d1b0e6ad0f2e4acb7b333aaacfa10d00449bf0e0
| 42,966 |
def __top_frond_right(dfs_data):
"""Returns the frond at the top of the RF stack."""
return dfs_data['RF'][-1]
|
faf59ef6af0ea2bd4045fa7de46b67e4ff3e9410
| 42,971 |
def evaluate_sampler(sampler_fn, obs, action):
    """
    :param sampler_fn:
        fn(o,k)     Function returning sample value
        [p1,p2..]   List of values per-arm
        p1          Scalar value for all arms
    :param obs: observation, passed through to a callable sampler
    :param action: arm index, used to select from a list sampler
    :return: the sampled value
    """
    if callable(sampler_fn):
        sample = sampler_fn(obs, action)
    elif isinstance(sampler_fn, list):
        sample = sampler_fn[action]
    else:
        sample = sampler_fn  # assume float or int
    return sample
|
10baa933330cb7fc81529ad8091f479d180de34a
| 42,973 |
import copy
def unwind(data, max_depth = 1000, stop_term = ''):
    """ Unwind nested dictionaries by repeating higher level fields.
    Args:
        data: (dict), nested dictionary with child dicts under the 'items' key.
        max_depth: (int), maximum depth to unwind.
        stop_term: (str), stop unwinding once this term appears as a key in dict.
    Returns:
        List of unwound (flattened) dictionaries.
    """
    result_list = []
    def unwinder(data, row = None, depth = 0):
        # Deep-copy the parent row so sibling items do not share mutated state.
        if row is None:
            row = {}
        else:
            row = copy.deepcopy(row)
        for key, value in data.items():
            if key != 'items':
                row[key] = value
        if 'items' in data:
            if (depth < max_depth) and (stop_term not in data):
                for item in data['items']:
                    unwinder(item, row, depth = depth + 1)
            else:
                row['items'] = data['items']
                result_list.append(row)
        else:
            result_list.append(row)
    unwinder(data)
    return result_list
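A minimal usage sketch with a hypothetical input: fields on the parent are repeated onto each row produced from its 'items'.
>>> unwind({'order': 7, 'items': [{'sku': 'a'}, {'sku': 'b'}]})
[{'order': 7, 'sku': 'a'}, {'order': 7, 'sku': 'b'}]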
|
ab2ae7ec74d7ec8d84ee885e5760cd9685db2387
| 42,977 |
import math
def delta_angle_degrees(from_angle_degrees, to_angle_degrees):
"""Calculates the shortest signed delta angle."""
delta = to_angle_degrees - from_angle_degrees
return delta - 360.0 * math.floor((delta + 180.0) / 360.0)
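The floor term wraps the result into (-180, 180], so the delta always takes the shorter way around; for example:
>>> delta_angle_degrees(350.0, 10.0)
20.0
>>> delta_angle_degrees(10.0, 350.0)
-20.0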
|
a86f5596aef5580aedceb050bef6ed8317ebd9d4
| 42,981 |
def slowComplete(prefix, list_of_words, top):
    """
    For a given prefix, provide top suggestions from this list of words.
    Parameters
    ----------
    prefix: prefix string to complete
    list_of_words: path to a file whose lines are "<count>\t<word>" (first line is a header)
    top: how many top suggestions to output
    Return
    ------
    the top `top` (word, count) pairs matching the prefix
    """
    with open(list_of_words, 'r') as file:
        data = file.readlines()
    # Skip the header line.
    data_list = data[1:]
    num_list = []
    word_list = []
    for l in data_list:
        if l != '\n':
            entry = l.split('\t')
            num_list.append(int(entry[0]))
            word_list.append(entry[1][:-1])
    candidate_list = []
    for i in range(len(word_list)):
        if word_list[i].startswith(prefix):
            candidate_list.append((word_list[i], num_list[i]))
    # Sort in place; the original called sorted() and discarded its return value.
    # Most frequent first is assumed to be the intended order for suggestions.
    candidate_list.sort(key=lambda x: x[1], reverse=True)
    return candidate_list[0:top]
|
87ddf8727ba8418d3a1d28e0a9153fea7a9532fb
| 42,982 |
def get_folder_items(bucket_items, folder_prefix):
"""
Returns items in bucket which belong to a folder
:param bucket_items: items in the bucket
:param folder_prefix: prefix containing the folder name
:return: list of items in the folder without the folder prefix
"""
return [
item['name'][len(folder_prefix):]
for item in bucket_items
if item['name'].startswith(folder_prefix)
]
|
1890be035b994e9e37d9f4a5f174121d174a3fb8
| 42,985 |
from typing import Tuple
def bbox_center(p: Tuple[float, float],
q: Tuple[float, float]) -> Tuple[float, float]:
"""
Return middle point between two points p and q.
"""
(min_lon, min_lat), (max_lon, max_lat) = p, q
    center = (min_lon + (max_lon - min_lon) / 2,
              min_lat + (max_lat - min_lat) / 2)
return center
|
017a31d06de4bed0ca2f1e8d63bfe7d7234cfaef
| 42,989 |
import math
def generate_timecode(ms: int) -> str:
"""
Convert a duration in seconds to ISO8601 hh:mm:ss.sss format
"""
hours = math.floor(ms / (60 * 60 * 1000))
minutes = math.floor(ms / (60 * 1000)) % 60
seconds = math.floor(ms / 1000) % 60
milliseconds = ms % 1000
return (
str(hours).rjust(2, "0")
+ ":"
+ str(minutes).rjust(2, "0")
+ ":"
+ str(seconds).rjust(2, "0")
+ "."
+ str(milliseconds).rjust(3, "0")
)
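For example, 1 h 2 min 3.004 s expressed in milliseconds:
>>> generate_timecode(3723004)
'01:02:03.004'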
|
817a0d7f8732547f9c30943548870e84533ad18a
| 42,990 |
def map_args(tree, args):
"""
Given a tree and a list of arguments, produce the tree with the arguments
instead of integers at the leaves of the tree.
E.g. for tree = [[1, 2], [3, 4]] and args = [a, b, c, d] we get
[[a, b], [c, d]].
"""
(s, t, a) = tree
if a[0] == 1:
return args[0]
return [map_args((1, t[0], a[1]), args[:a[1][0]]),
map_args((1, t[1], a[2]), args[a[1][0]:])]
|
ffe0b977d508d33227586a56dee8cae36f21b31b
| 42,991 |
def _TransformOperationName(resource):
"""Get operation name without project prefix."""
# operation name is in the format of:
# operations/projects/{}/instances/{}/.../locations/{}/operations/{}
operation_name = resource.get('name')
results = operation_name.split('/')
short_name = '/'.join(results[3:])
return short_name
|
542968ede55b0da7176f03d9515a5144d6316757
| 42,992 |
import re
def find_col_index(header, col_name):
""" Extracts the column index of the given variable in the data.
    Given a list of headers, searches the list for the first one that matches
    the col_name. Uses regex match to search through each header.
Parameters
----------
header : list of strs
List of headers in the dataset.
col_name : str
Name of column to be indexed.
Returns
-------
int
If a match is found, returns the index. If no match, it raises an Exception.
"""
    # Escape col_name so any regex metacharacters in it are matched literally.
    pat = re.compile('^(")*%s(")*$' % re.escape(col_name.lower()))
for i, _ in enumerate(header):
if re.match(pat, header[i].lower()):
return i
raise Exception("Column name not found: %s" % col_name)
|
a5504c2e4bd14966de2b332963bc518e378bd348
| 42,999 |
def formatMultiplier(stat : float) -> str:
"""Format a module effect attribute into a string, including a sign symbol and percentage symbol.
:param stat: The statistic to format into a string
:type stat: float
:return: A sign symbol, followed by stat, followed by a percentage sign.
"""
    return f"{'+' if stat >= 1 else '-'}{round((stat - (1 if stat >= 1 else 0)) * 100)}%"
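A sketch of both branches, assuming values above 1 encode an increase multiplier and values below 1 encode the decrease fraction directly:
>>> formatMultiplier(1.25)
'+25%'
>>> formatMultiplier(0.2)
'-20%'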
|
5ab9df157d54a5414fc9cba46d0f73512a8a6c4c
| 43,002 |
def camel2snake(text):
"""Convert camel case to snake case. This assumes the input is valid camel
case (if you have some weird hybrid of camel and snake case, for instance,
you'd want to do some preprocessing first).
Parameters
----------
text: str
Camel case string, e.g. vaderSentimentScore.
Returns
-------
str: `text` converted to snake case, e.g. vader_sentiment_score.
"""
res = []
for char in text:
if char.islower():
res.append(char)
else:
res.extend(['_', char.lower()])
return ''.join(res)
|
120345ba898777a31adfe720b6ca1041fe39907c
| 43,004 |
def _ok_to_all_filter(x):
"""This is the default filter function."""
return True
|
bf78924eb68b21f736366007f083bfb09c8dedcd
| 43,010 |
def compare(a, b):
"""None-aware comparison helper for Lisplet.
>>> compare(None, None)
0
>>> compare(None, 12)
-1
>>> compare(12, None)
1
>>> compare(12, 12)
0
>>> compare(12, -12)
1
>>> compare(-12, 12)
-1
"""
if a is None:
if b is None:
return 0
return -1
if b is None:
return 1
if a < b:
return -1
if a > b:
return 1
return 0
|
c5fd25f613dc0727c0db0a10dbad4ef3b3d93235
| 43,014 |
def center_crop_numpy(img, cropx, cropy):
"""
    Given an image numpy array, perform a center crop.
Args:
img : numpy image array
cropx : width of crop
cropy : height of crop
Returns:
cropped numpy image array
"""
y,x = img.shape[:-1]
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy, startx:startx+cropx, :]
|
ab69271a947906a6ea515f6846d5c09614b15853
| 43,019 |
import json
def extract_genres(genres_str):
"""Extracts the genres in string form as a list of genres
Arguments:
genres_str {string} -- string containing the genres
Returns:
list -- the extracted genres
"""
genres_str = genres_str.replace("'", '\"')
genres_json = json.loads(genres_str)
genres_list = []
for elem in genres_json:
genres_list.append(elem['name'])
return genres_list
|
34dcc0ad7927f61610ac393f71bc744fff18e215
| 43,025 |
import time
def cookie_to_har(cookie):
"""
Convert a Cookie instance to a dict in HAR cookie format.
"""
c = {
'name': cookie.name,
'value': cookie.value,
'secure': cookie.secure,
}
if cookie.path_specified:
c['path'] = cookie.path
if cookie.domain_specified:
c['domain'] = cookie.domain
if cookie.expires:
tm = time.gmtime(cookie.expires)
c['expires'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", tm)
http_only = cookie.get_nonstandard_attr('HttpOnly')
if http_only is not None:
c['httpOnly'] = bool(http_only)
if cookie.comment:
c['comment'] = cookie.comment
return c
|
bcfe6258a95b4eea0632023399b3540db6574426
| 43,027 |
def questionize_label(word):
"""Convert a word to a true/false style question format.
If a user follows the convention of using `is_something`, or
`has_something`, for a boolean value, the *property* text will
    automatically be converted into a more human-readable
    format, e.g. 'something?' for both is_ and has_ prefixes.
Args:
word (str): The string to format.
Returns:
word (str): The formatted word.
"""
if word is None:
return ''
if word.startswith('is_'):
return '{0}?'.format(word[3:])
elif word.startswith('has_'):
return '{0}?'.format(word[4:])
return word
|
d69e100a3b37f2632a6e0400c395f99fcba00b4c
| 43,029 |
def is_not_empty_value(value):
"""
Checks for empty response values. Demisto recommends returning the None type if a value is empty,
rather than an empty string/list.
"""
return value != "" and value != [] and value != [""]
|
202edb774c04b00095be9c96b65614b1dbfdbb28
| 43,030 |
import math
def get_distance(a, b):
"""Return Euclidean distance between points a and b."""
return math.sqrt(pow(a.x - b.x, 2) + pow(a.y - b.y, 2))
|
89c66e586a37a88dbce25460ad3310a035044c73
| 43,031 |
def square_crop(im, target_size=None):
""" Crop image to `target_size`. If that's None the image is squared
to the smallest size
"""
w = im.size[0]
h = im.size[1]
target_size = target_size if target_size else min(w, h)
dx = (w - target_size) / 2
dy = (h - target_size) / 2
return im.crop((dx, dy, dx + target_size, dy + target_size))
|
28fb58b21ca4b15e6d48c9345fb31aa333cd7276
| 43,038 |
def output(s):
"""Convert to text output format."""
out = ' 1 2 3 4 5 6 7 8 9\n'
out += ' +-----+-----+-----+\n'
for i in range(9):
out += str(i + 1) + '|'
for j in range(9):
v = s[i * 9 + j]
if v == 0:
out += ' '
else:
                out += str(v)
if j % 3 == 2:
out += "|"
else:
out += ' '
out += '\n'
if i % 3 == 2:
out += ' +-----+-----+-----+\n'
return out
|
f0c80b7ec7350813dd80f2aab8c1f6f3e05847da
| 43,040 |
def annotate(origin, reference, ops, annotation=None):
"""
Uses a list of operations to create an annotation for how the operations
should be applied, using an origin and a reference target
:param origin: The original iterable
:type origin: str or list
:param reference: The original target
:type reference: str or list
    :param ops: The operations to apply
:type ops: list of Operation
:param annotation: An optional initialization for the annotation. If none
is provided, the annotation will be based on the
origin. If one is provided, it will be modified by this
method.
:return: An annotation based on the operations
:rtype: list of str
"""
annotation = annotation or list(origin)
for oper in ops:
oper.annotate(annotation, origin, reference)
for i, label in reversed(list(enumerate(annotation))):
if label[0] == '+' and i > 0:
annotation[i-1] += label
del annotation[i]
return annotation
|
d6012776b26d90944af535ce04a9d25e6298ffe7
| 43,044 |
from typing import Dict
from typing import Callable
import torch
def evaluate_dict(
fns_dict: Dict[str, Callable], source: torch.Tensor, target: torch.Tensor, reduction: str = "mean"
) -> Dict:
"""Evaluate a dictionary of functions.
Examples
--------
    > evaluate_dict({'l1_loss': F.l1_loss, 'l2_loss': F.l2_loss}, a, b)
    Will return
    > {'l1_loss': F.l1_loss(a, b, reduction='mean'), 'l2_loss': F.l2_loss(a, b, reduction='mean')}
Parameters
----------
fns_dict: Dict[str, Callable]
source: torch.Tensor
target: torch.Tensor
reduction: str
Returns
-------
Dict[str, torch.Tensor]
Evaluated dictionary.
"""
    return {k: v(source, target, reduction=reduction) for k, v in fns_dict.items()}
|
3ff55304f2b16440683ed80b7f5447ef6e9782da
| 43,047 |
def replace_apostrophes(input: str) -> str:
"""Treats the presence of apostrophes so it doesn't break the XPath filter expression.
Args:
input (str | int): input
Returns:
str: XPath filter expression with apostrophes handled.
"""
if not isinstance(input, str):
return str(input)
if "'" in input:
prefix: str = ""
elements = input.split("'")
output = "concat("
for s in elements:
output += prefix + "'" + s + "'"
prefix = ',"\'",'
output += ")"
return output
else:
return "'" + input + "'"
|
11df3265a8ed69999bf43d427dd8ec66ee88ccfb
| 43,048 |
import json
def j(d):
""" Print dict as json view """
try:
d.pop('_id', None)
return json.dumps(d, indent=4, default=str, ensure_ascii=False)
except:
return d
|
ce4fe6c15df9c5e73b6ec00cb6795ada4629eb47
| 43,054 |
from typing import Dict
from typing import Any
def rename_dict_keys(input_dict: Dict[str, Any], prefix: str) -> Dict[str, Any]:
"""
Creates a copy of an input_dict with keys carrying the prefix specified
"""
current_keys = input_dict.keys()
new_keys = [prefix + i for i in current_keys]
new_dict = {
new: input_dict[current] for current, new in zip(current_keys, new_keys)
}
return new_dict
|
32bdd0a6f8046bb6528d2cdaf45c237c972996d1
| 43,060 |
def _get_mapping_dict(ch_names: list[str]) -> dict:
"""Create dictionary for remapping channel types.
Arguments
---------
ch_names : list
Channel names to be remapped.
Returns
-------
remapping_dict : dict
Dictionary mapping each channel name to a channel type.
"""
remapping_dict = {}
for ch_name in ch_names:
if ch_name.startswith("ECOG"):
remapping_dict[ch_name] = "ecog"
elif ch_name.startswith(("LFP", "STN")):
remapping_dict[ch_name] = "dbs"
elif ch_name.startswith("EMG"):
remapping_dict[ch_name] = "emg"
elif ch_name.startswith("EEG"):
remapping_dict[ch_name] = "eeg"
elif ch_name.startswith(
("MOV", "ANALOG", "ROT", "ACC", "AUX", "X", "Y", "Z", "MISC")
):
remapping_dict[ch_name] = "misc"
else:
remapping_dict[ch_name] = "misc"
return remapping_dict
|
782cda9c43749f71241dbef65f5654eafd7e07f4
| 43,066 |
import shutil
def find_program(*programs):
"""Returns the path to the first program in PATH with a name in `programs`.
Returns None on failure."""
for prog in programs:
val = shutil.which(prog)
if val:
return val
return None
|
92d3c49f9b7738c203f4dd1f55252052001ed5b3
| 43,073 |
def calculate_overlap_area(cloth: list) -> int:
"""
Calculate the total area of overlapping claims
:param cloth: List of claims made on each square inch of the cloth
:return: Area of overlapping claims
"""
area = 0
for row in cloth:
for col in row:
area += (len(col) >= 2)
return area
|
fa357c69e095571670ef8650c53d577b42ce09b1
| 43,074 |
def isotopeMaxBD(isotope):
"""Setting the theoretical max BD shift of an isotope
(if 100% incorporation).
Parameters
----------
isotope : str
name of isotope
Returns
-------
float : max BD value
"""
psblIsotopes = {'13C' : 0.036,
'15N' : 0.016}
try:
return psblIsotopes[isotope.upper()]
except KeyError:
raise KeyError('Isotope "{}" not supported.'.format(isotope))
|
7fff5bc6a54034e68357af6a08e000de34d59283
| 43,081 |
import re
def parse_frequency(freq):
"""
Parses a frequency string and returns the number of seconds.
Supported formats: 1s, 1m, 1h, 1d, 1w, 1y
"""
    m = re.search(r'^(\d+)(s|m|h|d|w|y)$', freq.lower())
if m is None:
raise ValueError('Input not in required format')
multipliers = {
's': 1,
'm': 60,
'h': 3600,
'd': 86400,
'w': 604800,
'y': 31536000,
}
return int(m.group(1)) * multipliers[m.group(2)]
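For example:
>>> parse_frequency('2h')
7200
>>> parse_frequency('1w')
604800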
|
f08306fcf95ca86a4caa5344e629974c5c20d008
| 43,082 |
def construct_doc2author(corpus, author2doc):
"""Make a mapping from document IDs to author IDs."""
doc2author = {}
for d, _ in enumerate(corpus):
author_ids = []
for a, a_doc_ids in author2doc.items():
if d in a_doc_ids:
author_ids.append(a)
doc2author[d] = author_ids
return doc2author
|
4f07174d9569019fa2488320052952e110addaeb
| 43,083 |
def versionless(package):
"""
Removes the version from the package reference
"""
return package[:1+package[1:].find('@')]
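The search starts at index 1 so the leading '@' of a scoped package is not mistaken for the version separator; for example (hypothetical package refs):
>>> versionless('lodash@4.17.21')
'lodash'
>>> versionless('@types/node@18.0.0')
'@types/node'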
|
2f52ab9bb406df8a2e74ee56c8f4631cfa83ee6d
| 43,084 |
def is_valid_field(field, allow_quote=False, minimum=None, maximum=None):
"""
Validates a generic user inputted field, such as a "name" for an
object. For now, it basically only validates whether single quote
characters should be allowed in the string.
:type field: str
:param field: The data to be validated.
:type allow_quote: bool
:param allow_quote: If True, a single quote character (') will be allowed
to pass validation.
:type minimum: int
:param minimum: If defined, values with fewer characters than this value
will be rejected.
:type maximum: int
:param maximum: If defined, values with more characters than this value
will be rejected.
:rtype: bool
:return: True or False depending on whether field passes validation.
"""
if field is None:
return False
if not allow_quote and "'" in field:
return False
if minimum:
if len(field) < minimum:
return False
if maximum:
if len(field) > maximum:
return False
return True
|
375b96d891a37115d8a367bc228e020f719da946
| 43,085 |
def set_bits(n, start, end, value):
"""Set bits [<start>:<end>] of <n> to <value> and return <n>"""
mask = ( 1 << end ) - ( 1 << start )
return (int(n) & ~mask) | (int(value) << start) & mask
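The mask covers bits start..end-1; for example, overwriting bits [1:3) of 0b10110 with 0b01:
>>> bin(set_bits(0b10110, 1, 3, 0b01))
'0b10010'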
|
203fb5d94750534dbeb136dc3e580be2f7c9d68d
| 43,088 |
def _process_string(value):
"""Strip a few non-ascii characters from string"""
return value.strip('\x00\x16')
|
5a318bb0336d5b358ef856c39f88bd5037880a2c
| 43,094 |
import requests
def request_weather(url):
"""request the weather from openweathermap.org API. Returns a dict of the json file"""
response = requests.get(url)
response_dict = response.json()
return response_dict
|
c6ae4d38cb849b7956a505a2f0c15c2c1bc98da0
| 43,099 |
def get_reason_from_exception(ex):
"""
Turns an exception into a string similar to the last line of a traceback.
"""
return '{}: {}'.format(ex.__class__.__name__, str(ex))
|
2c8b5d3114c6b950eaef1383f2e88d380f38c965
| 43,101 |
def shrink(pos, fact_x, fact_y=None):
""" Shrink networkx positionings """
if fact_y is None:
fact_y = fact_x
return {i: (x*fact_x, y*fact_y) for i, (x, y) in pos.items()}
|
075ced8c46bd9181bed5e9de206e853fb2a508bd
| 43,102 |
def is_scalar(x):
"""True if x is a scalar (constant numeric value)
"""
return isinstance(x, (int, float))
|
243786089e4fa7d1a05fa3b8873b87b43ece20a7
| 43,107 |
import functools
def update_wrapper(wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
""" Update a wrapper function to look like the wrapped function.
Modified version to support partial and other non-__dict__ objects.
See functools.update_wrapper for full documentation.
"""
for attr in assigned:
try:
setattr(wrapper, attr, getattr(wrapped, attr))
except AttributeError:
pass
for attr in updated:
try:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
except AttributeError:
pass
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
|
e40b2abcdd1593f87418340b769484ebea2f4746
| 43,110 |
import json
def read_json(file_path):
"""
Function to read a json file into a data dictionary structure
:param file_path: Path and name of json file to read
:return: A data dictionary with the json content as dict
>>> read_json('./data/test/test_puzzle.json')
{'train': [{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]}, {'input': [[0, 0, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 4, 0, 4, 0]]}], 'test': [{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 7, 0, 0]]}]}
"""
with open(file_path) as json_file:
data_dict = json.load(json_file)
return data_dict
|
dbf06a8002f1bda0963eebc2045e9716fb69d938
| 43,113 |
import re
def natural_sort_key(text):
"""
Sort numeric parts of text numerically and text parts alphabetically. Example:
>>> sorted(["9 Foo", "10 Foo", "9A Foo"], key=natural_sort_key)
['9 Foo', '9A Foo', '10 Foo']
"""
return [int(part) if part.isdigit() else part for word in text.split() for part in re.split('(\d+)', word)]
|
a34793ef0b98cf91d2aac8807bcb60e47e197b1c
| 43,116 |
def get_subreport_zeros_and_ones(subreport, n_bits):
"""Gets the number of zeros and ones that have each column within the submarine report.
Args:
subreport (): Subset to count from.
n_bits (int): How many bits have each binary number.
Returns:
tuple: Zeros and ones that each column has.
"""
zeros = [0]*n_bits
ones = [0]*n_bits
for bits in subreport:
for i in range(n_bits):
if bits[i] == '1':
ones[i] += 1
elif bits[i] == '0':
zeros[i] += 1
return zeros, ones
|
2c8f2839a78690e7056761513d804948e8d1a79d
| 43,117 |
def checkCardInHandClicked(game, clickX, clickY):
"""
Returns the card in current player's hand that was clicked, else None
"""
for card in reversed(game.currPlayer.hand): # Reversed so checked from top layer down on gui
if card.imageObj.collidepoint(clickX, clickY):
return card
|
17e8293c1536e3a99f5e24a43cceb0a090a78f5d
| 43,121 |
import types
def iscoroutine(object):
"""Return true if the object is a coroutine."""
return isinstance(object, types.CoroutineType)
|
e2b60ba01ddf3a9863be2773128d35cb12adf1c4
| 43,122 |
def mask_shift_set(value, mask, shift, new_value):
"""
Replace new_value in value, by applying the mask after the shift
"""
new_value = new_value & mask
return (value & ~(mask << shift)) | (new_value << shift)
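For example, replacing the byte-wide field that sits 4 bits up (illustrative values):
>>> hex(mask_shift_set(0xABCD, 0xFF, 4, 0x12))
'0xa12d'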
|
e6ed92669b7f4fb85a4d96cf130dcdf9ffe3d950
| 43,123 |
from typing import Callable
def raise_not_implemented_gen(message: str) -> Callable[[], None]:
"""
Make a function that will raise a NotImplemented error with a custom
message
"""
def not_implemented() -> None:
raise NotImplementedError(message)
return not_implemented
|
03aac0a266c686db439ff179c80b0c213a5bb033
| 43,124 |
import torch
def sentences_similarity(first_sentence_features, second_sentence_features) -> float:
"""
Given two senteneces embedding features compute cosine similarity
"""
similarity_metric = torch.nn.CosineSimilarity()
return float(similarity_metric(first_sentence_features, second_sentence_features))
|
f7ba47162e9a2348eba23a134c474f400fc3da3a
| 43,130 |
def Capitalize(text : str):
"""Returns Capitalized text."""
return text.capitalize()
|
955db0a852c14b654fcf489d3f77ce6dbca6bf95
| 43,131 |
def get_param_dict(job, model_keys):
"""
Get model parameters + hyperparams as dictionary.
"""
params = dict((" ".join(k.replace(".__builder__", "").split(".")),
job.get("hyper_parameters." + k, None))
for k in model_keys)
return params
|
0cbb24dde3ab28b41b2b5af6fe6f3bdfc66da9bb
| 43,132 |
import time
def get_interval_number(ts, duration):
"""Returns the number of the current interval.
Args:
ts: The timestamp to convert
duration: The length of the interval
Returns:
int: Interval number.
"""
return int(time.mktime(ts.timetuple()) / duration)
|
2fe90e1e40f7a1d76c8a4410295c5bd80e5e83e6
| 43,138 |
from typing import Tuple
def unit_center(
indices: Tuple[int, int], image_size: Tuple[int, int], stride: int
) -> Tuple[float, float]:
"""Get single prior unit center.
:param indices: current unit's indices tuple
:param image_size: image shape tuple
:param stride: stride for feature map
:return: unit center coords
"""
y_index, x_index = indices
y_scale, x_scale = image_size[0] / stride, image_size[1] / stride
x_center = (x_index + 0.5) / x_scale
y_center = (y_index + 0.5) / y_scale
return y_center, x_center
|
91f0a2070a26d8335c0e8d2cfd1c80eb59c98b8d
| 43,142 |
def __get_int_ordinals(string):
"""
Return the integer ordinals of a string.
"""
output = ""
for char in string:
output += str(ord(char)).rjust(3, " ") + ", "
output = output.rstrip(", ")
return output
|
8260716a23773bf0eb7434c0601539fcd1fcc285
| 43,145 |
def modulo(a, b, c):
    """
    Calculates a**b mod c
    """
    return (int(a) ** int(b)) % int(c)
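For example:
>>> modulo(2, 10, 1000)
24
For large exponents the built-in pow(a, b, c) gives the same result via modular exponentiation, without materialising the full a**b.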
|
eba9d0f633eaa307c4f35020c33496968b939fe6
| 43,147 |
def items_list(mapping, items):
"""Return a list of values from `mapping` in order of the given `items`."""
return [mapping[item] for item in items]
|
1707b4339c13aa59c56210c0ae54c882c270d759
| 43,152 |
def get_file_options_string(args):
""" Create a string containing the relevant flags and options based on the
file options from the arguments.
Parameters
----------
args: argparse.Namespace
The parsed arguments
"""
overwrite_str = ""
if args.overwrite:
overwrite_str = "--overwrite"
keep_intermediate_files_str = ""
if args.keep_intermediate_files:
keep_intermediate_files_str = "--keep-intermediate-files"
options_str = "{} {}".format(overwrite_str,
keep_intermediate_files_str)
return options_str
|
f5939692c0ef43b1ae26b97539d276c88c2503b1
| 43,157 |
def sum_of_ints(n):
"""Return the sum of 1 to n"""
return ((n + 1) * n) >> 1
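Gauss's formula n(n+1)/2, with the division done as a right shift; for example:
>>> sum_of_ints(10)
55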
|
fa548544d25303deca43e8ea8e1d2fc8f766252a
| 43,158 |
import random
def generate_answers(question_3, question_5, range_3, range_5, sample_size=32):
"""
Helper function that generates answers with response times in
a certain range
:param question_3: The question in block 3
:param question_5: The question in block 5
:param range_3: Range of response times in block 3
:param range_5: Range of response times in block 5
:param sample_size: Amount of images to associate
:return: List of answers
"""
block_3_times = random.choices(range_3, k=sample_size)
block_5_times = random.choices(range_5, k=sample_size)
block_3 = list(map(lambda x: {"block_nr": 2, "response_time": x, "question_id": question_3.id},
block_3_times))
block_5 = list(map(lambda x: {"block_nr": 4, "response_time": x, "question_id": question_5.id},
block_5_times))
block_3.extend(block_5)
return block_3
|
6d84284df73cb36bd0602ee4151af72b84bf8ba3
| 43,164 |
def get_date(isoformattime: str) -> str:
"""Extract {YYYY-MM-DD} from ISO 8601 formatted time string.
Parameters
----------
`isoformattime`: `str`\n
An ISO 8601 formatted time string.
Returns
-------
`str`\n
A string containing the date portion of the original time string.
Raises
------
`ValueError`\n
If the 'T' prefacing the time part of the string is missing.
"""
T_index = isoformattime.find('T')
if T_index == -1:
raise ValueError('Improperly formatted time string given.')
date = isoformattime[0: T_index]
return date
|
65cf41b841f6133529aada5e914efe25c0bfd503
| 43,166 |
import ast
def ParseTryjobBuildbucketId(msg):
"""Find the buildbucket-id in the messages from `cros tryjob`.
Args:
msg: messages from `cros tryjob`
Returns:
buildbucket-id, which will be passed to `cros buildresult`
"""
output_list = ast.literal_eval(msg)
output_dict = output_list[0]
if 'buildbucket_id' in output_dict:
return output_dict['buildbucket_id']
return None
|
e664f8ae73ce05d6c87e861e2690fdd06611d6fa
| 43,168 |
def to_string(cls):
"""
Return the string representation of a syntax class or a
syntax class instance. Return 'string' by default.
"""
return getattr(cls, 'typed_name', 'string')
|
25be3480a9518df3c45137c072f6fe050b4da050
| 43,169 |
import re
def _format_kv_name(setting_path):
"""Return normalized name for use as a KeyVault secret name."""
return re.sub("[^0-9a-zA-Z-]", "-", setting_path)
|
a722253a065abaa7e4e876e9390c24df47be7af5
| 43,172 |
import re
def ellipsicate(message: str, max_length: int = 40, strip: bool = True) -> str:
"""Return a shortened version of a string if it exceeds max_length.
    With max_length=11 this turns 'bizbazfrobnicator' into 'biz ... tor'.
"""
msg = re.sub(r'\s+', ' ', str(message)) # only allow ' ' for whitespace
if strip:
msg = msg.strip()
if len(msg) <= max_length:
return msg
snip_length = int((max_length - 5) / 2) # ellipsis padded with spaces
return str(msg[:snip_length] + ' ... ' + msg[-snip_length:])
|
feb61e075421b55cf50f835f7ebe55171485249b
| 43,177 |
import ipaddress
def first_subnet(subnets, version=4):
"""
Returns the first subnetwork of a list, filtered by version
"""
for subnet in subnets:
network = ipaddress.ip_network(subnet)
if network.version == version:
return subnet
return ""
|
2d60489d0f87f0dade643cccadee281ebabc9c46
| 43,179 |
def create_record_set(model, X, y):
    """Create a record set for AWS model training"""
    X_float = X.astype("float32")
    # Test against None: truth-testing a numpy array raises an "ambiguous" ValueError.
    if y is not None:
        y_float = y.astype("float32")
        return model.record_set(X_float, labels = y_float)
    else:
        return model.record_set(X_float)
|
fff904a26bd12ec597f998daafa4709fc663f227
| 43,182 |
import requests
def make_api_call(csv_target):
"""Make http request to IEX Trading API and return useful dataset"""
my_string = ','.join(str(row['symbol']) for row in csv_target)
url = "https://api.iextrading.com/1.0/tops/last?symbols={0}".format(my_string)
response = requests.get(url)
data = response.json()
api_data = [
(item['symbol'], item['price'], item['size'], item['time'])
for item in data
]
return api_data
|
c067c81b167918707eb18ae8acf7f3d07ad31290
| 43,185 |
def buffer(geom, urban_rural):
"""Create DHS cluster buffers
Buffer size:
- 2km for urban
- 5km for rural (1% of rural have 10km displacement, but ignoring those)
Metric units converted to decimal degrees by dividing by width of one decimal
degree in km at equator. Not an ideal buffer created after reprojecting,
but good enough for this application.
"""
if urban_rural == "U":
return geom.buffer(2000)
elif urban_rural == "R":
return geom.buffer(5000)
else:
        raise ValueError("Invalid urban/rural identifier ({})".format(urban_rural))
|
ab5264f85ff3da21c23dba937879550fa3c4ac49
| 43,192 |
import math
def calc_saturated_vapour_pressure_air_FAO(temp_air):
"""Saturated vapour pressure of air at temp_air in kPa
From: http://www.fao.org/3/X0490E/x0490e0k.htm
"""
return 0.611 * math.exp((17.27 * temp_air) / (temp_air + 237.3))
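For example, at an air temperature of 20 °C this gives roughly the FAO reference value of 2.34 kPa:
>>> round(calc_saturated_vapour_pressure_air_FAO(20.0), 2)
2.34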
|
722b1f5ae8b9b76c56d7c0a5a5961b53e0328fbe
| 43,193 |
from typing import Iterable
from typing import Tuple
def get_maximum_path(tree_level_strings: Iterable[str]) -> Tuple[int, Iterable[int]]:
"""Get maximum path value and maximum path for a given tree, represented as level strings.
Solution idea: Compute maximum path from *bottom up*. Hence, the time complexity is linear.
"""
tree_levels = [
[int(number_string) for number_string in tree_level_string.split()] \
for tree_level_string in tree_level_strings
]
max_path_tree_levels = [[(value, -1) for value in tree_levels[-1]]]
for level_idx in range(len(tree_levels) - 2, -1, -1):
current_tree_level = tree_levels[level_idx]
previous_path_tree_level = max_path_tree_levels[-1]
new_path_tree_level = []
for idx, value in enumerate(current_tree_level):
left_val = previous_path_tree_level[idx][0]
right_val = previous_path_tree_level[idx + 1][0]
if left_val >= right_val:
new_path_tree_node = (value + left_val, 0)
else:
new_path_tree_node = (value + right_val, 1)
new_path_tree_level.append(new_path_tree_node)
max_path_tree_levels.append(new_path_tree_level)
max_path_tree_levels.reverse()
max_path_tree = []
node_idx = 0
for level_idx, level in enumerate(max_path_tree_levels):
max_path_tree.append(tree_levels[level_idx][node_idx])
node_idx += level[node_idx][1]
return max_path_tree_levels[0][0][0], max_path_tree
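A small check on the classic example triangle (illustrative input):
>>> get_maximum_path(['3', '7 4', '2 4 6', '8 5 9 3'])
(23, [3, 7, 4, 9])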
|
eafc3dfc82bc120742efac5cce793b44db22692a
| 43,198 |
def standardize(data, mean, std):
"""Standardize datasets using the given statistics.
Args:
data (np.ndarray or list of np.ndarray): Dataset or list of
datasets to standardize.
mean (number): Mean statistic.
std (number): Standard deviation statistic.
Returns:
np.ndarray or list of np.ndarray: The standardized dataset(s).
"""
if isinstance(data, list):
return [(x - mean) / std for x in data]
return (data - mean) / std
|
c5011d651f7b42f1069da6304f0be8d605ec0b53
| 43,208 |
def api(uri):
"""
Given a URI that uses the ConceptNet API, such as "/c/en/test", get its
fully-qualified URL.
"""
return "http://api.conceptnet.io" + uri
|
07cbc671f5c190ecfbed3d04b00392ce4a393f43
| 43,212 |
def potential_reciprocity(s, G):
"""For authors, check whether an acknowledged commenter is author and
has coauthors; for commenters, check whether she is author and has papers
without authors of the paper she is acknowledged on.
"""
if isinstance(s, list):
return any(c in G.nodes() and len(list(G.neighbors(c))) > 0 for c in s)
else:
authors = set(s['auth'])
return any(c in G.nodes() and
len(set(G.neighbors(c)) - authors) > 0 for c in s['coms'])
|
ad5a4434a0a0510af719d4d0a964caf11c1a1d24
| 43,213 |
import logging
def handle_depricated_arguments(args):
    """Warn about deprecated arguments, use them when possible."""
    if hasattr(args, 'block') and args.block:
        if hasattr(args, 'coverage'):
            logging.warning("--block is deprecated, using --coverage %s.",
                            args.block)
            args.coverage = [args.block]  # block is a string, coverage is a list
        else:
            logging.warning("--block is deprecated, use --coverage instead.")
        args.block = None
    if hasattr(args, 'htmldir') and args.htmldir:
        if hasattr(args, 'reportdir'):
            logging.warning("--htmldir is deprecated, using --reportdir %s.",
                            args.htmldir)
            args.reportdir = args.htmldir
        else:
            logging.warning("--htmldir is deprecated, use --reportdir instead.")
        args.htmldir = None
    if hasattr(args, 'srcexclude') and args.srcexclude:
        if hasattr(args, 'exclude'):
            logging.warning("--srcexclude is deprecated, using --exclude %s.",
                            args.srcexclude)
            args.exclude = args.srcexclude
        else:
            logging.warning("--srcexclude is deprecated, "
                            "use --exclude instead.")
            logging.warning("--srcexclude and --exclude use slightly different "
                            "regular expressions.")
        args.srcexclude = None
    if hasattr(args, 'blddir') and args.blddir:
        logging.warning("--blddir is deprecated, ignoring --blddir.")
        args.blddir = None
    if hasattr(args, 'storm') and args.storm:
        logging.warning("--storm is deprecated, ignoring --storm.")
        args.storm = None
    return args
|
9e10e19e310a241c67c5136085a7fd26b24f60cc
| 43,218 |
def is_pft(df):
"""Check if df is a per-pft dataframe."""
col_names = df.columns.values
return 'Total' in col_names
|
57c5c8f7951f569411e9308809f31aea8a65c160
| 43,225 |
def drop(num, iterator):
"""Drop the first n elements on an iterator"""
try:
for _ in range(num):
next(iterator)
except StopIteration:
return iterator
return iterator
|
0e6f05b2a68410523d949e26037ed31dd0409088
| 43,227 |
def uniform(feature, bins):
"""Equal width bin, take a uniform distribution for the sample value range.
Args:
feature: pd.Series, model feature values.
bins: int, split bins of feature.
Returns:
the list of split threshold of feature.
"""
t = (feature.max()-feature.min())/bins
m = feature.min()
return [t*i+m for i in range(bins)]+[feature.max()]
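A minimal sketch, assuming pandas is installed (edges cast to plain floats for display):
>>> import pandas as pd
>>> [float(edge) for edge in uniform(pd.Series([0.0, 2.0, 4.0, 6.0, 8.0]), 4)]
[0.0, 2.0, 4.0, 6.0, 8.0]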
|
1ea90dbc477457499a2ceb2c20ac9bca34e5e7a9
| 43,229 |
def identity(x, *arg, **kw):
""" Identity layer that returns the first input, ignores the rest arguments. """
return x
|
7b041be55defb0d9e82f0d028745b15b13ac9df5
| 43,232 |
def getBasePath(request):
""" Get base path where page dirs for attachments are stored. """
return request.rootpage.getPagePath('pages')
|
c640a83bea5109cfd8652cf3e2e6237f188a10e4
| 43,233 |
def HasAbstractFieldPath(abstract_path, store):
"""Whether a store contains abstract_path.
Makes no provision for repeated fields. I suppose if we did we'd
have it mean, that /any/ of the repeated subfields had such a
subpath but, we happen to not need it.
Args:
abstract_path: the path to test.
store: the store.
Returns:
Whether the store contains abstract_path.
"""
tokens = abstract_path.split(".")
for key in tokens:
if not isinstance(store, dict) or key not in store:
return False
store = store.get(key, {})
# It's ok (maybe) to even /prematurely/ reach the end of abstract_path.
return True
|
465f4c8cc98cf557c9c2036254b985b132071d93
| 43,234 |
def render_dashboard(category, tabs, prefix):
"""Renders a dashboard config string.
Follows this format:
{
name = 'dashboard_name'
dashboard_tab = [
tab('tab-name', 'test-group-name'),
...
]
}
"""
if '\'' in prefix:
raise ValueError(prefix)
if '\'' in category:
raise ValueError(category)
for tab in tabs:
if '\'' in tab:
raise ValueError(tab, tabs)
return """{
name = '%(prefix)s-%(category)s'
dashboard_tab = [
%(tabs)s
]
},""" % dict(
prefix=prefix,
category=category,
tabs='\n '.join('tab(\'%s\', \'%s\'),' % (tab, path)
for (tab, path) in sorted(tabs)))
|
64be8cab5e93f53ad2f9b46ed6c21d483b90def5
| 43,235 |
import pickle
def unpickle(filename: str) -> object:
    """
    Unpickles a file and returns the object
    """
    with open(filename, "rb") as pickleIn:
        return pickle.load(pickleIn)
|
891347cfc1f491a40d797332c2967f7b293630af
| 43,240 |
def highcharts_plot_view(context):
"""
Dependencies for highcharts_plot_view gizmo.
"""
return ('tethys_gizmos/vendor/highcharts/js/highcharts.js',
'tethys_gizmos/vendor/highcharts/js/highcharts-more.js',
'tethys_gizmos/vendor/highcharts/js/modules/exporting.js')
|
05be05ed63964d14e76576ba64134053b7a2745f
| 43,247 |
import itertools
def fast_forward_to_length(sequences, length):
"""
Return an itertools.dropwhile that starts from
the first sequence that has the given length.
>>> list(fast_forward_to_length([list(range(n)) for n in range(6)], 4))
[[0, 1, 2, 3], [0, 1, 2, 3, 4]]
"""
return itertools.dropwhile(lambda seq: len(seq) != length, sequences)
|
41650d1bede05d96bfb1c1ceb4b94eee2a1c6f53
| 43,248 |
import re
def parse_text_annotations(ann_file):
""" Parses BRAT annotations provided in the .ann file and converts them
to annotation spans of (start_position, end_position, entity_class).
Args:
ann_file (str): full path to the BRAT .ann file.
Returns:
annotations (list((int, int, str))): list of annotation spans.
Spans are triples of (start_offset, end_offset, entity_class)
where offset is relative to the text.
"""
annots = []
fann = open(ann_file, "r")
for line in fann:
cols = re.split(r"\s+", line.strip())
if not cols[0].startswith("T"):
continue
annots.append((int(cols[2]), int(cols[3]), cols[1]))
fann.close()
return annots
|
1537f2c044b4562bdc5b2ff89ee74254c399192f
| 43,249 |
def _evaluate_expression(frame, expression):
"""Helper function to evaluate expression in the context of input frame
and throw error if evaluation failed. The evaluated SBValue is returned.
"""
result_value = frame.EvaluateExpression(expression)
if result_value is None or (
result_value.GetError() and result_value.GetError().Fail()
):
raise Exception(
"Fail to evaluate {}: {}".format(
expression, result_value.GetError().GetCString()
)
)
return result_value
|
ef1e51443c0a22b61e1e0a0b9ea2703f8411321a
| 43,250 |
import hashlib
def gravatar(email, size=48):
"""
Simply gets the Gravatar for the commenter. There is no rating or
custom "not found" icon yet. Used with the Django comments.
If no size is given, the default is 48 pixels by 48 pixels.
Template Syntax::
{% gravatar comment.user_email [size] %}
Example usage::
{% gravatar comment.user_email 48 %}
"""
    email_hash = hashlib.md5(email.encode('utf-8')).hexdigest()  # md5() requires bytes on Python 3
    return """<img src="http://www.gravatar.com/avatar/%s?s=%s" width="%s"
    height="%s" alt="gravatar" class="gravatar" />""" % (email_hash, size, size, size)
|
62a6e47047c5be668995ce8c283f424b2dd28594
| 43,252 |
def get_user_path(path, root):
"""
Gets the path used as the key in the database,
e.g. "/2017/2017 08-19 Yosemite"
:param path: path on the local disk where the photo or dir
is located
:param root: path on the local disk that is the root of all
photos and dirs
"""
    # lstrip(root) strips any leading characters that appear in `root`,
    # not the prefix string itself, so slice the prefix off explicitly.
    user_path = path[len(root):] if path.startswith(root) else path
    if not user_path.startswith('/'):
        user_path = "/{}".format(user_path)
    return user_path
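For example, using the hypothetical paths from the docstring, with the prefix slice above:
>>> get_user_path('/photos/2017/2017 08-19 Yosemite', '/photos')
'/2017/2017 08-19 Yosemite'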
|
68d41043bf8cc3f168e50fc597e32d144d88655a
| 43,255 |
import math
def euc_dst(pnt0, pnt1):
"""return the distance between pnt0 and pnt1,
using the euclidean formula.
`pnts` are geographic and result is in meters.
Args:
pnt0 (list): an xyz data list
pnt1 (list): an xyz data list
Returns:
float: the distance beteween pnt0 and pnt1
"""
rad_m = 637100
distance = math.sqrt(sum([(a-b) ** 2 for a, b in zip(pnt0, pnt1)]))
return(rad_m * distance)
|
8bfb4cd2bb30e2c448e4ec9ea63e0cc7c655b50c
| 43,258 |
def get_int_ip(ip):
"""get ip address from ip/mask info
Args:
ip (str): ip with mask
Returns:
str: ip address
"""
return ip.split("/")[0]
|
15f7f6dd6b3a8dfdd6b664eba1487eeb33404130
| 43,261 |
def convert_coordinates(coords, stac=False):
"""
Converts footprint coordinates that have been retrieved from the metadata of source SLC scenes stored in an
:class:`~pyroSAR.drivers.ID` object OR a product extent retrieved using :func:`spatialist.vector.Vector.extent` to
either `envelop` and `center` for usage in the XML metadata files or `bbox` and `geometry` for usage in STAC
metadata files. The latter is returned if the optional parameter `stac` is set to True, else the former is returned.
Parameters
----------
coords: list[tuple(float, float)] or dict
List of coordinate tuple pairs as retrieved from an :class:`~pyroSAR.drivers.ID` objects of source SLC scenes
OR the product extent retrieved using :func:`spatialist.vector.Vector.extent` in the form of a dictionary with
keys: xmin, xmax, ymin, ymax
stac: bool, optional
If set to True, `bbox` and `geometry` are returned for usage in STAC metadata file. If set to False (default)
`envelop` and `center` are returned for usage in XML metadata files.
Returns
-------
    center: str
        Acquisition center coordinates for the XML element 'eop:Footprint/centerOf'.
    envelop: str
        Acquisition footprint coordinates for the XML element 'eop:Footprint/multiExtentOf'.
    Notes
    -------
    If `stac=True` the following results are returned instead of `center` and `envelop`:
bbox: list[float]
Acquisition bounding box for usage in STAC Items. Formatted in accordance with RFC 7946, section 5:
https://datatracker.ietf.org/doc/html/rfc7946#section-5
geometry: dict
Acquisition footprint geometry for usage in STAC Items. Formatted in accordance with RFC 7946, section 3.1.:
https://datatracker.ietf.org/doc/html/rfc7946#section-3.1
"""
if isinstance(coords, (list, tuple)) and len(coords) == 4:
c = coords
x = [c[0][0], c[1][0], c[2][0], c[3][0]]
y = [c[0][1], c[1][1], c[2][1], c[3][1]]
xmin = min(x)
xmax = max(x)
ymin = min(y)
ymax = max(y)
elif isinstance(coords, dict) and len(coords.keys()) == 4:
xmin = coords['xmin']
xmax = coords['xmax']
ymin = coords['ymin']
ymax = coords['ymax']
x = [xmin, xmin, xmax, xmax]
y = [ymin, ymax, ymax, ymin]
else:
raise RuntimeError('Coordinates must be provided as a list of coordinate tuples OR as a dictionary with '
'keys xmin, xmax, ymin, ymax')
if stac:
bbox = [xmin, ymin, xmax, ymax]
geometry = {'type': 'Polygon', 'coordinates': (((x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3]),
(x[0], y[0])),)}
return bbox, geometry
else:
x_c = (xmax + xmin) / 2
y_c = (ymax + ymin) / 2
center = '{} {}'.format(y_c, x_c)
envelop = '{} {} {} {} {} {} {} {} {} {}'.format(y[0], x[0], y[1], x[1], y[2], x[2], y[3], x[3], y[0], x[0])
return center, envelop
|
361eeef4322fd1976f025e51835c049df23eafa7
| 43,265 |
import importlib
def _load_driver(backend, **kargs):
"""Load the correct backend driver for data persistent."""
    # The leading dot makes this a relative import; import_module only honours
    # the `package` argument for relative names.
    bk_module = importlib.import_module('.backend', __package__)
driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
return driver_cls(**kargs)
|
79066605cf32c99fa8d9b583d333c1b19d6b4a6d
| 43,276 |
import sqlite3
def get_schema(db):
"""
Get database's schema, which is a dict with table name as key
and list of column names as value
:param db: database path
:return: schema dict
"""
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
# fetch table names
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
# fetch table info
for table in tables:
cursor.execute("PRAGMA table_info({})".format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
|
f50ec1eeb237a3c7be6eb34ffc8e197fed333811
| 43,278 |
def get_labels_for_ids(labels, ids, ids_are_one_indexed=False):
"""Get the human-readable labels for given ids.
Args:
labels: dict, string-ID to label mapping from ImageNet.
ids: list of ints, IDs to return labels for.
ids_are_one_indexed: whether to increment passed IDs by 1 to account for
the background category. See ArgParser `--ids_are_one_indexed`
for details.
Returns:
list of category labels
"""
return [labels[str(x + int(ids_are_one_indexed))] for x in ids]
|
bc39fe8e7ccaac9ba2abc8a5a2e2fa0a779c82bf
| 43,282 |
def div(a, b):
"""Divide a by b."""
return a / b
|
0a951296d520391c765f5867eb9d5000b4614aea
| 43,291 |
def sum_posts(kinesis_actors):
"""Sum all posts across an array of KinesisPosters
"""
total_records = 0
for actor in kinesis_actors:
total_records += actor.total_records
return total_records
|
e3198a37d4678383321e0624b6a2ffe2ca8cc038
| 43,294 |