content (string, 39-9.28k chars) | sha1 (string, 40 chars) | id (int64, 8-710k)
---|---|---
def parse_arxiv_json(data):
""" Parses the response from the arxiv_metadata collection in Solr
for the exact search on arxiv_identifier. It returns the published date of
the first version of the paper (i.e. if there are multiple versions, it
ignores the revision dates)"""
docs = data['response']['docs']
    # Only one result is returned (num_rows=1, and by the nature of the data there
    # is only one paper per arXiv identifier). The field comes back as a JSON array
    # and we only want the first date, so we take the first element.
return docs[0].get('published_date')[0]
|
c012ad6d0fb9c63d56413adc22321edbea3efa85
| 154,890 |
def get_protocol(url, default="http"):
"""
Extracts the protocol, or returns value of default param (defaults to http)
"""
    double_slash = url.find("//")
    if double_slash == -1:
        protocol = default
    else:
        protocol = url[:double_slash]
if protocol.endswith(':'):
protocol = protocol[:-1]
if protocol == "":
protocol = default
return protocol
|
d385ae97040744e2f2ffaea7d67c801d34098ae0
| 590,547 |
def _check_if_found(new_term: str, targets: list) -> bool:
"""
Checks if `new_term` matches `targets`.
:param new_term: string to check
:param targets: list of strings to match to `new_term`. Targets can be a list of specific targets, e.g.
['UBERON:123219', 'UBERON:1288990'] or of general ontology prefixes, e.g. ['UBERON']
    :return: True if `new_term` matches `targets`, False otherwise
"""
if (':' in targets[0]) and (new_term in targets): # specific
relation_found = True
elif (':' not in targets[0]) and (new_term.split(':')[0] in targets): # general
relation_found = True
else:
relation_found = False
return relation_found
|
05b4227c466e17942e3d1849a94af704c20880e7
| 495,967 |
def rotate(list, places):
"""Shift the elements in a list. A positive place will move the list
to the left, a negative place to the right."""
return list[places:] + list[:places]
|
d7c06f233d0f0946274f555b45b8ab0cb08fe62d
| 88,989 |
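A minimal usage sketch for the rotate function above, assuming plain Python lists; because it is built on slicing, a negative places value shifts to the right:
items = [1, 2, 3, 4, 5]
print(rotate(items, 2))    # [3, 4, 5, 1, 2]  (shift left by 2)
print(rotate(items, -1))   # [5, 1, 2, 3, 4]  (shift right by 1)
print(rotate(items, 0))    # [1, 2, 3, 4, 5]  (unchanged)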
def ip4_subnet_c(ip):
"""returns C subnet of ipv4 address e.g.: 1.2.3.4 -> 1.2.3.0"""
return ".".join(ip.split(".")[:-1]) + ".0"
|
c7f68e5a1d82ea179d9ffd61eb46665cbb95593d
| 489,611 |
def has_value(value):
"""
We want values like 0 and False to be considered values, but values like
None or blank strings to not be considered values
"""
return value or value == 0 or value is False
|
1ffabaed22b2a1b83b89eb4d551c6e776a8e13c0
| 676,224 |
def repeat_list_elements(a_list, rep):
""" Creates a list where each element is repeated rep times. """
    return [element for element in a_list for _ in range(rep)]
|
c503ecef4e29fe334cfa565ea1dab5fa20406b5c
| 447,769 |
def count_meme_entries(motif_path):
"""Count number of meme entries"""
with open(motif_path, "r") as f:
counter = 0
for line in f:
if line[:6] == "letter":
counter += 1
return counter
|
90c0cc72b2693c18a2d7f91451044fc096f71b90
| 477,891 |
def get_domains(contents_list):
"""
Return list of domains without <br/>
"""
return [item for item in contents_list if str(item) != '<br/>']
|
fcd90195dac03c486abdbb69504d3edb765d5437
| 326,610 |
from pathlib import Path
def broken_config(tmp_path):
"""
    Writes an improperly formatted config.yml file into the directory
broken_config.
"""
# Create directory for the config (to avoid messing up the config
# in tmp_path used by other tests)
dir = Path(tmp_path, "broken_config")
dir.mkdir()
configpath = Path(dir, "config.yml")
# write broken file (we can't use yaml / ruamel for this, because
# it would throw an error writing the file)
# The broken line is the *.csv without quotes
with open(configpath, "w") as f:
f.write("files_to_ignore:\n")
f.write("- .DS_Store\n")
f.write("- .ipynb_checkpoints\n")
f.write("- *.csv\n")
return dir
|
02512fb3e38804420c5c969fb905b5d8cee716cd
| 247,202 |
def _validate_prefix(prefix, avoid):
"""
Validate an install prefix.
"""
if prefix.startswith(avoid):
        rest = prefix[len(avoid):]  # the part of the prefix that follows the avoided root
return rest != "/" and rest != ""
return True
|
6be541f65334a28ea09d6237f9e54cbfce9347f0
| 641,272 |
def cowsay(input_string):
"""Return ASCI art of a cow saying the input_string."""
output_string = " "
for letter in list(input_string):
output_string += "_"
output_string += "__\n< {} >\n ".format(input_string)
for letter in list(input_string):
output_string += "-"
output_string += "--\n"
output_string += (" \ ^__^\n" +
" \ (oo)\_______\n" +
" (__)\ )\/\\\n" +
" ||----w |\n" +
" || |\n")
return output_string
|
e692b429f54b1d7de96360a90adfc66fe56d0346
| 216,902 |
def _coerce_to_list(obj):
"""Return [obj] if obj is not a list, otherwise obj."""
if not isinstance(obj, list):
return [obj]
else:
return obj
|
0763f05cf6e781ca8847c61c45845351dc375691
| 470,012 |
def rgb2rgb01(r, g, b):
""" Convert between RGB [0-255] and RGB [0-1] """
return tuple([ float(i)/255. for i in [r,g,b] ])
|
4cd3d47c41de3927d8a0878130651cb714e44256
| 495,293 |
def global_flip_lr(boxes3d):
"""Flip y axis (y coordinate of the center position and yaw angle)"""
boxes3d['c'][:,1] *= -1
boxes3d['r'][:,2] *= -1
return boxes3d
|
01cc62960740c47d16070b0b863f3731fcf10edc
| 148,310 |
import shutil
def tempfiles(tmpdir, filespath):
"""Fixture for copying test files into a temporary directory"""
dest = tmpdir.join('testfiles')
shutil.copytree(filespath, str(dest))
return str(dest)
|
88dafc571a6c4bc678ce18463122abe994cb6dd0
| 601,403 |
def strings_to_ints(lst):
"""transforms a list of strings to a list of ints
ARGS:
list of strings
RETURNS:
list of ints
RAISES:
ValueError: when the string in the list is not a number
"""
return [int(x.strip()) for x in lst]
|
bf81cdcc03fa7750391594f25e46fcb99a229a9c
| 531,945 |
import hmac
def calculate_message_signature(key: str, msg: str):
"""
calculate a message signature using a SHA1 HMAC
:param key: the secret token
:param msg: the message
:return: the calculated HMAC
"""
return hmac.new(key.encode(), msg=msg.encode(), digestmod='sha1').hexdigest()
|
d38ca1d4c219cc3e3e47b87c0ad71e4921f63e93
| 624,413 |
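A hedged sketch of how the signature above might be checked on the receiving side; the verify helper and the example token are assumptions, and hmac.compare_digest is used to avoid timing leaks:
import hmac

def verify_message_signature(key: str, msg: str, signature: str) -> bool:
    # Recompute the SHA1 HMAC and compare the hex digests in constant time
    expected = calculate_message_signature(key, msg)
    return hmac.compare_digest(expected, signature)

sig = calculate_message_signature("secret-token", "some payload")
assert verify_message_signature("secret-token", "some payload", sig)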
from typing import List
def count_occupied_seats(layout: List[List[str]]) -> int:
"""
Count quantity of occupied seats in a given layout
:param layout: seat layout
:return: quantity of occupied seats
"""
occupied_seats = sum(sum(1 for seat in r if seat == '#') for r in layout)
return occupied_seats
|
64dd38f46ec8148d04dba5ee2bb2f8765ef8b6d0
| 356,220 |
def vb_scale(vb, p_a_r, doc_width, doc_height):
""""
Parse SVG viewbox and generate scaling parameters.
Reference documentation: https://www.w3.org/TR/SVG11/coords.html
Inputs:
vb: Contents of SVG viewbox attribute
p_a_r: Contents of SVG preserveAspectRatio attribute
doc_width: Width of SVG document
doc_height: Height of SVG document
Output: sx, sy, ox, oy
Scale parameters (sx,sy) and offset parameters (ox,oy)
"""
if vb is None:
return 1,1,0,0 # No viewbox; return default transform
else:
vb_array = vb.strip().replace(',', ' ').split()
if len(vb_array) < 4:
return 1,1,0,0 # invalid viewbox; return default transform
min_x = float(vb_array[0]) # Viewbox offset: x
min_y = float(vb_array[1]) # Viewbox offset: y
width = float(vb_array[2]) # Viewbox width
height = float(vb_array[3]) # Viewbox height
if width <= 0 or height <= 0:
return 1,1,0,0 # invalid viewbox; return default transform
d_width = float(doc_width)
d_height = float(doc_height)
if d_width <= 0 or d_height <= 0:
return 1,1,0,0 # invalid document size; return default transform
ar_doc = d_height / d_width # Document aspect ratio
ar_vb = height / width # Viewbox aspect ratio
# Default values of the two preserveAspectRatio parameters:
par_align = "xmidymid" # "align" parameter (lowercased)
par_mos = "meet" # "meetOrSlice" parameter
if p_a_r is not None:
par_array = p_a_r.strip().replace(',', ' ').lower().split()
if len(par_array) > 0:
par0 = par_array[0]
if par0 == "defer":
if len(par_array) > 1:
par_align = par_array[1]
if len(par_array) > 2:
par_mos = par_array[2]
else:
par_align = par0
if len(par_array) > 1:
par_mos = par_array[1]
if par_align == "none":
# Scale document to fill page. Do not preserve aspect ratio.
# This is not default behavior, nor what happens if par_align
# is not given; the "none" value must be _explicitly_ specified.
sx = d_width/ width
sy = d_height / height
ox = -min_x
oy = -min_y
return sx,sy,ox,oy
"""
Other than "none", all situations fall into two classes:
1) (ar_doc >= ar_vb AND par_mos == "meet")
or (ar_doc < ar_vb AND par_mos == "slice")
-> In these cases, scale document up until VB fills doc in X.
2) All other cases, i.e.,
(ar_doc < ar_vb AND par_mos == "meet")
or (ar_doc >= ar_vb AND par_mos == "slice")
-> In these cases, scale document up until VB fills doc in Y.
Note in cases where the scaled viewbox exceeds the document
(page) boundaries (all "slice" cases and many "meet" cases where
an offset value is given) that this routine does not perform
any clipping, but subsequent clipping to the page boundary
is appropriate.
Besides "none", there are 9 possible values of par_align:
xminymin xmidymin xmaxymin
xminymid xmidymid xmaxymid
xminymax xmidymax xmaxymax
"""
if (((ar_doc >= ar_vb) and (par_mos == "meet"))
or ((ar_doc < ar_vb) and (par_mos == "slice"))):
# Case 1: Scale document up until VB fills doc in X.
sx = d_width / width
sy = sx # Uniform aspect ratio
ox = -min_x
scaled_vb_height = ar_doc * width
excess_height = scaled_vb_height - height
if par_align in {"xminymin", "xmidymin", "xmaxymin"}:
# Case: Y-Min: Align viewbox to minimum Y of the viewport.
oy = -min_y
# OK: tested with Tall-Meet, Wide-Slice
elif par_align in {"xminymax", "xmidymax", "xmaxymax"}:
# Case: Y-Max: Align viewbox to maximum Y of the viewport.
oy = -min_y + excess_height
# OK: tested with Tall-Meet, Wide-Slice
else: # par_align in {"xminymid", "xmidymid", "xmaxymid"}:
# Default case: Y-Mid: Center viewbox on page in Y
oy = -min_y + excess_height / 2
# OK: Tested with Tall-Meet, Wide-Slice
return sx,sy,ox,oy
else:
# Case 2: Scale document up until VB fills doc in Y.
sy = d_height / height
sx = sy # Uniform aspect ratio
oy = -min_y
scaled_vb_width = height / ar_doc
excess_width = scaled_vb_width - width
if par_align in {"xminymin", "xminymid", "xminymax"}:
# Case: X-Min: Align viewbox to minimum X of the viewport.
ox = -min_x
# OK: Tested with Tall-Slice, Wide-Meet
elif par_align in {"xmaxymin", "xmaxymid", "xmaxymax"}:
# Case: X-Max: Align viewbox to maximum X of the viewport.
ox = -min_x + excess_width
# Need test: Tall-Slice, Wide-Meet
else: # par_align in {"xmidymin", "xmidymid", "xmidymax"}:
# Default case: X-Mid: Center viewbox on page in X
ox = -min_x + excess_width / 2
# OK: Tested with Tall-Slice, Wide-Meet
return sx,sy,ox,oy
return 1,1,0,0
|
fafd361fc29a49d30f3ee0092feca1ec91b99402
| 506,424 |
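A small usage sketch for vb_scale above with made-up sizes: a 100x50 viewbox placed on a 200x200 document with the "xMidYMid meet" policy should scale uniformly by 2 and centre the viewbox vertically:
sx, sy, ox, oy = vb_scale("0 0 100 50", "xMidYMid meet", 200, 200)
# Expected: sx == sy == 2.0, ox == 0, oy == 25 (half the excess height, in viewbox units)
print(sx, sy, ox, oy)
# A missing viewbox falls back to the identity transform (1, 1, 0, 0)
print(vb_scale(None, None, 200, 200))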
def swap(number, i1, i2):
"""
Swap given bits from number.
:param number: A number
:type number: int
:param i1: Bit index
:type i1: int
:param i2: Bit index
:type i2: int
:return: A number with given bits swapped.
:rtype: int
>>> swap(0b101011, 1, 4) == 0b111001
True
"""
b1 = (1 << i1)
b2 = (1 << i2)
v1 = number & b1
v2 = number & b2
if v1 and v2 or not (v1 or v2):
return number
if v1:
number ^= b1
number |= b2
else:
number |= b1
number ^= b2
return number
|
b02399ef5242a0e396a9e09e3dc807958d44faf4
| 404,698 |
def get_support(cluster):
"""
Returns support
>>> get_support({5: {'11111': ['ab', 'ac', 'df', 'bd', 'bc']},
... 4: {'11101': ['ef', 'eg', 'de', 'fg'], '11011': ['cd']},
... 3: {'11001': ['ad'], '10101': ['dg']},
... 2: {'10010': ['dh', 'bh'], '11000': ['be'], '10100': ['gh'],
... '10001': ['ce']},
... 1: {'00100': ['fh', 'eh'], '10000': ['hi']}})
[100.0, 80.0, 60.0, 40.0, 20.0]
"""
return [i * 100 / len(cluster) for i in cluster]
|
33082e6e89a6c116f93a8aed0d64f0e1fa2e74bb
| 451,488 |
def is_end_word(word):
"""
Determines if a word is at the end of a sentence.
"""
if word == 'Mr.' or word == 'Mrs.':
return False
punctuation = "!?."
return word[-1] in punctuation
|
9165fede070654b3d0b10b2bb02855307f9ab0c5
| 67,187 |
import math
def lmoments_parameter_estimation_generalized_logistic(lambda1, lambda2, tau):
"""Return the location, scale and shape or the generalized logistic distribution
Based on SUBROUTINE PELGLO of the LMOMENTS Fortran package version 3.04, July 2005
:param lambda1: L-moment-1
:param lambda2: L-moment-2
:param tau: L-moment-3 / L-moment-2
:return: (*float*) location, scale and shape
"""
assert lambda2 > 0 and -1 < -tau < 1
try:
k = -tau
a = math.sin(k * math.pi) / (k * math.pi)
s = lambda2 * a
m = lambda1 - (s / k) * (1.0 - 1.0 / a)
return m, s, k
except ZeroDivisionError:
return lambda1, lambda2, 0.0
|
d25b5c5891cdc00779c7a20458bae0c5dd061510
| 252,470 |
def is_same_scanner_position(*images):
"""Check that all images have the same unique scanner position.
Parameters
----------
images : sequence[MRI]
Returns
-------
True if all elements of the input set have the same unique scanner
position (which is not `None`).
"""
uids = [img.scanner_position for img in images
if img.scanner_position is not None]
uids = set(uids)
return len(uids) == 1
|
b75e9c790aefbf184675f8e29aebdcec4e12d21f
| 480,174 |
def sizeof(bsObj):
""" Size of BotSense object in bytes. Size is contextual by object type. """
return bsObj.__sizeof__()
|
70c55a9002e336d2d827ee21d04260dfc328d7fc
| 689,127 |
from typing import Any
import torch
def is_singleton_tensor(x: Any) -> bool:
"""Is x a dimensionless tensor?"""
return torch.is_tensor(x) and x.dim() == 0
|
6da89e669928f43a1a87a848b6a512f0c2df2bf2
| 394,258 |
def convert_attr_name(s):
"""Convert all underline in string name to space and capitalize
"""
return " ".join(map(str.capitalize, s.strip().split("_")))
|
6ba56ccef5091ae33588c9a48a244bfefebd9d45
| 607,089 |
def kilometers_to_miles(input_km_value):
"""Convert kilometers to miles.
:param input_km_value: Kilometer value to convert to miles
:type input_km_value: float, int
:return: Miles
"""
return input_km_value * 0.621371
|
9b57983029fe48aec49d45d8bebc74abbb6983c7
| 440,423 |
def format_fuzzy_result(es_result):
"""
format the es search result to front end processable format
@param es_result: the es search result
@type es_result: dict
    @return: the front end processable format, which will be like this::
[{'compound_id': id, 'name': name},...]
@rtype: list
"""
compound_result = es_result['hits']['hits']
result = list()
if len(compound_result) != 0:
for compound_item in compound_result:
info = compound_item['_source']
compound_info = {
'compound_id': info["compound_id"],
'name': info['name'],
}
result.append(compound_info)
return result
|
d5ed35d0b11b60d1dfe4ef72b1c374584a8661b4
| 142,591 |
def get_element_tag(element):
"""Returns the tag name string without namespace of the passed element."""
return element.tag.rsplit('}')[1]
|
6b5ace48209a711db1c1d33e5acc28f7e245ea3e
| 634,263 |
def _convert_ITP_to_path_to_index(index_to_path):
"""convert index_to_path pandas series to path_to_index dict
----
index_to_path:(pandas.series)
returns
path_to_index:(dict) {path:[indices corresponding to that path]}
"""
path_to_index = dict()
for i in index_to_path.index:
index, path = i, index_to_path[i]
        if path in path_to_index:
path_to_index[path].append(index)
else:
path_to_index.update({path:[index]})
return path_to_index
|
84f816b8af4e9cfa56e1d6c8100428e39b746419
| 312,146 |
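A quick sketch with a made-up pandas Series, assuming integer index labels mapping to file paths as the docstring describes:
import pandas as pd

index_to_path = pd.Series(["a.csv", "b.csv", "a.csv"], index=[0, 1, 2])
print(_convert_ITP_to_path_to_index(index_to_path))
# {'a.csv': [0, 2], 'b.csv': [1]}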
import json
def js_r(file_path: str) -> dict:
"""
Read a .json file into a dictionary
Parameters
----------
file_path: str
the path to the .json file
Returns
-------
the .json file in dictionary format
"""
with open(file_path) as f_in:
return json.load(f_in)
|
5cfe7ce205f46c0de88357933a046e3317f68d68
| 371,777 |
def _get_action_profile(x, indptr):
"""
Obtain a tuple of mixed actions from a flattened action profile.
Parameters
----------
x : array_like(float, ndim=1)
Array of flattened mixed action profile of length equal to n_0 +
... + n_N-1, where `out[indptr[i]:indptr[i+1]]` contains player
i's mixed action.
indptr : array_like(int, ndim=1)
Array of index pointers of length N+1, where `indptr[0] = 0` and
`indptr[i+1] = indptr[i] + n_i`.
Returns
-------
action_profile : tuple(ndarray(float, ndim=1))
Tuple of N mixed actions, each of length n_i.
"""
N = len(indptr) - 1
action_profile = tuple(x[indptr[i]:indptr[i+1]] for i in range(N))
return action_profile
|
ffeb0a38f07d16079723beddf73ce090135af43c
| 626,742 |
def _get_all_osc(centers, osc_low, osc_high):
"""Returns all the oscillations in a specified frequency band.
Parameters
----------
centers : 1d array
Vector of oscillation centers.
osc_low : int
Lower bound for frequency range.
osc_high : int
Upper bound for frequency range.
Returns
-------
osc_cens : 1d array
Osc centers in specified frequency band.
"""
# Get inds of desired oscs and pull out from input data
osc_inds = (centers >= osc_low) & (centers <= osc_high)
osc_cens = centers[osc_inds]
return osc_cens
|
9199283080bd0111d8ca3cb74f4c0865de162027
| 13,903 |
def coin_snapshot(self, fsym, tsym):
"""
https://www.cryptocompare.com/api/#-api-data-coinsnapshot-
Keyword arguments:
fsym - The symbol of the currency you want to get that for
tsym - The symbol of the currency that data will be in.
"""
self._is_params_valid(fsym=fsym, tsym=tsym)
return self._fetch_data(self.COIN_SNAPSHOT_URL+'?fsym='+fsym+'&tsym='+tsym)
|
087e76cc56d9246da3293aacaccc1e5a847e79f2
| 339,755 |
def vumps_params(checkpoint_every=500, gauge_via_svd=True, gradient_tol=1E-3,
max_iter=200):
"""
Bundles parameters for the vumps solver itself.
PARAMETERS
----------
gradient_tol (float) : Convergence is declared once the gradient norm is
at least this small.
max_iter (int) : VUMPS ends after this many iterations even if
unconverged.
checkpoint_every (int) : Simulation data is pickled at this periodicity.
out_directory (string) : Output is saved here. The directory is created
if it doesn't exist.
gauge_via_svd (bool, True): With the Jax backend, toggles whether the gauge
match at the
end of each iteration is computed using
an SVD or the QDWH-based polar decomposition.
The former is typically faster on the CPU
or TPU, but the latter is much faster on the
GPU. With the NumPy backend, this
parameter has no effect and the SVD is always
used.
RETURNS
-------
A dictionary storing each of these parameters.
"""
return {"checkpoint_every": checkpoint_every,
"gauge_via_svd": gauge_via_svd,
"gradient_tol": gradient_tol, "max_iter": max_iter}
|
9b5bd7f4f9f560a3991c1205ca1549d5eb12dabf
| 143,703 |
def finite_diff_gradient_descent(f, begin, end, x0=None, niters=10, lr=1):
"""Find the local minima using gradient descent
Parameters:
f (function): Function to find the local minima
begin (int): beginning of the interval
end (int): end of interval
x0 (int): initial point
niters (int): number of iterations
lr: learning rate
"""
eps = (end-begin)/1000
if x0 is None:
x0 = (begin + end) / 2
x = x0
for i in range(niters):
df = (f(x+eps)-f(x-eps))/(2*eps)
x -= lr*df
return x
|
fa1738c0ccb82f353053a6654b0856900b8f0996
| 219,322 |
def _get_module_src_file(module):
"""
Return module.__file__, change extension to '.py' if __file__ is ending with '.pyc'
"""
return module.__file__[:-1] if module.__file__.endswith(".pyc") else module.__file__
|
8fe71809a1d3c1ff4efe01c740e9724c2dbf702a
| 213,510 |
def find_index_list(inputlist, key):
"""get a list of index for key in inputlist"""
start = 0
indexlist = []
    while True:
        try:
            index = inputlist.index(key, start)
        except ValueError:  # no further occurrences of key
            break
indexlist.append(index)
start = index + 1
return indexlist
|
453365cc45c927446f08b9d3f448fcd62c54f6da
| 57,689 |
def parse_test_names(test_name_args):
"""Returns a dictionary mapping test case names to a list of test functions
:param test_name_args: The parsed value of the ``--test`` or ``--skip``
arguments
:return: None if ``test_name_args`` is None, otherwise return a dictionary
mapping test case names to a list of test functions to run. If list is
empty, no specific function was given for that class
"""
if test_name_args is None:
return None
class_map = {}
for test_name in test_name_args:
# Split <module>.[<function>]
test_name_parts = test_name.split('.')
class_map.setdefault(test_name_parts[0], [])
# If a function was specified, append it to the list of functions
if len(test_name_parts) > 1:
class_map[test_name_parts[0]].append(test_name_parts[1])
return class_map
|
a1c5aed647f9b77289c1c23b16b3703b722cb432
| 600,554 |
def _ValidateRepoToDepPathConfig(repo_to_dep_path):
"""Checks that the repo_to_dep_path is properly formatted.
Args:
repo_to_dep_path (dict): A dictionary mapping repository url to its
chromium repo path.
For example:
{
"https://boringssl.googlesource.com/boringssl.git":
"src/third_party/boringssl/src",
"https://chromium.googlesource.com/android_ndk.git":
"src/third_party/android_ndk",
"https://chromium.googlesource.com/angle/angle.git":
"src/third_party/angle",
...
}
Returns:
True if ``repo_to_dep_path`` is properly formatted, False otherwise.
"""
if not isinstance(repo_to_dep_path, dict):
return False
return True
|
1ac03d9c3aa20f2c988cf9990202cbe34ff3ed2a
| 383,906 |
def _pad_with_nulls(data, len_):
""" Pad string with null bytes.
Parameters
----------
data : str/bytes
the string/bytes to pad
len_ : int
the final desired length
"""
return data + (b'\x00' * (len_ - len(data)))
|
238cc6c7b883d087cfa09131eec2b463054f791e
| 662,260 |
def ensure_keys(dict_obj, *keys):
"""
Ensure ``dict_obj`` has the hierarchy ``{keys[0]: {keys[1]: {...}}}``
The innermost key will have ``{}`` has value if didn't exist already.
"""
if len(keys) == 0:
return dict_obj
else:
first, rest = keys[0], keys[1:]
if first not in dict_obj:
dict_obj[first] = {}
dict_obj[first] = ensure_keys(dict_obj[first], *rest)
return dict_obj
|
e8d87444ed8961d8d650b49c8670dca1496623b1
| 29,550 |
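A minimal sketch of ensure_keys above on a plain dict; existing branches are kept and only missing levels are created:
config = {"db": {"host": "localhost"}}
ensure_keys(config, "db", "options", "ssl")
print(config)            # {'db': {'host': 'localhost', 'options': {'ssl': {}}}}
ensure_keys(config, "cache")
print(config["cache"])   # {}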
import io
def read_file(file):
"""Open and read file input."""
    with io.open(file, 'r', encoding='utf-8') as f:
        text = f.read()
    return text
|
8c5e65f59e0475473c29798a8aa10d038628a9b4
| 51,459 |
def fst(pair):
""" First of a pair."""
return pair[0]
|
62f207e03b07a731bb9616c27d9393b2eb8ff2a1
| 485,813 |
def calculate_test_values(
total_words, ocr_recognized_words,
tp, tn, fn
):
"""
Calculates the model test values :
TP : True Positive (There are words and every word has been recognized)
TN : True Negative (There is no word and no word has been recognized)
    FP : False Positive (There is no word but a word (or more) has been recognized; not returned by this function)
FN : False Negative (There are words and NOT every word has been recognized)
"""
if total_words == 0:
tn += 1
else:
if ocr_recognized_words/total_words == 1:
tp += 1
else:
fn += 1
return (tp, tn, fn)
|
e0de958ff308ac3c6a1425203ff3b92b1ecb5fca
| 32,265 |
def get_message_ids(service):
"""
Returns all the message ids from the user
'INBOX'.
"""
msg_list = service.users().messages().list(userId='me',
labelIds='INBOX').execute()
message_ids = msg_list['messages']
return message_ids
|
24c8a2ccbef25449487171895df8555db4b875e8
| 192,530 |
def get_square_color(box, x, y, side):
"""Gets the color of the square with its top left corner at (x, y) and with the sides being a length of side
Returns None if they are not all the same color or if side == 1"""
    if side == 1:
        return None
    colors = set()
    for i in range(side):
        for j in range(side):
            colors.add(box[x + i][y + j])
    if len(colors) == 1:
        return colors.pop()
    else:
        return None
|
2841b25ce03046cda61f4af43ac14d7f14c0629a
| 514,491 |
def consume_byte(content, offset, byte, length=1):
"""Consume length bytes from content, starting at offset. If they
are not all byte, raises a ValueError.
"""
    for i in range(length):
if content[offset + i:offset + i+1] != byte:
raise ValueError(("Expected byte '0x%s' at offset " +
"0x%x but received byte '0x%s'.") % (byte.hex(), offset+i,
content[offset + i:offset + i+1].hex()))
return offset + length
|
1ff372d2ae0766aedbddefae86c52851d6c172ce
| 134,276 |
def merge_qs_maps(obj1, obj2):
"""
Merge queryset map in `obj2` on `obj1`.
"""
for model, [qs2, fields2] in obj2.items():
query_field = obj1.setdefault(model, [model.objects.none(), set()])
query_field[0] |= qs2 # or'ed querysets
query_field[1].update(fields2) # add fields
return obj1
|
d8d669eea385c73ac0322519dde27227fc737772
| 526,310 |
def generate_factor_list(factor_list, df):
"""
Create a dictionary that contains the unique values of different
factors based on the input dataset for visualizations toggling.
Args:
factor_list(list): List containing
factors the user want to toggle when interacting with
the visualizations.
df(dataframe): Dataframe that is used to
calculate the unique values of each factor.
Returns:
the dictionary produced
"""
factors = dict()
    for key in factor_list:
factors[key] = df[key].value_counts().index.values
return factors
|
a7311e8f000e324c33f49f2766192d2f88e74918
| 415,950 |
def forward_box(box):
"""Increase box level (max 4)
Parameters
----------
box: int
question box level
Returns
-------
int: updated box
"""
if box < 4:
box += 1
return box
|
ae0de5b0821e8bde81063471f1f3768022d1856e
| 104,249 |
def _quadratic_bezier(y_points, t):
"""
Makes a single quadratic Bezier curve weighted by y-values.
Parameters
----------
y_points : Container
A container of the three y-values that define the y-values of the
Bezier curve.
t : numpy.ndarray, shape (N,)
The array of values between 0 and 1 defining the Bezier curve.
Returns
-------
output : numpy.ndarray, shape (N,)
The Bezier curve for `t`, using the three points in `y_points` as weights
in order to shift the curve to match the desired y-values.
References
----------
https://pomax.github.io/bezierinfo (if the link is dead, the GitHub repo for the
website is https://github.com/Pomax/BezierInfo-2).
"""
one_minus_t = 1 - t
output = (
y_points[0] * one_minus_t**2 + y_points[1] * 2 * one_minus_t * t + y_points[2] * t**2
)
return output
|
7c5cf27ce2fadb0843039729dc0f01473dfa946c
| 696,382 |
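A short numpy sketch for the Bezier helper above, using three assumed y-values; with endpoints at 0 and a middle weight of 1 the curve reduces to 2*t*(1-t):
import numpy as np

t = np.linspace(0, 1, 5)                        # parameter values in [0, 1]
curve = _quadratic_bezier([0.0, 1.0, 0.0], t)   # symmetric bump peaking at t = 0.5
print(curve)                                    # approx. [0., 0.375, 0.5, 0.375, 0.]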
def all_1_bits() -> int:
"""
Returns:
Examples:
>>> all_1_bits()
-1
>>> bin(all_1_bits())
'-0b1'
"""
return ~0
|
174bef7cd9315152b2bd4232352724bb6de32660
| 170,644 |
def ignore(name, *names):
"""
Function decorator used to ignore certain parameters of your function
when it is converted to command. You can specify one or more parameters
to ignore.
Parameters
----------
name : str
Name of the parameter.
names :
Specify more parameters to ignore.
"""
def decorator(func):
all_names = set((name, ) + names)
if hasattr(func, 'ignore'):
func.ignore = func.ignore.union(all_names)
else:
func.ignore = all_names
return func
return decorator
|
72b14dd6cd7490c1b34b4fd12af6d3a963023c64
| 456,500 |
def checkBaseClassesMatch(bases, typename):
"""Recursively check if the name of the types in bases (or parents) are equal to typename
| *Args:*
| bases - A tuple of types
| typename : str - A name of a type
| *Returns:*
| True if any of the names of types in bases or any of their parent bases equals typename,
| False otherwise
"""
for baseClass in bases:
if (typename == baseClass.__name__):
return True
else:
            if checkBaseClassesMatch(baseClass.__bases__, typename):
                return True
    return False
|
f71a26fa9dd1af65aff54c5b850624177636c679
| 191,294 |
def get_equipped_in_slot(actor, slot):
"""
Returns Equipment in a slot, or None.
"""
if hasattr(actor, 'inventory'):
for obj in actor.inventory:
if obj.equipment and obj.equipment.slot == slot and obj.equipment.is_equipped:
return obj.equipment
return None
|
68afd2af8fe7418756ba3b40cb40918be0559cd7
| 575,804 |
def my_reverse(L):
"""
Accepts a list `L` and reverses its elements. Solves problems 1 & 2.
Parameters
----------
L : list
The list to be reversed.
Returns
-------
revL : list
The reversed list.
"""
# Initialisations
revL = list() # The empty list to be appended to
for i in range(len(L)): # Insert the `i`-th element of `L` to the front of `revL`
revL.insert(0, L[i])
return revL
|
8bb582c4bd41923120899b17cd34f3302a3fdec2
| 387,011 |
def GetExamples(node):
"""Returns the examples (if any) for some Unit node."""
return node.examples
|
592a6bcd01469ab02c8a3a761d18c50408274405
| 217,780 |
def get_Id_Iq(self):
"""Return Id and Iq
Parameters
----------
self : OPdq
An OPdq object
Returns
-------
I_dict : dict
Dict with key "Id", "Iq"
"""
return {"Id": self.Id_ref, "Iq": self.Iq_ref}
|
cba09701c3cd1fb9dda0a560c14819560a38b48e
| 598,639 |
from typing import Tuple
def split_str_date_dt_sc(str_date: str) -> Tuple:
"""Split string date to date and time.
    :param str_date: String date in the format YYYY-MM-DD, optionally followed by a time HH:MM:SS
:return: Tuple with date and time
"""
if ' ' not in str_date:
dt = str_date
sc = '00:00:00'
else:
dt, sc = str_date.split()
return dt, sc
|
67854369cbac20370d658bcfd2c2356049606236
| 153,712 |
def bubble_sort(array):
"""
Sort array in ascending order by bubble sort
Bubble sort, sometimes referred to as sinking sort,
is a simple sorting algorithm that repeatedly steps
through the list, compares adjacent pairs and swaps
them if they are in the wrong order.
- Best-case time performance: O(n)
- Worst-case time performance: O(n^2)
- Average time performance: O(n^2)
- Worst-case space complexity: O(1)
:param array: given unsorted array
:type array: list
:return: sorted array in ascending order
:rtype: list
"""
    # traverse through all array elements
    for i in range(len(array)):
        swapped = False
        # traverse the array from 0 to n-i-1
        # last i elements are already in place
        for j in range(len(array) - i - 1):
            # swap if the element found is greater
            # than the next element
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        # no swaps on this pass means the array is already sorted (best case O(n))
        if not swapped:
            break
    return array
|
6947d7a3fb646f6cb44b11359fbf4bcede35e584
| 352,088 |
def get_strip_square(image, idx):
"""gets the idxth square from a horizontal strip of images"""
square_size = image.size[1]
x_start = square_size * idx
result = image.crop((x_start, 0, x_start + square_size, square_size))
result.load()
return result
|
6640f79b38b56b80ea6ae38a404d4fb136424dd0
| 170,169 |
def create_cmd(parts):
"""Join together a command line represented as list"""
sane_parts = []
for part in parts:
if not isinstance(part, str):
# note: python subprocess module raises a TypeError instead
# of converting everything to string
part = str(part)
sane_parts.append(part)
return ' '.join(sane_parts)
|
c7fe6e58d32c96fe4d70a3cc1290638170351b88
| 636,123 |
def get_login_url(client_id: str, redirect_uri: str) -> str:
"""
Returns the url to the website on which the user logs in
"""
return f"https://login.live.com/oauth20_authorize.srf?client_id={client_id}&response_type=code&redirect_uri={redirect_uri}&scope=XboxLive.signin%20offline_access&state=<optional;"
|
924732c531287d592607f8cb8741b5e620fec8d9
| 441,058 |
def eval_input(variable, table):
"""
Requests user input for given variable and assigns it to a copy of the
table
"""
copy_table = table.copy()
input_val = int(input("Enter value for " + variable + ": "))
copy_table[variable] = input_val
return copy_table
|
bfd0be3a599a81218462ccabe3b0c8f127fc19e2
| 363,981 |
import torch
def from_onehot(onehot, dim=-1, dtype=None):
"""Argmax over trailing dimension of tensor ``onehot``. Optional return
dtype specification."""
indexes = torch.argmax(onehot, dim=dim)
if dtype is not None:
indexes = indexes.type(dtype)
return indexes
|
c011060789ec4eeb46a3aae29be4691ba215f313
| 215,830 |
from typing import Any
import io
def is_file_like(value: Any) -> bool:
"""Check if a value represents a file like object"""
return isinstance(value, io.IOBase)
|
291d72b0ef930951872928a769b7126e35576cdd
| 647,994 |
def to_secs(x: str) -> int:
"""Convert time from hh:mm:ss (str) format to seconds (int).
"""
h, m, s = x.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
|
56ea0efb12ddb287c4cb218e1625d497d1f1a8a0
| 532,928 |
def create_mae_rescaled(scale_factor):
"""Create a callback function which tracks mae rescaled
Arguments:
scale_factor: Scaling factor with which the labels were scaled initially
"""
def mae_rescaled(y_true, y_pred):
difference = abs(y_pred - y_true)
return difference / scale_factor
return mae_rescaled
|
70380a2845f58e4ddb80252819c0e5aaa8521e43
| 247,728 |
def filter_arglist(args, defaults, bound_argnames):
"""
Filters a list of function argument nodes (``ast.arg``)
and corresponding defaults to exclude all arguments with the names
present in ``bound_arguments``.
Returns a pair of new arguments and defaults.
"""
new_args = []
new_defaults = []
required_args = len(args) - len(defaults)
for i, arg in enumerate(args):
if arg.arg not in bound_argnames:
new_args.append(arg)
if i >= required_args:
new_defaults.append(defaults[i - required_args])
return new_args, new_defaults
|
d6346cfdc7a8579411223f11fcfc946dc4ac4a10
| 689,991 |
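A sketch of filter_arglist above driven through the ast module; the example function and the bound names are made up for illustration:
import ast

tree = ast.parse("def f(a, b, c=1, d=2): pass")
func = tree.body[0]
new_args, new_defaults = filter_arglist(func.args.args, func.args.defaults,
                                        bound_argnames={"b", "d"})
print([a.arg for a in new_args])                    # ['a', 'c']
print([ast.literal_eval(d) for d in new_defaults])  # [1]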
def only_sig(row):
"""Returns only significant events"""
if row[-1] == 'yes':
return row
|
12ff2d7b5cea4a01f3ecbba81dc1f4f982f7bd5a
| 109,229 |
def rowmul(matlist, index, k, K):
"""
    Multiplies row `index` of matlist by k, in place (the argument K is unused here)
"""
for i in range(len(matlist[index])):
matlist[index][i] = k * matlist[index][i]
return matlist
|
aa365da33d80e2cb90c14e2b1af1a19cbfacb947
| 147,612 |
from typing import List
from typing import Any
def _partition(items: List[Any], left: int, right: int) -> int:
"""Partitions the array around the pivot value.
Picks a pivot value and sorts everything to the left <= pivot,
and everything greater than pivot to the right.
At the end, the pivot will be in its correct position.
Example: [100, 5, -2, 1, 52, 3, 4]
1. Pick a pivot value. (rightmost item)
2. Set a pointer (i) to the left -- this is the greatest element.
3. Iterate until the pivot.
3.1 If we find an element <= pivot
3.2 Swap it with the greater value (i)
3.3 Increment greater element pivot (i)
4. Swap the pivot with the greater element pointer
5. Return the pivot position.
Args:
items: The list of items to sort.
left: The leftmost index.
right: The rightmost index.
Returns: The index of the pivot value.
"""
# Choose rightmost element as pivot.
pivot_val = items[right]
# Greater element pointer
# We know the pointer must point to something greater
# because if not it would have been swapped.
i = left
for j in range(left, right):
if items[j] <= pivot_val:
# An item smaller than the pivot was found:
# Swap it with the greater element (i)
items[i], items[j] = items[j], items[i]
i += 1
# Swap the pivot element with the greater element
items[i], items[right] = items[right], items[i]
# Return position where partition finished
return i
|
4d28a10efc8034be97bc693956686c0f5d2db537
| 277,043 |
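A hedged sketch of a recursive quicksort driver built on the _partition helper above; the wrapper itself is an assumption, not part of the original entry:
from typing import Any, List, Optional

def quicksort(items: List[Any], left: int = 0, right: Optional[int] = None) -> None:
    """Sort items in place by repeatedly partitioning around a pivot."""
    if right is None:
        right = len(items) - 1
    if left < right:
        pivot_index = _partition(items, left, right)
        quicksort(items, left, pivot_index - 1)
        quicksort(items, pivot_index + 1, right)

data = [100, 5, -2, 1, 52, 3, 4]
quicksort(data)
print(data)   # [-2, 1, 3, 4, 5, 52, 100]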
def sorted_equality(v1, v2, read):
"""
Equality for fields where the values must be sorted before equality tested.
"""
return sorted(v1) == sorted(v2)
|
ff9a4cfd917b04dda5655ef2c28489f9fa46626c
| 567,501 |
def get_model(dataset, model_hparams):
"""Return the dataset class with the given name
Args:
dataset (str): name of the dataset
model_hparams (dict): model hyperparameters
"""
if model_hparams['model'] not in globals():
raise NotImplementedError("Dataset not found: {}".format(model_hparams['model']))
model_fn = globals()[model_hparams['model']]
return model_fn(dataset, model_hparams)
|
106413e5874a90322da047062a822c027905b138
| 289,151 |
import json
import requests
def metadata_post_request(file_name, metadata, auth_parameter, url):
"""
Used to structure and make the post request for metadata to Zenodo.
Parameters
----------
file_name : str
The name of the file to be created.
metadata : dict
The PresQT metadata file to be created.
auth_parameter : dict
The Zenodo authentication parameter.
url : str
The url to issue the post request.
Returns
-------
The status code of the request.
"""
# Prepare the request values
data = {'name': file_name}
metadata_bytes = json.dumps(metadata, indent=4).encode('utf-8')
files = {'file': metadata_bytes}
# Make the request
response = requests.post(url, params=auth_parameter, data=data, files=files)
return response.status_code
|
150d342257cdd9ee7a3576878a17aced2be9faa3
| 515,318 |
import re
def uncamel(s):
"""Convert CamelCase to underscore_case."""
return re.sub('(?!^)([A-Z])(?=[^A-Z])', r'_\1', s).lower()
|
4192de6b8cdf5c2b7cd380e40e662bb3f7892152
| 344,052 |
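A few illustrative calls for uncamel above; the lookahead keeps an acronym run together until its last capital:
print(uncamel("CamelCase"))    # camel_case
print(uncamel("HTTPServer"))   # http_server
print(uncamel("already_low"))  # already_low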
import re
def _GenerateSensitiveWordsRe(words):
"""Returns the regexp for matching sensitive words.
Args:
words: A sequence of strings.
Returns:
A pattern object, matching any of the given words. If words is empty,
returns None.
"""
if not words:
return None
union = []
for word in words:
union.append(word)
union.append(word.capitalize())
union.append(word.upper())
return re.compile(u'(%s)' % u'|'.join(union))
|
4ee004d96b57480d307a1f9a450600d30ed12741
| 223,360 |
def get_bill_to_address(user):
"""
Create an address appropriate to pass to billTo on the CyberSource API
Args:
user (User): the user whose address to use
Returns:
dict:
User's legal_address in the appropriate data structure
"""
legal_address = user.legal_address
    # minimally required fields
billing_address = {
"firstName": legal_address.first_name,
"lastName": legal_address.last_name,
"email": user.email,
"street1": legal_address.street_address_1,
"street2": legal_address.street_address_2,
"street3": legal_address.street_address_3,
"street4": legal_address.street_address_4,
"city": legal_address.city,
"country": legal_address.country,
}
# these are required for certain countries, we presume here that data was validated before it was written
if legal_address.state_or_territory:
        # State is in US-MA format and we want to send the second part
billing_address["state"] = legal_address.state_or_territory.split("-")[1]
if legal_address.postal_code:
billing_address["postalCode"] = legal_address.postal_code
return billing_address
|
8558a4e2185d4f634695bf4d8248b744c17cc557
| 57,360 |
def fold_whoopsies(whoopsies1, whoopsies2):
""" Merge whoopsies2 into whoopsies1
sorted on query, then descending on magnitude
of the whoops (so biggest whoops for queries come first)"""
whoopsies1.extend(whoopsies2)
    whoopsies1.sort(key=lambda x: (x.qid, -x.magnitude()))
return whoopsies1
|
078532ce2ec15c2e2c27fb935b7b7e634adce636
| 344,635 |
def set_ipu_model_options(opts, compile_ipu_code=True):
"""Set the IPU Model options.
Args:
compile_ipu_code: Whether or not to actually compile real IPU code for
modelling.
Returns:
The IpuOptions configuration protobuf, with IPU model options set.
"""
opts.ipu_model_config.compile_ipu_code = compile_ipu_code
return opts
|
d5e9577fb9ebad81b6fedb1988561197dbd3028e
| 697,255 |
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
|
1bad0472a2c1e51c7563608c8d2439c18026f9b6
| 190,958 |
def build_cmd(*args, **kwargs):
"""
>>> build_cmd('script.py', 'train', model_pickle='tmp.pkl', shuffle=True)
'script.py train --model_pickle "tmp.pkl" --shuffle'
"""
options = []
for key, value in kwargs.items():
if isinstance(value, bool):
if value:
options.append("--%s" % key)
else:
options.append("--no_%s" % key)
elif isinstance(value, int) or isinstance(value, float):
options.append("--%s %s" % (key, value))
else:
options.append('--%s "%s"' % (key, value))
return " ".join(list(args) + options)
|
c6adf3618b4541d54c0c005e6fcab99288c1f1b2
| 244,552 |
def StripSo(name):
"""Strip trailing hexidecimal characters from the name of a shared object.
It strips everything after the last '.' in the name, and checks that the new
name ends with .so.
e.g.
libc.so.ad6acbfa => libc.so
foo.bar.baz => foo.bar.baz
"""
stripped_name = '.'.join(name.split('.')[:-1])
if stripped_name.endswith('.so'):
return stripped_name
return name
|
ac7914867388e0a84f0252be5c29a8ceecc7b9b6
| 312,706 |
def any_ga(geoawi):
"""
GEOAWI: GA W/I GRID
0=None
1=Quest
2=<I2
3=<O2
4=<1/2 DA
5=<1DA
6=<2DA
7=>2DA
8=CG
Returns:
0, 1, 88
"""
if geoawi == 0:
return 0
elif 1 <= geoawi <= 7:
return 1
elif geoawi == 8:
return 88
else:
raise KeyError('geoawi: %s' % geoawi)
|
1db941f44a23f8ec99fb27efd395cc12c8902544
| 669,299 |
def transpose(table):
"""
Returns a copy of table with rows and columns swapped
Example:
1 2 1 3 5
3 4 => 2 4 6
5 6
Parameter table: the table to transpose
Precondition: table is a rectangular 2d List of numbers
"""
    result = []          # Result (new table) accumulator
    # Loop over columns: add each column of table as a row to result
    for col in range(len(table[0])):
        result.append([row[col] for row in table])
    return result
|
fe84714d3e09deb22058fd75ac3333c2206f77c3
| 704,561 |
def shake_drop_eval(x, mask_prob, alpha_min, alpha_max):
"""ShakeDrop eval pass.
See https://arxiv.org/abs/1802.02375
Args:
x: input to apply ShakeDrop to
mask_prob: mask probability
alpha_min: alpha range lower
alpha_max: alpha range upper
  Returns:
    The expected-value rescaling of x used at eval time.
"""
expected_alpha = (alpha_max + alpha_min) / 2
# See Eqn 6 in https://arxiv.org/abs/1802.02375
x = (mask_prob + expected_alpha - mask_prob * expected_alpha) * x
return x
|
ee19a07eaf42d4be3fcccf2850109044eea1c826
| 457,431 |
def measure(obj, depth=0):
"""
Returns the number of nodes, properties and the depth of an inspect tree.
`obj` is a dict read from JSON that represents inspect data
"""
nodes = 0
properties = 0
max_depth = depth
for (_, child) in obj.items():
# ensure this is a node that is not a histogram
if type(child) is dict and 'buckets' not in child:
(child_nodes, child_properties, child_depth) = measure(
child, depth=depth + 1)
nodes += child_nodes + 1
properties += child_properties
max_depth = max(max_depth, child_depth)
continue
properties += 1
return nodes, properties, max_depth
|
cd7a4d7a7d2a2fea41a09b1edd5b1cb1a4403bea
| 124,640 |
def shrink_sides(image, ts=0, bs=0, ls=0, rs=0):
"""Shrinks/crops the image through shrinking each side of the image.
params:
image: A numpy ndarray, which has 2 or 3 dimensions
ts: An integer, which is the amount to shrink the top side
of the image
bs: An integer, which is the amount to shrink the bottom side
of the image
ls: An integer, which is the amount to shrink the left side
of the image
rs: An integer, which is the amount to shrink the right side
of the image
return: A numpy ndarray, which has the same number of dimensions as image
"""
return image[ts:image.shape[0] - bs, ls:image.shape[1] - rs]
|
6858a75516626affb3d65b9c8aad8bd207cfe495
| 54,186 |
import re
def opensearch_clean(f):
"""
Some opensearch clients send along optional parameters from the opensearch
description when they're not needed. For example:
state={openoni:state?}
These can cause search results not to come back, and even can cause Solr's
query parsing to throw an exception, so it's best to remove them when
present.
"""
def f1(request, **kwargs):
new_get = request.GET.copy()
for k, v in list(new_get.items()):
if type(v) == str and re.match(r'^\{.+\?\}$', v):
new_get.pop(k)
request.GET = new_get
return f(request, **kwargs)
return f1
|
862bf8cbb9a2629949746a92b78b3b23bdfd7c49
| 38,273 |
def GetLoans(sliver_name):
"""Return the list of loans made by the specified sliver"""
rec = sliver_name
return rec.get('_loans', [])[:]
|
4736034a729a7135e851110b1f57c996367b7232
| 577,154 |
def humanize_seconds(seconds):
"""
Returns a humanized string representing time difference
between now() and the input timestamp.
The output rounds up to days, hours, minutes, or seconds.
4 days 5 hours returns '4 days'
0 days 4 hours 3 minutes returns '4 hours', etc...
"""
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    if days > 0:
        if days == 1: return "{0} day".format(days)
        else: return "{0} days".format(days)
    elif hours > 0:
        if hours == 1: return "{0} hour".format(hours)
        else: return "{0} hours".format(hours)
    elif minutes > 0:
        if minutes == 1:return "{0} minute".format(minutes)
        else: return "{0} minutes".format(minutes)
    elif seconds > 0:
        if seconds == 1:return "{0} second".format(seconds)
        else: return "{0} seconds".format(seconds)
    else:
        return None
|
22051f1467ebffa89d061cb5301b789338f69eb5
| 521,479 |
import types
def is_object_module(obj):
"""
Test if the argument is a module object.
:param obj: Object
:type obj: any
:rtype: boolean
"""
return isinstance(obj, types.ModuleType)
|
b9dc22543b19be02f0cd2ac396370affcd0e3aeb
| 643,223 |
def convert_to_list(obj):
""" receives an object and if type tuple or list, return list. Else
return a list containing the one object
"""
    if obj is None: return []  # None implies empty list...
if type(obj) is list: return obj
if type(obj) is tuple:
return [x for x in obj]
return [obj]
|
aa78ea08f06ffab91feead5898a4433807966c02
| 622,964 |
def fibi(n: int) -> int:
"""Fibonacci numbers saving just two previous values
>>> fibi(20)
6765
>>> fibi(1)
1
>>> fibi(2)
1
>>> fibi(3)
2
"""
if n == 0:
return 0
if n == 1:
return 1
f_n2, f_n1 = 1, 1
for _ in range(3, n+1):
f_n2, f_n1 = f_n1, f_n2+f_n1
return f_n1
|
1e7d331e5572c5a1886c31e7d51d0d3d9710c37f
| 390,711 |
def interval_overlap_length(i1,i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : pairs of two floats
The two intervals.
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a,b) = i1
(c,d) = i2
if a<c:
if b<c:
return 0.
elif b<d:
return b-c
else:
return d-c
elif a<d:
if b<d:
return b-a
else:
return d-a
else:
return 0
|
a22fdd3cf76a503700055bc4077ef9584b6b394b
| 640,149 |
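A few quick checks for the overlap helper above, assuming each interval is an ordered (low, high) pair:
assert interval_overlap_length((0.0, 2.0), (1.0, 3.0)) == 1.0   # partial overlap
assert interval_overlap_length((0.0, 1.0), (2.0, 3.0)) == 0.0   # disjoint intervals
assert interval_overlap_length((0.0, 5.0), (1.0, 2.0)) == 1.0   # i2 contained in i1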
def create_mock_resource_resp(dm):
"""Given an instance of Deployment, transforms into resource info as API response."""
return {
"name": dm.name,
"insertTime": dm.insert_time,
"properties": "zone: " + dm.zone,
}
|
6c140a2389b220b26f231c066e2d7fe108f7b117
| 599,368 |
from typing import List
def get_characters_from_file(file_path: str) -> List[str]:
"""
Opens the specified file and retrieves a list of characters.
Assuming each character is in one line.
Characters can have special characters including a space character.
Args:
file_path (str): path to the file
Returns:
List[str]: List of character names
"""
characters = []
with open(file_path, 'r') as characters_file:
characters = [
# Remove leading/trailing spaces
character_name.strip()
for character_name in characters_file.readlines()
# It may contain empty lines or comments
if character_name.strip() and character_name[0] != '#'
]
return characters
|
50fda9d3e4b2e1dd5724174549967165879dcc14
| 687,413 |