content (stringlengths 39–14.9k) | sha1 (stringlengths 40–40) | id (int64 0–710k)
---|---|---
import copy
def threshold(
    array,
    thresh_max=None,
    thresh_min=None,
    val_max=None,
    val_min=None,
    inPlace_pref=False):
    """
    Thresholds values in an array and sets them to defined values
    RH 2021
    Args:
        array (np.ndarray): the array to threshold
        thresh_max (number, scalar): values in array above this are set to val_max
        thresh_min (number, scalar): values in array below this are set to val_min
        val_max (number, scalar): values in array above thresh_max are set to this
        val_min (number, scalar): values in array below thresh_min are set to this
        inPlace_pref (bool): whether to do the calculation 'in place', and change the local input variable directly
    Return:
        output_array (np.ndarray): same as input array but with values thresholded
    """
    if val_max is None:
        val_max = thresh_max
    if val_min is None:
        val_min = thresh_min
    if inPlace_pref:
        output_array = array
    else:
        output_array = copy.deepcopy(array)
    if thresh_max is None:
        output_array[output_array < thresh_min] = val_min
    elif thresh_min is None:
        output_array[output_array > thresh_max] = val_max
    else:
        output_array[output_array < thresh_min] = val_min
        output_array[output_array > thresh_max] = val_max
    return output_array
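# Usage sketch (illustrative values, assumes numpy is available):
#
#   import numpy as np
#   threshold(np.array([0.1, 0.5, 0.9]), thresh_max=0.8, thresh_min=0.2)
#   # -> array([0.2, 0.5, 0.8])
#
# With val_max/val_min left as None, clipped entries take the threshold
# values themselves, so this call behaves like np.clip(a, 0.2, 0.8).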
|
3f60c92e89a982b5037aeb6106602334a7c2673c
| 39,622 |
def read_txt_file(path):
    """
    For a given .txt path, reads the file and returns a string
    :param path: The path to the .txt file
    :return: string of the file
    """
    # Use a context manager so the file handle is closed even on error
    # (the original left the file open).
    with open(path, "r", encoding='utf-8') as file:
        text_string = file.read()
    return text_string
|
364dc24fc13e60716ed84f03c035296f411081db
| 39,623 |
def findall(s, sub):
    """Returns all indices of 'sub' within 's', as a list"""
    ret = []
    cur = 0
    while True:
        n = s.find(sub, cur)
        if n < 0:
            return ret
        ret.append(n)
        cur = n + 1
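# Illustrative call: overlapping matches are found, because the scan
# resumes one character after each hit.
#
#   findall("aaa", "aa")  # -> [0, 1]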
|
7a20518048237eff2a3e8e6d1d525f1c47fca3d0
| 39,624 |
def unzip(l):
    """Unzips a list of tuples into a tuple of two lists
    e.g. [(1,2), (3, 4)] -> ([1, 3], [2, 4])
    """
    xs = [t[0] for t in l]
    ys = [t[1] for t in l]
    return xs, ys
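# Near-equivalent one-liner for the same pair-splitting (returns tuples
# rather than lists, and raises on an empty input, so the explicit
# version above is slightly more forgiving):
#
#   xs, ys = zip(*[(1, 2), (3, 4)])  # -> (1, 3), (2, 4)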
|
72ad40e0cadc11bab62f25861893fa3c01952b24
| 39,625 |
def _CurrentRolesForAccount(project_iam_policy, account):
    """Returns a set containing the roles for `account`.
    Args:
        project_iam_policy: The response from GetIamPolicy.
        account: A string with the identifier of an account.
    """
    return set(binding.role
               for binding in project_iam_policy.bindings
               if account in binding.members)
|
64bb0a51600778580016e3545fe7230949b46d63
| 39,627 |
def extract_clas(the_sidd):
    """
    Extract the classification string from a SIDD as appropriate for NITF Security
    tags CLAS attribute.
    Parameters
    ----------
    the_sidd : SIDDType|SIDDType1
    Returns
    -------
    str
    """
    class_str = the_sidd.ProductCreation.Classification.classification
    if class_str is None or class_str == '':
        return 'U'
    else:
        return class_str[:1]
|
1745de16393035a7ba0ee8035e6c3be15a4b4af4
| 39,637 |
def auto_cmap(labels):
    """
    Find an appropriate color map based on the provided labels.
    """
    assert len(labels) <= 20, "Too many labels to support"
    cmap = "Category10_10" if len(labels) <= 10 else "Category20_20"
    return cmap
|
99bdf74197b17d5908237e6a45b0882330c96024
| 39,639 |
import re
def create_target_population(cov_pop_burden_df, param_df, index):
    """Adds a new column to cov_pop_burden_df which is the target population
    Inputs:
        cov_pop_burden_df - a df which must contain 'incidence_number' and
            'pop_0-0' columns
        param_df - a df of parameters which must contain the column 'intervention_type'
            where all of the values are 'Therapeutic', 'Device',
            'Rapid diagnostic test' or 'Vaccine'
        index - a string that is one of the indexes of param_df
    Returns:
        a cov_pop_burden_df with target_pop column added
    """
    intervention_type = param_df.loc[index, 'intervention_type']
    # Select incidence as the target population if it is a therapeutic or diagnostic
    if re.search('Therapeutic', intervention_type):
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['incidence_number']
    elif intervention_type in ['Device', 'Rapid diagnostic test']:
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['incidence_number']
    # Select population column if it is a vaccine
    #~ This assumes it is an infant vaccination
    elif param_df.loc[index, 'intervention_type'] == 'Vaccine':
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['pop_0-0']
    else:
        raise ValueError(
            'The value of intervention_type %r is not valid' % intervention_type)
    return cov_pop_burden_df
|
147137634fa1b65b7aae59db4af615fd79e606af
| 39,641 |
def append_space(prompt):
    """Adds a space to the end of the given string if none is present."""
    if not prompt.endswith(' '):
        return prompt + ' '
    return prompt
|
d6a408d613f0c790cce6d35ccdb1edfeb2aca865
| 39,643 |
def remove_page(api, assessment_id):
    """Remove all pages from an assessment."""
    allPages = api.pages.get()
    for page in allPages:
        if page.name.startswith(assessment_id):
            api.pages.delete(page.id)
    return True
|
4075c7245cc9c01117d58b69eb06a33d2bbc7e37
| 39,648 |
def _replace_bucket_unit(match_obj):
    """Replace the intern('unit') in `bucket()` with just the string
    literal, because the unit determines the return type of the column and the
    function would not be able to validate a unit if it was interned."""
    full = match_obj.group(0)
    interned = match_obj.group(1)
    unit = match_obj.group(2)
    # from "bucket(col, intern('unit'))" to "bucket(col, 'unit')"
    return "{0}'{1}')".format(full[0:full.index(interned)], unit)
|
2fd0fa094bb816cf3842a410eb668e202031bde9
| 39,654 |
def remove_char(fasta_d):
    """
    Removes the > from fasta headers in the fasta dictionaries,
    coming from empty lines in the fastas
    Parameters
    ----------
    fasta_d : dict
        Dictionary for fasta, chromosome names as values
    Returns
    -------
    The dictionaries of chromosome names as value and sequence as key,
    minus the '>'
    """
    for i in fasta_d.keys():
        fasta_d[i] = fasta_d[i].lstrip(">")
    return fasta_d
|
5023ea9db1a81a0e5e5645fd5e0f2a079ea15056
| 39,657 |
from datetime import datetime
def convert_to_datetime(date: str, time: str) -> datetime:
    """
    Converts a date and time string into a datetime object.
    """
    return datetime.strptime(date + time, '%Y-%m-%d%I:%M %p')
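# Example of the expected input shapes: date and time are concatenated
# with no separator, so the format string has none either.
#
#   convert_to_datetime("2021-03-05", "9:30 AM")
#   # -> datetime.datetime(2021, 3, 5, 9, 30)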
|
8fe295138ed4796396e8874e738f172fd455e9ef
| 39,658 |
import atexit
def init_context(dev):
    """
    Create a context that will be cleaned up properly.
    Create a context on the specified device and register its pop()
    method with atexit.
    Parameters
    ----------
    dev : pycuda.driver.Device
        GPU device.
    Returns
    -------
    ctx : pycuda.driver.Context
        Created context.
    """
    ctx = dev.make_context()
    atexit.register(ctx.pop)
    return ctx
|
878b49c23394a6940255e2382d370314c62119d1
| 39,676 |
import sympy
def get_idxs(exprs):
    """
    Finds sympy.tensor.indexed.Idx instances and returns them.
    """
    idxs = set()
    for expr in exprs:
        for i in expr.find(sympy.Idx):
            idxs.add(i)
    return sorted(idxs, key=str)
|
99c8b2cc63d346f8a8994aef12907767b46a5b98
| 39,686 |
def check_sequence_signing_type(sequence):
    """
    Checks that only one of signing_key or self_signed is present in the entry
    """
    for entity in sequence:
        # Check only if the entity has a defined certificate
        if 'certificate' not in entity:
            continue
        cert = entity['certificate']
        # Check the keys are not present at the same time
        if 'signing_key' in cert and 'self_signed' in cert:
            return {
                'result': False,
                'message': ("The certificate '%s' can't define signing_key and self_signed at the "
                            "same time.") % entity['name']
            }
    return {
        'result': True,
        'message': "All certificates have a correct private key attribute."
    }
|
32c3d620c637a378ccab8f427be6e6627d6e9d7c
| 39,688 |
import struct
def read_subheader(subheader):
    """
    Return the subheader as a list
    Parameters
    ----------
    subheader (string):
        32 character string in the subheader format
    Returns
    -------
    list:
        10 item list with the following data members:
        [0] subflgs
        [1] subexp
        [2] subindx
        [3] subtime
        [4] subnext
        [5] subnois
        [6] subnpts
        [7] subscan
        [8] subwlevel
        [9] subresv
    """
    subhead_str = "<cchfffiif4s"
    items = struct.unpack(subhead_str.encode('utf8'), subheader)
    item_cpy = [ord(i) for i in items[:2]]
    item_cpy += items[2:]
    return item_cpy
|
fe6079457cd5e7e1ef9defbb7470933f6d2bde79
| 39,691 |
def mutateScript(context, script, mutator):
    """Apply `mutator` function to every command in the `script` array of
    strings. The mutator function is called with `context` and the string to
    be mutated and must return the modified string. Sets `context.tmpBase`
    to a path unique to every command."""
    previous_tmpbase = context.tmpBase
    i = 0
    mutated_script = []
    for line in script:
        number = ""
        if len(script) > 1:
            number = "-%s" % (i,)
            i += 1
        context.tmpBase = previous_tmpbase + number
        mutated_line = mutator(context, line)
        mutated_script.append(mutated_line)
    return mutated_script
|
e6aa1d1c021505f67e5025b6100bed43bd03d44c
| 39,697 |
from datetime import datetime
def get_date(date_str: str):
    """Get a datetime object from a DD/MM string."""
    try:
        date = datetime.strptime(date_str, "%d/%m")
        return date.replace(year=2016)
    except ValueError:
        return None
|
4adc6537b256a01c01f2ad4b663652bb8dd5fa11
| 39,699 |
def __key2str(key):
    """
    Take a key and return in string format.
    """
    if type(key) is tuple:
        return " ".join(key)
    else:
        return key
|
34bac54532980870c0d461c79eb6d4b162706050
| 39,713 |
import requests
def get_lihkg_response(resp):
    """
    get_lihkg_response(resp)
    Obtain the data of the response object.
    Return:
    -------
    A dictionary.
    """
    response = dict()
    if isinstance(resp, requests.models.Response):
        if resp.status_code == 200:
            response = resp.json()
            if response.get('success', 0) == 1:
                response = response.get('response', dict())
        return response
    else:
        raise TypeError('resp must be a \'requests.models.Response\' object.')
|
46f3677fd42b5eaf7779cdb3daa70f3f616ebb1b
| 39,715 |
def get_resources(connection):
    """ Do an RTSP-DESCRIBE request, then parse out available resources from the response """
    resp = connection.describe(verbose=False).split('\r\n')
    resources = [x.replace('a=control:', '') for x in resp
                 if (x.find('control:') != -1 and x[-1] != '*')]
    return resources
|
7ba421b9ac930d08e035b5f221bfc38920da3e8a
| 39,720 |
import socket
def check_connection(addr: str, port: int, *, timeout: float = 0.1) -> bool:
    """
    Attempt to make a TCP connection. Return True if a connection was made
    within ``timeout`` seconds, False otherwise.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(float(timeout))
        try:
            s.connect((addr, port))
        except Exception:
            return False
        return True
|
1b378b394df2c6433b3bbdd87f5038fdcc919678
| 39,721 |
from typing import List
from typing import Any
from typing import Iterable
def as_list(item_or_sequence) -> List[Any]:
    """Turns an arbitrary sequence or a single item into a list. In case of
    a single item, the list contains this element as its sole item."""
    # Caveat: strings are also Iterable, so a str argument is split into
    # its characters rather than wrapped as a single item.
    if isinstance(item_or_sequence, Iterable):
        return list(item_or_sequence)
    return [item_or_sequence]
|
2edf2d9adb03c0efb16e59a507a918149fdae524
| 39,727 |
import json
def LoadPropDatabase(filename):
    """Loads a propeller database from a .json file."""
    with open(filename, 'r') as f:
        prop_data = json.loads(f.read())
    return prop_data
|
d5eb392e7c2c8258d55fffefa69876e519e29ac5
| 39,729 |
import time
def elapsed_time(t0):
    """Given a start time (time.time() object), computes and returns elapsed
    time as a string.
    Keyword arguments
    =================
    :param t0: output of time.time()
        Start time to compute elapsed time from (no default)
    :return: str
        Elapsed time.
    """
    t = time.time() - t0
    if t < 60:
        t = "{:2.1f} sec.".format(t)
    elif t < 3600:  # the original bound `60 < t < 3600` mis-handled t == 60 exactly
        t = "{:2.1f} min.".format(t / 60)
    else:
        t = "{:2.1f} hr.".format(t / 3600)
    return t
|
9ad33267df8a89eaab45b7e274656fbb23e29baa
| 39,730 |
def format_attribute(name, val, lng):
    """Format a string for displaying the name and value of an attribute.
    Args:
        name: name of the attribute to display.
        val: value of the attribute to display.
        lng: length of the string to be returned, in number of
            characters. Blank space will be padded with '-' characters.
    Returns: a string.
    """
    name += ' '
    if val is not None:
        val = ' ' + val
        lng -= len(val)
        return '{:-<{pad}.{trunc}}{}'.format(
            name, val, pad=lng, trunc=lng)
    else:
        return '{:<{pad}.{trunc}}'.format(name, pad=lng, trunc=lng)
|
72cf9f0a5499e9e219292eac5e30752ebc0477fa
| 39,731 |
from typing import Union
def seconds_to_datetime(seconds: Union[float, int]) -> str:
    """Convert a duration in seconds to a "DD:HH:MM:SS" string."""
    # NOTE(xames3): Inspired from `timedelta` class of datetime module.
    mm, ss = divmod(int(seconds), 60)
    hh, mm = divmod(mm, 60)
    dd, hh = divmod(hh, 24)
    return f"{dd:02d}:{hh:02d}:{mm:02d}:{ss:02d}"
|
4bd16548386737d2408d5a9e732ca4ce4842698c
| 39,733 |
def unpack_and_add(l, c):
    """Convenience function to allow me to add to an existing list
    without altering that list."""
    t = [a for a in l]
    t.append(c)
    return t
|
0e40a59bb39d855bf09edb65990a91979d23da61
| 39,739 |
def reg_n_correct(prediction, y, significance=None):
    """Calculates the number of correct predictions made by a conformal
    regression model.
    """
    if significance is not None:
        idx = int(significance * 100 - 1)
        prediction = prediction[:, :, idx]
    low = y >= prediction[:, 0]
    high = y <= prediction[:, 1]
    correct = low * high
    return y[correct].size
|
190eec0754ef61bf2bac3b93d184233a6e2c3316
| 39,740 |
def color_from_code(code):
    """Generate a color based on a simple code
    Args:
        code (int): an integer going from 1 to 999
    Returns:
        [tuple]: the rgb color code
    """
    if code == 0:
        return (255, 255, 255)
    assert code < 1000
    color = [0, 0, 0]
    for i, div in enumerate([100, 10, 1]):
        digit = (code // div) % 10
        color[i] = (255 // 9) * digit
    return color
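# Each decimal digit drives one channel, scaled from 0-9 up to 0-252
# (255 // 9 == 28), so for example:
#
#   color_from_code(123)  # -> [28, 56, 84]  (R from 1, G from 2, B from 3)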
|
bf329b2dd4627f92ee37e2e11287ca9121718f67
| 39,744 |
def compareEvents(test, actualEvents, expectedEvents):
    """
    Compare two sequences of log events, examining only the keys which are
    present in both.
    @param test: a test case doing the comparison
    @type test: L{unittest.TestCase}
    @param actualEvents: A list of log events that were emitted by a logger.
    @type actualEvents: L{list} of L{dict}
    @param expectedEvents: A list of log events that were expected by a test.
    @type expectedEvents: L{list} of L{dict}
    """
    if len(actualEvents) != len(expectedEvents):
        test.assertEqual(actualEvents, expectedEvents)
    allMergedKeys = set()
    for event in expectedEvents:
        allMergedKeys |= set(event.keys())

    def simplify(event):
        copy = event.copy()
        for key in event.keys():
            if key not in allMergedKeys:
                copy.pop(key)
        return copy

    simplifiedActual = [simplify(event) for event in actualEvents]
    test.assertEqual(simplifiedActual, expectedEvents)
|
53216d1c77cf8d2e104197ee5f7fb32963505433
| 39,745 |
def arg_name(arg_index=lambda ctx: ctx["operands"][0].value):
    """
    Returns a lambda that gets the name of the argument at the given index.
    The index defaults to the first operand's value.
    """
    return lambda ctx: (ctx["arg_names"][arg_index(ctx)]
                        if arg_index(ctx) < len(ctx["arg_names"])
                        else "var%s" % arg_index(ctx))
|
7b8bb99f6bfe8860f92a66f62c702a87b2f46322
| 39,746 |
def bool(anon, obj, field, val):
    """
    Returns a random boolean value (True/False)
    """
    # Note: this deliberately shadows the builtin `bool` within its module.
    return anon.faker.bool(field=field)
|
c667627c02f295affc0c67b3db12812591cddfa9
| 39,748 |
def strip_chr(chr):
    """Removes the 'chr' prefix if present.
    Args:
        chr (str): The chromosome.
    Returns:
        str: The chromosome without a 'chr' prefix.
    Examples:
        >>> strip_chr('22')
        '22'
        >>> strip_chr('chr22')
        '22'
    """
    return chr[3:] if chr[0:3] == 'chr' else chr
|
39a833dda595140a38226ce5b52dd3b8fd97337d
| 39,757 |
def recording_in_db(rec_id, con):
    """
    Returns True if the recording with ID rec_id is already in the database
    pointed to by "con"
    Parameters
    ----------
    rec_id : int
        ID of the recording.
    con : SQLite3 connection
        Connection to open DB.
    Returns
    -------
    Bool - True if the rec_id is already an ID in the database, False otherwise
    """
    cur = con.cursor()
    rows = cur.execute("SELECT id from scores where id = ?", (rec_id,))
    r = rows.fetchone()
    return r is not None
|
8954ec39899e0d68483933cba66a2923c4b6d4f0
| 39,759 |
def centre_table(t):
    """Centre cells in a Markdown table."""
    lines = t.split("\n")
    return t.replace(lines[1], "|".join([":-:"] * (lines[0].count("|") - 1)))
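# A rough example, assuming a table with outer pipes; the second line of
# a Markdown table is its alignment row, which gets swapped for centred
# markers (one ":-:" per column):
#
#   centre_table("| a | b |\n|---|---|\n| 1 | 2 |")
#   # -> "| a | b |\n:-:|:-:\n| 1 | 2 |"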
|
e75daec6ba10bb7a361bb1fcd588673b6ad52336
| 39,760 |
def _format_names(name: str) -> str:
    """Format dictionary key names to be human friendly.
    Args:
        name (str): The Unicode type name.
    Returns:
        str: The formatted Unicode type name.
    """
    return name[0].upper() + name[1:].replace("_", " ")
|
5256bfad00079f658ea2a8a7d95f4fbea39cb6a1
| 39,762 |
def map_lbls_id(skills, label, id, lookup):
    """Map the skills labels to the skills ids
    Args:
        skills (df): skills dataframe
        label (str): col to create, either 'class_lbl' or 'subclass_lbl'
        id (str): col to use for mapping, either 'class_id' or 'subclass_id'
        lookup (dict): to use for mapping - keys of ids, values of labels
    Returns:
        df: skills dataframe with additional class label column
    """
    skills[label] = skills[id].astype(str).map(lookup)
    # The original compared `label` against the id column names, which per
    # the docstring can never match; comparing `id` looks like the intent.
    if id == "class_id":
        skills[id] = skills[id].astype(int)
    if id == "subclass_id":
        skills[id] = skills[id].astype(float)
    return skills
|
14751efb08aa0cbee4844a25c0119fb8e29ffe2e
| 39,769 |
import typing
import struct
def unpackMIMEFieldImplHeap(heap: bytes, start: int, unused_http: object) -> typing.List[int]:
    """
    Unpacks a MIMEFieldImpl from the given raw byte heap.
    Returns a list of the values of the data members of the stored object.
    """
    fmt = "4xL4II4i?PIP"
    fmt += "3PhH4s" * 16
    return list(struct.unpack(fmt, heap[start:start + struct.calcsize(fmt)]))
|
61ec683ca5b112fb1028853adb0b8ecfcdd12f35
| 39,773 |
def unprefix(prefix, d, all=False):
    """
    Returns a new dict by removing ``prefix`` from keys.
    If ``all`` is ``False`` (default) then drops keys without the prefix,
    otherwise keeping them.
    """
    d1 = dict(d) if all else {}
    d1.update((k[len(prefix):], v) for k, v in d.items() if k.startswith(prefix))
    return d1
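# Example:
#
#   unprefix("db_", {"db_host": "x", "port": 1})
#   # -> {"host": "x"}
#   unprefix("db_", {"db_host": "x", "port": 1}, all=True)
#   # -> {"db_host": "x", "port": 1, "host": "x"}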
|
81fc47898f9bde8c42b107b5838a3af6bfe3d7f5
| 39,774 |
from pathlib import Path
from datetime import datetime
def get_recording_start(fname):
    """parse the recording date from the emg filename"""
    parts = Path(fname).stem.split(" ")
    subject = parts[0][0:4]  # subject id is parsed here but not used
    recdate = datetime.strptime(parts[1], "%Y-%m-%d_%H-%M-%S")
    return recdate
|
598c76228496e6e5b46888ea30b42eb908194220
| 39,779 |
import struct
def _convert_filetime_to_timestamp(filetime):
    """
    Windows returns times as 64-bit unsigned longs that are the number
    of hundreds of nanoseconds since Jan 1 1601. This converts it to
    a unix timestamp.
    :param filetime:
        A FILETIME struct object
    :return:
        A unix timestamp (seconds since Jan 1 1970)
    """
    hundreds_nano_seconds = struct.unpack(
        b'>Q',
        struct.pack(
            b'>LL',
            filetime.dwHighDateTime,
            filetime.dwLowDateTime
        )
    )[0]
    seconds_since_1601 = hundreds_nano_seconds / 10000000
    return seconds_since_1601 - 11644473600
|
4e4f21b1f75ab367e66a136a58dc615c1d40cc5e
| 39,781 |
def get_region(b):
    """Tries to get the bucket region from Location.LocationConstraint
    Special cases:
        LocationConstraint EU defaults to eu-west-1
        LocationConstraint null defaults to us-east-1
    Args:
        b (object): A bucket object
    Returns:
        string: an aws region string
    """
    remap = {None: 'us-east-1', 'EU': 'eu-west-1'}
    region = b.get('Location', {}).get('LocationConstraint')
    return remap.get(region, region)
|
8a773d20348b3fc01f7bd426765546069f79f9d8
| 39,785 |
def safeFilename(filename):
    """Return a filename with only file safe characters"""
    validChars = '-_.()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return ''.join(c for c in filename if c in validChars)
|
450f392a1222741782c2b9c4d63d5757b534d6ed
| 39,786 |
def sorter(entries):
    """order a list of entries by descending date first and then by name
    alphanumerically"""
    # Relies on Python's stable sort: the inner pass orders by name, and
    # the outer pass by descending date preserves that order within equal dates.
    return sorted(
        sorted(entries, key=lambda x: x["name"]),
        key=lambda x: x["date"],
        reverse=True,
    )
|
697981b6b00f05e208fd2487c9ce56deef285d86
| 39,788 |
import pytest
@pytest.fixture  # the docstring calls this a fixture; the decorator is assumed to have been dropped in extraction
def tmpdirpath(tmpdir):
    """Convenience fixture to get the path to a temporary directory."""
    return str(tmpdir.dirpath())
|
4adf46e970fcdd00250af4d03db1b6b632bb3260
| 39,790 |
def _compute_pmf(gen_graph_distr):
    """
    Compute the probability mass function (PMF) of graphs. It can be seen as a normalization between 0 and 1, where
    each count is converted into a probability.
    :param gen_graph_distr: The distribution of generated graphs. It is a dictionary where, for each entry (key, value),
        the key is a graph (in graph6 format) and the value is the count of this graph. Make sure to generate enough
        graphs to get good results.
    :return: The probability mass function (PMF) of graphs. It is a dictionary where, for each entry (key, value), the
        key is a graph (in graph6 format) and the value is the probability to get this graph.
    """
    # Compute the sum of all counts.
    count_sum = sum(gen_graph_distr.values())
    # Compute the PMF.
    pmf = {graph: (count / count_sum) for graph, count in gen_graph_distr.items()}
    return pmf
|
d2ec2e50387464782910e52e4ea464752c20b9f1
| 39,794 |
def remove_duplicate_QUBO(QUBO_storage):
    """
    De-duplicates the QUBO storage list
    Parameters
    ----------
    QUBO_storage : list
        List of QUBOs (each QUBO is a dictionary).
    Returns
    -------
    unique : list
        de-duplicated list.
    """
    unique = []
    for a in QUBO_storage:
        if a not in unique:
            unique.append(a)
    return unique
|
7c0ece71be1def18de60fb1d72f69379dc0cf3a8
| 39,795 |
def add_bins_col_to_rank_df(df_feature,
                            n_bins,
                            bin_no_col='bin_no',
                            item_rank_col='equity_rank',
                            max_rank_col='max_rank'
                            ):
    """
    Description: This function takes as input a dataframe with ranks,
    and creates a column with the respective bin number computed from the rank.
    :param df_feature: Type pandas dataframe. feature and period level dataframe with rank values.
    :param n_bins: Type int. number of bins to split the equities into.
    :param bin_no_col: Type str. bin number column name.
    :param item_rank_col: Type str. individual item rank column name.
    :param max_rank_col: Type str. maximum possible rank column name.
    :return: Type pandas dataframe. feature and period level dataframe with bin assignments.
    """
    df_feature[bin_no_col] = 1 + (n_bins * (df_feature[item_rank_col] - 1) // df_feature[max_rank_col])
    return df_feature
|
380bec79d66d66cb3acd1e42de0edada76cc4024
| 39,799 |
def get_concat_level_bits(i, n, mul):
    """Create a string combining the bits of the current mul.
    Combine the bits of the multiplication of the current variable (at mul) by the
    i-th index of the previous variable.
    Args:
        i: An integer, the index of the previous variable.
        n: An integer, the number of bits in the bitvectors.
        mul: An integer, the index of the nested mul we're at.
    Returns:
        The resulting concat string.
    """
    concats = []
    if i > 0:
        concats.append(f"(concat m_{mul}_{i}_{i} #b{'0' * i})")
    else:
        concats.append(f"m_{mul}_0_0")
    if i < (n - 1):
        for j in range(i + 1, n):
            rhs = concats[j - i - 1]
            concat = ["(concat", f"m_{mul}_{i}_{j}", rhs + ")"]
            concats.append(" ".join(concat))
    return concats[-1]
|
3e6516c570ea128a6c9d12bca82fb35d45a6686e
| 39,803 |
import re
def remove_tag_and_contents(s, tag=None, tags=None):
    """
    >>> remove_tag_and_contents('hi there')
    'hi there'
    >>> remove_tag_and_contents('<p>hi</p> <style>p {font-weight: 400;}</style><p>there</p>', tag='style')
    '<p>hi</p> <p>there</p>'
    >>> remove_tag_and_contents('<span class="foo">hi there</span>', tag='span')
    ''
    >>> remove_tag_and_contents('<p>hi</p> <style>p {font-weight: 400;}</style><p>there</p>', tags=('p', 'style'))
    ' '
    >>> remove_tag_and_contents('<p>hi <span>there</span></p> <style>p {font-weight: 400;}</style><p>cat</p>', tags=('span', 'style'))
    '<p>hi </p> <p>cat</p>'
    >>> remove_tag_and_contents('<p>hi <span class="woot">there</span></p> <style>p {font-weight: 400;}</style><p>cat</p>', tags=('span', 'style'))
    '<p>hi </p> <p>cat</p>'
    >>> remove_tag_and_contents('<p>Hi There<object classid="clsid:38481807-CA0E-42D2-BF39-B33AF135CC4D" id=ieooui></object></p>', tag='object')
    '<p>Hi There</p>'
    >>> remove_tag_and_contents('<p>Hi </object>there</p>', tag='object')
    '<p>Hi there</p>'
    >>> remove_tag_and_contents('<p>Hi <br/>there</p>', tag='br')
    '<p>Hi there</p>'
    """
    if tag:
        tags = [tag]
    if isinstance(tags, (list, tuple)):
        for t in tags:
            # Tries to match a normal tag structure
            s = re.sub(pattern=r'<{tag}.*?>.*?</{tag}>'.format(tag=t), repl='', string=s)
            # Match any hanging opening or closing versions
            s = re.sub(pattern=r'</{tag}[^>]*>'.format(tag=t), repl='', string=s)
            s = re.sub(pattern=r'<{tag}[^>]*/ *>'.format(tag=t), repl='', string=s)
    return s
|
9c6506b39ff6f926cf9b03f691bd1b4ecbed6c4a
| 39,805 |
def integrate_euler_explicit(x_t, dx_dt, dt):
    """
    Explicit euler integration
    x(t+1) = x(t) + dx/dt * dt
    :param x_t: known value at timestep t
    :param dx_dt: derivative dx/dt
    :param dt: timestep
    :return: x(t+1); solution for the time t+1
    """
    x_tp1 = x_t + dx_dt * dt
    return x_tp1
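# A short sketch of driving the step function in a loop, integrating
# dx/dt = -x from x(0) = 1 (the exact solution is exp(-t)):
#
#   x, dt = 1.0, 0.01
#   for _ in range(100):
#       x = integrate_euler_explicit(x, -x, dt)
#   # x is now roughly exp(-1) ~ 0.37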
|
862feb02512142da98929aedc97707853b41242a
| 39,812 |
def has_attribute(t, key: str) -> bool:
    """
    Check if a callable has an attribute
    :param t: the callable
    :param key: the key, the attributes name
    :return: True if callable contains attribute, otherwise False
    """
    return hasattr(t, key)
|
09e39d98bfdd5b2d24a8b7b71c74383bf33eb5b1
| 39,815 |
def assert_errors(result, expected_errors):
    """Assert that result errors match expected errors
    Uses substring matching to correlate expected to actual errors.
    Raise if any expected error is not matched or if any actual
    errors are found that were not matched by an expected error.
    This function has O(n**2) complexity on the number of errors.
    """
    def find_and_remove(expected_error):
        for i, actual_error in enumerate(actual_errors):
            if expected_error in actual_error:
                del actual_errors[i]
                return True
        return False

    actual_errors = list(result.errors)
    missing_errors = [e for e in expected_errors if not find_and_remove(e)]
    errors = []
    if missing_errors:
        errors.append("missing expected errors:")
        errors.extend(missing_errors)
    if actual_errors:
        if errors:
            errors.append("")
        errors.append("unexpected errors:")
        errors.extend(actual_errors)
    assert not errors, "\n".join(errors)
|
6923c4edbc27c0c81ef884aabcaaad06ff4e317c
| 39,817 |
def linear_search(list, value):
    """This function takes a list as input and a value to find. Then it
    linearly searches for that value."""
    for i in range(len(list)):
        if list[i] == value:
            return i  # Returning the index
    return -1
|
39fdbbdaed7090275ac75e1df39592017494f5fb
| 39,818 |
def read_models(models_file):
    """
    Read the models file to get a list of filenames.
    :param models_file: models file path
    :type models_file: str
    :return: list of filenames
    :rtype: [str]
    """
    with open(models_file, 'r') as f:
        lines = f.readlines()
    lines = [line.strip() for line in lines]
    lines = [line for line in lines if line != '']
    files = [line.split(' ')[1] for line in lines]
    return files
|
bb0dbbcef77af7d3f04608a5a40fd8a8e94cf5a5
| 39,823 |
def unpad(data: bytes) -> bytes:
    """Unpad a previously padded string."""
    return data[: -ord(data[len(data) - 1 :])]
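# The last byte encodes the pad length (PKCS#7 style), e.g.:
#
#   unpad(b"hello\x03\x03\x03")  # -> b"hello"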
|
1fec7f3c08599b139e2e525b6c5af31e6f3681f3
| 39,829 |
def purge_duplicates(list_in):
    """Remove duplicates from list while preserving order.
    Parameters
    ----------
    list_in: Iterable
    Returns
    -------
    list
        List of first occurrences in order
    """
    # Algorithm taken from Stack Overflow,
    # https://stackoverflow.com/questions/480214. Content by Georgy
    # Skorobogatov (https://stackoverflow.com/users/7851470/georgy) and
    # Markus Jarderot
    # (https://stackoverflow.com/users/22364/markus-jarderot), licensed
    # under CC-BY-SA 4.0.
    # https://creativecommons.org/licenses/by-sa/4.0/.
    seen = set()
    seen_add = seen.add
    return [x for x in list_in if not (x in seen or seen_add(x))]
|
349211a9ad9b949fb7061e2b4ad21cb1ec3354f7
| 39,832 |
from typing import Dict
from typing import Any
def build_data_columns(hparams: Dict[str, Any]) -> Dict[str, Any]:
    """Build data columns from hyper-parameters.
    Args:
        hparams: hyper-parameters for the data columns.
    Returns:
        data columns.
    """
    try:
        input_columns = hparams["input"]
    except KeyError:
        input_columns = None
    try:
        target_columns = hparams["target"]
    except KeyError:
        target_columns = None
    # create dictionary
    if input_columns:
        data_columns = {"input": input_columns, "target": target_columns}
    else:
        data_columns = {"target": target_columns}
    return data_columns
|
21bfe8166234b7ed69aae7a3d49a0848c268611d
| 39,834 |
def get_mrca(pi, x, y):
    """
    Returns the most recent common ancestor of nodes x and y in the
    oriented forest pi.
    """
    x_parents = [x]
    j = x
    while j != 0:
        j = pi[j]
        x_parents.append(j)
    y_parents = {y: None}
    j = y
    while j != 0:
        j = pi[j]
        y_parents[j] = None
    # We have the complete list of parents for x and y back to root.
    mrca = 0
    j = 0
    while x_parents[j] not in y_parents:
        j += 1
    mrca = x_parents[j]
    return mrca
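# In an oriented forest, pi[j] is the parent of node j and 0 acts as the
# virtual root. For a tree where nodes 2 and 3 both have parent 1:
#
#   pi = [0, 0, 1, 1]   # pi[1] = 0, pi[2] = 1, pi[3] = 1
#   get_mrca(pi, 2, 3)  # -> 1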
|
faece8f1fabf09444f1d3f0d42c49ed6f510acd4
| 39,836 |
def stub_read(mote_id, chan_id, read_start_time):
    """
    A stub to return nothing of interest; well, a value that is more
    easily picked up as "invalid".
    """
    return -1
|
b3d94d002f9d112540d62ae1f985bf595b3f2558
| 39,837 |
import torch
def normalize_torch(img: torch.Tensor) -> torch.Tensor:
    """
    Standardize image tensor per channel
    Args:
    -----------
        img (torch.Tensor):
            input image tensor. shape (C, H, W).
    Returns:
    -----------
        torch.Tensor. Standardized image tensor. Shape (C, H, W).
    """
    img = img.float()
    chl_means = torch.mean(img.float(), dim=(1, 2))
    chl_stds = torch.std(img.float(), dim=(1, 2))
    img.sub_(chl_means.view(-1, 1, 1)).div_(chl_stds.view(-1, 1, 1))
    return img
|
f6459f8ff465cdb56ace492f4de114eee2321855
| 39,840 |
def format_string(string: str) -> str:
    """Replace specific unicode characters with ASCII ones.
    Args:
        string: Unicode string.
    Returns:
        ASCII string.
    """
    # str.replace returns a new string, so the result must be reassigned;
    # the original version discarded it and returned the input unchanged.
    string = string \
        .replace("\u2013", "-") \
        .replace("\u00a0", " ") \
        .replace("\u2018", "'") \
        .replace("\u2019", "'") \
        .replace("\u201c", '"') \
        .replace("\u201d", '"') \
        .replace("\u00ed", 'i')
    return string
|
2a7efea0816096c549642b00bee6f50a29ede0a2
| 39,842 |
import torch
def to_tensor(x):
    """Make x to Tensor."""
    try:
        return x.clone().detach().float()
    except AttributeError:  # x is not already a tensor
        return torch.tensor(x).float()
|
14df794b76b8d1d4b845e68b6af340522331dd82
| 39,844 |
def createNamespace(benchmarkInfo, benchmarkResult):
    """
    Creates a dictionary representing a namespace containing the member
    var/values on the benchmarkInfo and benchmarkResult passed in to eval/exec
    expressions in. This is usually used in place of locals() in calls to eval()
    or exec().
    """
    namespace = dict(benchmarkInfo.__dict__)
    namespace.update(benchmarkResult.__dict__)
    return namespace
|
79ff65e69e874c7d83085a7e5cc4c0df82ca572c
| 39,845 |
import torch
def npvec_to_tensorlist(vec, params):
    """ Convert a numpy vector to a list of tensor with the same dimensions as params
    Args:
        vec: a 1D numpy vector
        params: a list of parameters from net
    Returns:
        rval: a list of tensors with the same shape as params
    """
    loc = 0
    rval = []
    for p in params:
        numel = p.data.numel()
        rval.append(torch.from_numpy(vec[loc:loc + numel]).view(p.data.shape).float())
        loc += numel
    assert loc == vec.size, 'The vector has more elements than the net has parameters'
    return rval
|
3cbed80b3896d6f0610a057903f09728ccae0a30
| 39,846 |
def list_certificate_issuer_admins(client, vault_base_url, issuer_name):
    """ List admins for a specified certificate issuer. """
    return client.get_certificate_issuer(
        vault_base_url, issuer_name).organization_details.admin_details
|
37c8411b69c7bd3967d4ffe22c8b039236379625
| 39,850 |
def _set_antecedent(self, descendants):
    """
    Set antecedent property of descendants to current branch.
    Notes
    -----
    We want the clusters to know who they are related to. An antecedent is the
    immediate parent of a cluster. So when branching set the antecedent property
    of all descendants in the current branch to the branch itself. Also set the
    antecedent value of the current branch to 'None' to indicate that it doesn't
    have a parent.
    """
    for descendant in descendants:
        descendant._antecedent = self
    return self._antecedent
|
a69a21954ae44548ea62a4fc290ca6df857bc891
| 39,853 |
def udfize_def_string(code: str) -> str:
    """Given an unindented code block that uses 'input' as a parameter, and output as a
    return value, returns a function as a string."""
    return """\
def udf(input):
    {}
    return output
""".format(
        # join with four spaces so each continuation line stays indented
        # inside the generated function body (the extracted copy had the
        # join separator's whitespace collapsed)
        "    ".join(line for line in code.splitlines(True))
    )
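# For example (the indentation of the second body line comes from the join):
#
#   print(udfize_def_string("a = input + 1\noutput = a"))
#   # def udf(input):
#   #     a = input + 1
#   #     output = a
#   #     return output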
|
71084f68ff268eaaa2eec2f8f22394e963fdd894
| 39,858 |
def get_astronomical_twilight(times, events, value):
    """
    value = 0 for end of astronomical twilight
    value = 1 for the beginning of astronomical twilight (first occurrence)
    """
    try:
        zindex = events.tolist().index(value)
        at_time = times[zindex]
    except ValueError:  # value not present in events
        at_time = None
    return at_time
|
ce8780a833e6356ad169430720f6b4bdb555e8ed
| 39,859 |
import six
def construct_mirror_name(volume):
    """Constructs MirrorView name for volume."""
    return 'mirror_' + six.text_type(volume.id)
|
75ec30c8e5cf204f525301ea0fd988222c1d1cf5
| 39,870 |
def check_for_empty_string(input_data):
    """
    Checks if data presented by a user is empty.
    """
    if input_data.strip() == "":
        return 'All fields are required'
    return None
|
dab37e5778d1746e3e3d5c0985d9b24c56184aa3
| 39,871 |
def _count_dollars_before_index(s, i):
    """Returns the number of '$' characters right in front of s[i]."""
    dollar_count = 0
    dollar_index = i - 1
    while dollar_index > 0 and s[dollar_index] == '$':
        dollar_count += 1
        dollar_index -= 1
    return dollar_count
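# e.g. for s = "ab$$c" and i = 4 (the 'c'), the two dollars immediately
# before index 4 give:
#
#   _count_dollars_before_index("ab$$c", 4)  # -> 2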
|
4d140e63253ca0aee28f8bd6bb24e5a23e00a0f5
| 39,872 |
import math
def we_calc(xPLL, vd, Kp_PLL):
    """Calculate inverter frequency from PLL."""
    return (Kp_PLL * (vd) + xPLL + 2 * math.pi * 60.0)
|
b958909ce7481a46ae0fb0176d33f96926e3f607
| 39,876 |
def f(x):
    """
    A function for testing on.
    """
    return -(x + 2.0)**2 + 1.0
|
52d2c4a4dec5acaa34371a5cead727d94a36a478
| 39,878 |
from typing import List
def get_str_from_list(in_list: List, val_type: str = '', n_per_line: int = 10) -> str:
    """
    Pretty print of list of values.
    Parameters
    ----------
    in_list : List
        List of values to print.
    val_type : str
        Print style of type. Possible values: "float", "int", ""
    n_per_line : int
        Maximum number of values per line
    Returns
    -------
    str
        message
    """
    # format function
    min_num = min(in_list)
    if "e-" in str(min_num):
        precision = int(str(min_num).split("e-")[1]) + 2
    else:
        precision = len(str(min_num))
    float_precision = ' {:8.' + str(precision) + 'f}'
    format_value = lambda val: str(val)
    if val_type == 'float':
        format_value = lambda val: float_precision.format(val)
    elif val_type == 'int':
        format_value = lambda val: ' {:8d}'.format(val)
    # print each line
    message = ""
    for line_id in range(int(len(in_list) / n_per_line) + 1):
        message += ''.join(format_value(val) for val in
                           in_list[line_id * n_per_line: min(len(in_list), (line_id + 1) * n_per_line)]) + "\n"
    return message
|
2cc9efc0599abf5a01e92d8c818e4c2d0ec3c3b3
| 39,881 |
def count_fixed_points(p):
    """Return the number of fixed points of p as a permutation."""
    return sum(1 for x in p if p[x] == x)
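# The permutation is a list mapping index -> value; a fixed point is an
# index that maps to itself:
#
#   count_fixed_points([0, 2, 1, 3])  # -> 2 (positions 0 and 3)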
|
f64f845609f8624b2d05071e7f225a2c8f73cfb2
| 39,887 |
def get_symbols(wordslist):
    """
    Collect the set of letters from a list of strings.
    """
    symbols = []
    for word in wordslist:
        for x in word:
            x = x.lower()
            if not x.isalpha():
                raise ValueError("Only a-z and A-Z are allowed.")
            if x not in symbols:
                symbols.append(x)
    return sorted(symbols)
|
5d065aa910a86430758d36ec459ce46a548579f6
| 39,889 |
def format_headers(headers):
    """
    Parameters
    ----------
    headers : Headers to be formatted (List of str)
    Returns
    -------
    A list of dict, the right format for v.DataTable headers
    """
    out_headers = []
    for header in headers:
        header_dict = {'text': header, 'sortable': True, 'value': header}
        out_headers.append(header_dict)
    return out_headers
|
481acfe39c66addb3a2b396e66d195b44346ffc6
| 39,890 |
def k_fold_boundaries(values, folds):
    """Take a list of values and number of folds, return equally spaced boundaries as tuples"""
    return [
        (int((i / folds) * len(values)), int(((i + 1) / folds) * (len(values))))
        for i in range(folds)
    ]
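# e.g. splitting 10 items into 3 folds gives half-open index ranges:
#
#   k_fold_boundaries(list(range(10)), 3)  # -> [(0, 3), (3, 6), (6, 10)]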
|
8b8b3fe1e2b191e538fb5c96a58de01f59ef66a8
| 39,892 |
def echo_magic(rest):
    """
    Echo the argument, for testing.
    """
    return "print(%r)" % rest
|
c2d8a14c6ce7fbf9e41e548506bee3a667b7f1de
| 39,893 |
def per_model_mean(runs_df):
    """Computes the grouped mean by model type
    Args:
        runs_df (pd.Dataframe): A list of all the runs
    Returns:
        A pandas Dataframe with the mean of each model
    """
    overall = runs_df.drop(columns=['SEED', 'ID']).groupby(['TYPE', 'MODEL', 'DATASET_ID'],
                                                           as_index=False).mean()
    collected = overall.drop(columns=['DATASET_ID']).groupby(['TYPE', 'MODEL'])
    return collected.mean()
|
fd8c2fd10438df8239272551e8c73d1927449c03
| 39,895 |
def _measure(line):
    """
    Parse a measure line.
    Parse a measure line. Looks similar to the following: '# Measure BrainSeg, BrainSegVol, Brain Segmentation Volume, 1243340.000000, mm^3'
    Parameters
    ----------
    line: string
        A stats line.
    Returns
    -------
    list of strings
        A list of strings, containing the data on the line. The prefix, '# Measure', is discarded.
    """
    return line[10:].split(', ')  # ignore first 10 characters, the '# Measure' prefix.
|
073696bf4b650e947174daa563ed9550686f5758
| 39,907 |
def multi_mean(input, axes, keepdim=False):
    """
    Performs `torch.mean` over multiple dimensions of `input`.
    """
    axes = sorted(axes)
    m = input
    # Reduce the highest axis first so the remaining axis indices stay
    # valid when keepdim=False.
    for axis in reversed(axes):
        m = m.mean(axis, keepdim)
    return m
|
c636ed33d8db3e61dc96e80e32b17bdacad6d3a8
| 39,912 |
from pathlib import Path
def resolve_open_in_colab(content, page_info):
    """
    Replaces [[open-in-colab]] special markers by the proper svelte component.
    Args:
        content (`str`): The documentation to treat.
        page_info (`Dict[str, str]`, *optional*): Some information about the page.
    """
    if "[[open-in-colab]]" not in content:
        return content
    package_name = page_info["package_name"]
    language = page_info.get("language", "en")
    page_name = Path(page_info["page"]).stem
    nb_prefix = f"/github/huggingface/notebooks/blob/main/{package_name}_doc/{language}/"
    nb_prefix_colab = f"https://colab.research.google.com{nb_prefix}"
    nb_prefix_awsstudio = f"https://studiolab.sagemaker.aws/import{nb_prefix}"
    links = [
        ("Mixed", f"{nb_prefix_colab}{page_name}.ipynb"),
        ("PyTorch", f"{nb_prefix_colab}pytorch/{page_name}.ipynb"),
        ("TensorFlow", f"{nb_prefix_colab}tensorflow/{page_name}.ipynb"),
        ("Mixed", f"{nb_prefix_awsstudio}{page_name}.ipynb"),
        ("PyTorch", f"{nb_prefix_awsstudio}pytorch/{page_name}.ipynb"),
        ("TensorFlow", f"{nb_prefix_awsstudio}tensorflow/{page_name}.ipynb"),
    ]
    formatted_links = ['    {label: "' + key + '", value: "' + value + '"},' for key, value in links]
    svelte_component = """<DocNotebookDropdown
  classNames="absolute z-10 right-0 top-0"
  options={[
"""
    svelte_component += "\n".join(formatted_links)
    svelte_component += "\n]} />"
    return content.replace("[[open-in-colab]]", svelte_component)
|
18ab284b6d750743c57fc7746d64a0a8c8ed3b17
| 39,913 |
def get_odd_numbers(num_list: list) -> list:
    """Returns a list of odd numbers from a list."""
    return [num for num in num_list if num % 2 != 0]
|
e7ff779e18b478bf1717912a1ad5b7130eed9b42
| 39,914 |
def find_subsequence_location(sequence, subsequence):
    """ Finds the start and end index of the first occurrence
    of a given subsequence within a larger list. Returns
    the two indices corresponding to the position of
    the first and last token of the subsequence.
    Assumes subsequence is known to be in sequence.
    """
    assert len(sequence) >= len(subsequence), "subsequence too long"
    # Scan every window. The original token-by-token state machine never
    # re-tried a match starting at the token that broke the previous one,
    # so e.g. sequence [a, a, b] with subsequence [a, b] was missed.
    for start_idx in range(len(sequence) - len(subsequence) + 1):
        if sequence[start_idx:start_idx + len(subsequence)] == subsequence:
            return start_idx, start_idx + len(subsequence) - 1
    raise ValueError("Subsequence not found in sequence")
|
3032fa525edc7bd7da718e50356e6aa4984c4f9a
| 39,922 |
def repository_dirname(template):
    """ Utility function getting repository name from the link
    Example: for "https://github.com/user/SomeRepo" should return "SomeRepo"
    """
    return template.split('/')[-1]
|
8f2987762c93f041458aeccc58133ad6b50a1b96
| 39,924 |
def isInt(val):
    """ Return (bool) whether the input is an integer """
    return val == int(val)
|
2531179606c2bfae1f50c187d20c7390dc8075d1
| 39,929 |
from PIL import Image
def blackMask(image, alpha=.5):
    """
    Draw black mask on image.
    :param image: PIL.Image - image to draw
    :param alpha: float - black mask intensity (0 - 1)
    """
    mask = Image.new('RGB', image.size)
    im = Image.blend(image, mask, alpha)
    return im
|
c595e98d062ddc63f4d0754be1514c2ded85b0dd
| 39,933 |
import requests
def getHtmlText(url):
    """Send a request, read the response, and decode it as HTML.
    Args:
        url (String): URL of the target page
    Returns:
        String: the decoded HTML content
    """
    response = requests.get(url)
    return response.text
|
b9dc79ed546552b6ebd2c4f5a4384a67ebf9422f
| 39,934 |
def nth_child_edge_types(max_child_count):
    """Constructs the edge types for nth-child edges.
    Args:
        max_child_count: Maximum number of children that get explicit nth-child
            edges.
    Returns:
        Set of edge type names.
    """
    return {f"CHILD_INDEX_{i}" for i in range(max_child_count)}
|
dda3400dbc372d848a65c1409c9a88115b6e74be
| 39,935 |
import torch
def box1_in_box2(corners1: torch.Tensor, corners2: torch.Tensor):
    """check if corners of box1 lie in box2
    Convention: if a corner is exactly on the edge of the other box, it's also a valid point
    Args:
        corners1 (torch.Tensor): (B, N, 4, 2)
        corners2 (torch.Tensor): (B, N, 4, 2)
    Returns:
        c1_in_2: (B, N, 4) Bool
    """
    a = corners2[:, :, 0:1, :]  # (B, N, 1, 2)
    b = corners2[:, :, 1:2, :]  # (B, N, 1, 2)
    d = corners2[:, :, 3:4, :]  # (B, N, 1, 2)
    ab = b - a  # (B, N, 1, 2)
    am = corners1 - a  # (B, N, 4, 2)
    ad = d - a  # (B, N, 1, 2)
    p_ab = torch.sum(ab * am, dim=-1)  # (B, N, 4)
    norm_ab = torch.sum(ab * ab, dim=-1)  # (B, N, 1)
    p_ad = torch.sum(ad * am, dim=-1)  # (B, N, 4)
    norm_ad = torch.sum(ad * ad, dim=-1)  # (B, N, 1)
    # NOTE: the expression looks ugly but is stable if the two boxes are exactly the same
    # also stable with different scale of bboxes
    cond1 = (p_ab / norm_ab > -1e-6) * (p_ab / norm_ab < 1 + 1e-6)  # (B, N, 4)
    cond2 = (p_ad / norm_ad > -1e-6) * (p_ad / norm_ad < 1 + 1e-6)  # (B, N, 4)
    return cond1 * cond2
|
6407b8bafea25ca7606c5d205004d219b22cbffc
| 39,937 |
def dispersion(vmin, dx, fc, coeff=2.0):
    """Compute the maximum grid spacing that avoids numerical dispersion.
    Parameters
    ----------
    vmin: float
        Minimum velocity of the medium
    dx: float
        Grid discretization.
    fc: float
        Central (peak) frequency of the source wavelet.
    coeff: float
        Coefficient to compute the maximum frequency of the wavelet:
        fmax = coeff * fc.
    Returns
    -------
    dx_no_dispersion: float
        Maximum grid spacing free of dispersion.
    """
    fmax = coeff * fc
    dx_no_dispersion = vmin / fmax / 6.0
    if dx > dx_no_dispersion:
        print('The simulation will show dispersion!')
    return dx_no_dispersion
1097d10c900c726ea10430ec4be905f428bdd1b3
| 39,939 |
def reverse_index(index, length):
    """
    Reverse the passed in index as if the index direction was flipped.
    Taking the string "hello" as an example the regular indexes for
    each letter are::
        01234
        hello
    Reversing the indexes yields::
        43210
        hello
    This allows easily indexing into a bitdef on bitdef indexing terms.
    Args:
        index (int): The index position to reverse.
        length (int): The length of the array being indexed into.
    Returns:
        int: The reversed index.
    """
    return length - index - 1
|
a47c2db2aeec9369593936a34885ee2b04722e95
| 39,940 |
import time
import calendar
def adjust_time(t, delta):
    """
    Adjust a (UTC) struct_time by delta seconds
    :type t: struct_time
    :type delta: int
    :param delta: seconds
    :rtype: struct_time
    """
    return time.gmtime(calendar.timegm(t) + delta)
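# Round-tripping through calendar.timegm keeps the arithmetic in UTC,
# including rollover across a day boundary:
#
#   t = time.strptime("2021-01-01 23:59:30", "%Y-%m-%d %H:%M:%S")
#   adjust_time(t, 60).tm_min  # -> 0 (now 2021-01-02 00:00:30)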
|
4d2ff67523b05a6f0af02ebb3b230e8875a39ae4
| 39,943 |
from typing import List
from typing import Any
def remove_duplicates_in_order(list: List[Any]) -> List[Any]:
    """
    Remove duplicates in a list, keeping the first occurrence and preserving order.
    Parameters
    ----------
    list: input list
    Returns
    -------
    deduplicated list
    """
    # see https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order
    seen = set()
    seen_add = seen.add
    return [x for x in list if not (x in seen or seen_add(x))]
|
4444aa8e4ca1fa08385b4114055d9bd9eaebcd03
| 39,944 |
def frame_number(frame, speed, particles):
    """
    Creates the text for the animation, called every frame to get the
    text to be displayed.
    You can supply your own text function; it must have the same input and
    output variables as this one.
    The first line to be run is pos = frame*speed to get the current
    position of the data to be viewed
    Parameters
    ----------
    frame : int
        The frame number of the animation.
    speed : int
        The number of cords jumped per frame
    particles : object
        An object which contains all the particles data
    Returns
    -------
    str
        The text to be displayed for a given frame
    """
    pos = frame * speed
    return f"Position: {pos}, Frame: {frame}"
|
dd91de071c826e1d9e0b46f4d7d6fcbcbb0f358c
| 39,948 |
def comma_separated_list(x):
    """
    Parse a restructured text option as a comma-separated list of strings.
    """
    return x.split(',')
|
c75c80c5f19fe1843c8ed8e8cc64e4233533bf20
| 39,949 |