| content (string, 39–14.9k chars) | sha1 (40-char string) | id (int64, 0–710k) |
|---|---|---|
def translate_macros(macro_dic: dict, data: str) -> str:
"""Expects a macro dictionary key:value (macro_name:macro_value)
and a string to replace macro. \n
It will replace the macro_name for the macro_value in any string.
"""
for macro_name, macro_value in macro_dic.items():
data = data.replace(macro_name, macro_value)
return data
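# Usage sketch (illustrative macro names, not part of the original record):
# >>> translate_macros({"{{HOST}}": "example.com"}, "ping {{HOST}}")
# 'ping example.com'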
|
dff25b05229477db2ce2de5eae98585642e13d12
| 22,908 |
import operator
def get_max_queue(queues):
"""Retrieve a queue with max messages as tuple."""
queue, value = max(queues.items(), key=operator.itemgetter(1))
return (queue, value)
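# Usage sketch (illustrative queue names, not part of the original record):
# >>> get_max_queue({"emails": 3, "jobs": 10})
# ('jobs', 10)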
|
69b52a6bac89cc61e84f639fed06cffe7a0d697f
| 22,910 |
def set_gpu(gpu_mon, args):
"""
Sets the GPU visibility based on the passed arguments. Takes an already
initialized GPUMonitor object. Sets GPUs according to args.force_GPU, if
specified, otherwise sets first args.num_GPUs free GPUs on the system.
Stops the GPUMonitor process once GPUs have been set
If gpu_mon is None, this function does nothing
Args:
gpu_mon: An initialized GPUMonitor object or None
args: argparse arguments
Returns: The number of GPUs that was actually set (different from
args.num_GPUs if args.force_GPU is set to more than 1 GPU)
"""
num_GPUs = args.num_GPUs
if gpu_mon is not None:
if not args.force_GPU:
gpu_mon.await_and_set_free_GPU(N=num_GPUs, sleep_seconds=120)
else:
gpu_mon.set_GPUs = args.force_GPU
num_GPUs = len(args.force_GPU.split(","))
gpu_mon.stop()
return num_GPUs
|
09c7b4a9956bd0f82666046c890b3e86cfa9d6a9
| 22,911 |
import warnings
import ctypes
def pack(ctypes_obj):
"""Convert a :mod:`ctypes` structure into a Python string.
Args:
ctypes_obj (ctypes.Structure): The :mod:`ctypes` structure to convert to a string.
Returns:
New Python string containing the bytes from memory holding *ctypes_obj*.
.. deprecated:: 1.5
This function is deprecated, use ``bytes(ctypes_obj)`` instead.
"""
warnings.warn(
"This function is deprecated and will be removed, use ``bytes(ctypes_obj)`` instead.",
DeprecationWarning,
stacklevel=2,
)
return ctypes.string_at(ctypes.addressof(ctypes_obj), ctypes.sizeof(ctypes_obj))
|
7ae5a320e93fbcbcec09b5d5ee587f266fa75f9e
| 22,913 |
def pixel_to_terrain_type(pixel):
"""
Convert an RGBA pixel to a terrain type.
The B channel composes the lower 8 bits, the A channel composes the upper 8 bits.
Note that some images do not have an alpha channel.
"""
if len(pixel) > 3:
return pixel[2] & 0xff | ((pixel[3] & 0xff) << 8)
else:
return pixel[2] & 0xff
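# Usage sketch (illustrative pixel values, not part of the original record):
# >>> pixel_to_terrain_type((255, 0, 7, 2))   # B=7 is the low byte, A=2 the high byte
# 519
# >>> pixel_to_terrain_type((255, 0, 7))      # no alpha channel
# 7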
|
a7a5538756b0566b6a2e978bf3a8fd28cbb0258b
| 22,916 |
def remove_transition(net, trans):
"""
Remove a transition from a Petri net
Parameters
----------
net
Petri net
trans
Transition to remove
Returns
----------
net
Petri net
"""
if trans in net.transitions:
in_arcs = trans.in_arcs
for arc in in_arcs:
place = arc.source
place.out_arcs.remove(arc)
net.arcs.remove(arc)
out_arcs = trans.out_arcs
for arc in out_arcs:
place = arc.target
place.in_arcs.remove(arc)
net.arcs.remove(arc)
net.transitions.remove(trans)
return net
|
85283735ec41e76ff491e562f0b2d2d115fd4114
| 22,923 |
def get_full_intent(intent_json):
"""recovers the full intent json from standalized intent.
Basically we will add fields that are omitted becauase their values are
all or 2 back to the intent json.
"""
# dep/ret time
if 'departure_time' not in intent_json:
intent_json['departure_time'] = 'all'
if 'return_time' not in intent_json:
intent_json['return_time'] = 'all'
# class
if 'class' not in intent_json:
intent_json['class'] = 'all'
# max_connections
if 'max_connections' not in intent_json:
intent_json['max_connections'] = 2
# airline_preference
if 'airline_preference' not in intent_json:
intent_json['airline_preference'] = 'all'
return intent_json
|
3565c36a9bd07f4f84efb4542dc5132ee76b28cb
| 22,925 |
def upper(string):
"""
Return string converted to upper case.
"""
return str(string).upper()
|
fa2bfe354e3308aec8ee37635a0594b1d5699d6e
| 22,935 |
def user_session(request):
"""Add user session information to the template context."""
return {
'chaospizza_user': {
'name': request.session.get('username', None),
'is_coordinator': request.session.get('is_coordinator', False),
'coordinated_order_slug': request.session.get('order_slug', None),
}
}
|
6e98c21cc30508db793dc79ef580757d71e9d6c4
| 22,941 |
def getEventTime(cursor, event_id=None, event_time=None, order="DESC"):
"""Get the event time.
If the event_time is specified, simply return it. Else if the event_id is
specified, return its time. Else return the most recent event.
Args:
cursor: Database cursor.
event_id: Event id.
event_time: Event time.
order: ASC or DESC.
Returns:
The event time string.
"""
if event_id == None:
cursor.execute("SELECT id FROM Event ORDER BY time %s"%order)
event_id = int(cursor.fetchone()["id"])
if event_time == None:
cursor.execute("SELECT time FROM Event WHERE id=%d"%event_id)
event_time = str(cursor.fetchone()["time"])
return event_time
|
0e662d92f8da79ce154bd4951bb247b083b09345
| 22,943 |
from typing import List
def get_origin_position_factor(matrix: List[List[float]]):
"""
Calculate average distance between stops.
:matrix: list of lists containing all to all distances
return float
"""
# NOTE: matrix is processed to integers for solver (d * 100)
distance_factor = (sum(matrix[0]) / 100) / len(matrix[0][1:])
return distance_factor
|
daa1ab06ec8a32bf72cf87814edc22e77271cfce
| 22,953 |
def map_to_filtered(rev):
"""Gets hash of rev after filtering.
If rev hasn't been filtered (yet), returns None.
Equivalent to the `map` function exposed by git-filter-branch, except that
function returns rev if the revision hasn't yet been filtered, and that this
function raises an error if rev maps to multiple commits.
"""
#if not workdir:
# raise RuntimeError("workdir environment variable is empty?")
mapfile = '../map/%s' % rev
try:
with open(mapfile, 'r') as f:
lines = f.read().strip().split('\n')
if len(lines) != 1:
raise RuntimeError("mapfile %s doesn't contain a single line: %s" % (mapfile, str(lines)))
return lines[0]
except IOError:
return None
|
0ebbcb04881435c2a0a1ca217b52155ef6e0ad7f
| 22,954 |
def get_hyperparameters(args):
"""
Store all arguments in `main.py`, except `SM_CHANNEL`
and `model`, in a dictionary
return:
Dictionary of selected arguments passed to `main.py`
"""
return {param : val for param, val in args.__dict__.items()
if (not param.endswith('_dir')) and param != 'model'}
|
daeb40eaed3dca172226d29ebd1afcd8345b8802
| 22,957 |
import base64
def decode_attachment_payload(message):
"""Decodes a message from Base64, if fails will outputs its str(message)
"""
msg = message.get_payload()
try:
# In some cases the body content is empty and cannot be decoded.
msg_info = base64.b64decode(msg)
except TypeError:
msg_info = str(msg)
return msg_info
|
633ab2a9572ba481bc5a348ca6717d948b4ffa06
| 22,958 |
import torch
def species_split(dataset, train_valid_species_id_list=[3702, 6239, 511145,
7227, 10090, 4932, 7955],
test_species_id_list=[9606]):
"""
Split dataset based on species_id attribute
:param dataset:
:param train_valid_species_id_list:
:param test_species_id_list:
:return: train_valid dataset, test dataset
"""
# NB: pytorch geometric dataset object can be indexed using slices or
# byte tensors. We will use byte tensors here
train_valid_byte_tensor = torch.zeros(len(dataset), dtype=torch.uint8)
for id in train_valid_species_id_list:
train_valid_byte_tensor += (dataset.data.species_id == id)
test_species_byte_tensor = torch.zeros(len(dataset), dtype=torch.uint8)
for id in test_species_id_list:
test_species_byte_tensor += (dataset.data.species_id == id)
assert ((train_valid_byte_tensor + test_species_byte_tensor) == 1).all()
train_valid_dataset = dataset[train_valid_byte_tensor]
test_valid_dataset = dataset[test_species_byte_tensor]
return train_valid_dataset, test_valid_dataset
|
3693eb9122baf79e41b404755fb500fcbaed7c6c
| 22,959 |
def skip_punishment(player, table, lied_card=None, turns_to_wait=0):
"""
Function used to punish player with turns to skip.
:param player: Player object
:param table: list with cards on table
:param lied_card: tuple with last lied card
:param turns_to_wait: integer value of take card punishment
:return: tuple with last lied card, integer value of turns to skip
"""
player.turns_to_skip = turns_to_wait - 1
player.print_foo(f'{player.name} will have to skip this and next {player.turns_to_skip} turns.')
turns_to_wait = 0
if lied_card:
table.append(lied_card)
lied_card = None
return lied_card, turns_to_wait
|
a3f141243d78eb1919536c414c278fa1ea92637f
| 22,965 |
def calc_f1(precision: float, recall: float) -> float:
"""
Compute F1 from precision and recall.
"""
return 2 * (precision * recall) / (precision + recall)
|
4a20816b0f5b2457826c146da52e352c39b88b16
| 22,969 |
def selectivity(weights, thresh=0.1):
"""Computes normalized selectivity of a set of `weights`.
This is something like "peakiness" of the distribution.
Currently, this is computed by looking at how many weights
are above the given `thresh`.
The result is normalized by the length of `weights`.
"""
return len([w for w in weights if w > thresh])/float(len(weights))
|
b526070770d42f5923c410f6cc4087f858d7c265
| 22,970 |
def subone(bv):
"""
Subtract one bit from a bit vector
>>> print subone(BitVector(bitstring='1111'))
1110
>>> print subone(BitVector(bitstring='0010'))
0001
>>> print subone(BitVector(bitstring='0000'))
1111
@param bv: Bits to add one bit to the right side
@type bv: BitVector
@rtype: BitVector
"""
new = bv
r = range(1,len(bv)+1)
for i in r:
index = len(bv)-i
if 1==bv[index]:
new[index]=0
break
new[index]=1
return new
|
6f30489efb76dd27f8e7ca09003a97278d497c90
| 22,974 |
def ceiling_root(num, pwr):
"""
Returns the integer ``num ** (1. / pwr)`` if num is a perfect square/cube/etc,
or the integer ceiling of this value, if it's not.
"""
res = num ** (1. / pwr)
int_res = int(round(res))
if int_res ** pwr == num:
return int_res
else:
return int(res + 1)
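# Usage sketch (illustrative values, not part of the original record):
# >>> ceiling_root(27, 3)   # perfect cube
# 3
# >>> ceiling_root(28, 3)   # not a perfect cube, rounded up
# 4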
|
3f36275e834ae32ef1bcae0cecc7b733d2e54a69
| 22,977 |
def _parse_fingerprint_terraform(line, host=None):
"""Parse SSH host fingerprint from terraform output line"""
fingerprint = None
if line.find('(remote-exec)') > 0:
host = line.split(' ')[0].split('.')[-1]
fingerprint = line.split(': ', 2)[1]
return host, fingerprint
|
ef57e8c0a505af88e583eb913c3049448bc5077e
| 22,979 |
def split_host_port(host_port):
"""Return a tuple containing (host, port) of a string possibly
containing both. If there is no port in host_port, the port
will be None.
Supports the following:
- hostnames
- ipv4 addresses
- ipv6 addresses
with or without ports. There is no validation of either the
host or port.
"""
colon_count = host_port.count(':')
if colon_count == 0:
# hostname or ipv4 address without port
return host_port, None
elif colon_count == 1:
# hostname or ipv4 address with port
return host_port.split(':', 1)
elif colon_count >= 2:
# ipv6 address, must be bracketed if it has a port at the end, i.e. [ADDR]:PORT
if ']:' in host_port:
host, port = host_port.split(']:', 1)
if host[0] == '[':
# for valid addresses, should always be true
host = host[1:]
return host, port
else:
# no port; may still be bracketed
host = host_port
if host[0] == '[':
host = host[1:]
if host[-1] == ']':
host = host[:-1]
return host, None
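# Usage sketch (illustrative addresses, not part of the original record):
# >>> split_host_port("[2001:db8::1]:443")
# ('2001:db8::1', '443')
# >>> split_host_port("localhost")
# ('localhost', None)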
|
89fd98aee3a07406c478eca82922bdecf5cb7078
| 22,982 |
import json
from typing import List
def load_encoding(path: str) -> List[str]:
"""
Load character table from OCR engine configuration
:param path: Path to OCR engine config file.
:return: array containing characters from encoding
"""
with open(path, "r") as f:
engine = json.load(f)
return engine['characters']
|
03d4836f1b4d792f3e7dae17e658df908feb24e3
| 22,986 |
def get_standard_action(params):
"""
Return the action that needs to be executed.
Based on the module parameters specified, a given action
needs to be executed. The process of determining this action
can be quite verbose. In order to keep the module code readable,
we externalize this decision process.
"""
non_determistic_params = ['dci_login', 'dci_password', 'dci_cs_url',
'dci_client_id', 'dci_api_secret', 'embed',
'mime', 'state', 'where', 'active']
deterministic_params = {k: v for k, v in params.items()
if k not in non_determistic_params}
non_empty_values = [item for item in deterministic_params
if deterministic_params[item] is not None]
if 'state' in params and params['state'] == 'absent':
return 'delete'
elif 'status' in non_empty_values:
return 'status'
elif not non_empty_values:
return 'list'
elif non_empty_values == ['id']:
return 'get'
elif 'id' in non_empty_values:
return 'update'
return 'create'
|
f8d972ee9305d030bdc52cc8bb6f9210e8dac595
| 22,990 |
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
|
2ae51537ee38af02bcbd1c952d92a702192d5866
| 22,995 |
def isiterable(obj):
"""
Return whether an object is iterable
"""
# see https://docs.python.org/3/library/collections.abc.html#collections.abc.Iterable
try:
iter(obj)
return True
except:
return False
|
6c6cc1af2eccaf8e10809da964271471720abdf4
| 22,997 |
def kel2c(tempK):
"""Convert a temperature in Kelvin to degrees Celsius."""
return tempK - 273.15
|
91604c71fc5d7aaceea1f435ae1e5781d0dce169
| 22,998 |
def aslist(value):
"""
Return a list of strings, separating the input based on newlines.
"""
return list(filter(None, [x.strip() for x in value.splitlines()]))
|
249419c02d92d22d4c7cf0a7788ed09dc3bd19ce
| 23,002 |
import random
def uusi_pakka(pakkojen_lkm=1):
"""Palauttaa korttipakan satunnaisesss järjestyksessä.
Yksittäinen kortti on esimerkiksi (12, '♥')
Parametrit
----------
pakkojen_lkm : int
Kuinka monta 52 kortin pakkaa sekoittaa yhteen. Oletus 1.
Palauttaa
---------
array of (int, str)
"""
maat = ['♠', '♥', '♦', '♣']
arvot = list(range(1, 14))
pakka = pakkojen_lkm * [(arvo, maa) for maa in maat for arvo in arvot] # Cards are in order
random.shuffle(pakka) # Shuffle the deck
return pakka
|
4be7987e0e4fe156bfa66305b1a607ce3328e6bd
| 23,004 |
def get_apigateway_profile_groups_from_header(groups_header: str) -> list:
"""
Extracts apigateway consumer groups from header
:param groups_header:
:return:
"""
if groups_header not in (None, ''):
return list(map(str.strip, groups_header.split(',')))
return []
|
bc3f1b49892df78574d471e58b2c8ffb47b2584f
| 23,006 |
def copy_dict(source_dict, diffs):
"""Returns a copy of source_dict, updated with the new key-value pairs in diffs."""
result = dict(source_dict)
result.update(diffs)
return result
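# Usage sketch (illustrative dicts, not part of the original record):
# >>> copy_dict({"a": 1, "b": 2}, {"b": 3, "c": 4})
# {'a': 1, 'b': 3, 'c': 4}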
|
971ea9e79d5a3b279d69d578464d767988891494
| 23,013 |
def __veja(soup):
"""
Gets the most read news from the Veja page
:param soup: the BeautifulSoup object
:return: a list with the most read news from the Veja Page
"""
news = []
headers = soup.find('section', class_='block most-read dark').find_all('h2')
for h2 in headers:
news.append(dict(title=h2.next.next.next.string,
link=h2.parent['href']))
return news
|
224f2b92711bff9baab8a40f89a2f2f0e9b83880
| 23,027 |
def green(frame):
"""
gets the green channel of the frame
:param frame: the frame
:return: the green channel only (as a grayscale frame)
"""
return frame[:, :, 1]
|
86a26fc3462c69ff2fbec6a3df50527c1c14a9b9
| 23,029 |
def filter_flash_errors(glm_data, LL_coords, UR_coords):
"""
There appears to be dense lines of erroneous flashes around 26 N on
10 Sep & 11 Sep from 1500-2100z. This function will remove these, but is unable
to distinguish if a flash is genuine or erroneous.
Parameters
----------
glm_data : list of str
List of GLM flash latitudes & longitudes
LL_coords : tuple of str
Lower left-hand coordinates of the bounding box containing the area
of false flashes
UR_coords : tuple of str
Upper right-hand coordinates of the bounding box containing the area
of false flashes
Returns
-------
filtered_flashes : tuple of lists
Filtered GLM flash coordinates. Format: (flash_lons, flash_lats)
"""
filtered_lons = []
filtered_lats = []
lons = glm_data[0]
lats = glm_data[1]
min_lat = LL_coords[1]
max_lat = UR_coords[1]
min_lon = LL_coords[0]
max_lon = UR_coords[0]
for idx, lon in enumerate(lons):
lat = lats[idx]
if ((lat < min_lat or lat > max_lat) or (lon < min_lon or lon > max_lon)):
filtered_lons.append(lon)
filtered_lats.append(lat)
return (filtered_lons, filtered_lats)
|
5ff8ca4dbb82b633e36105527ac7bf18db7b5c94
| 23,032 |
def extract_subtypes(mappings, data_type):
"""Extract subtypes of given data types. e.g: for data type "alert", possible subtypes are "dlp", "policy" etc.
:param mappings: mapping dictionary loaded from the JSON mapping file
:param data_type: data type for which subtypes are to be fetched
:return: extracted sub types
"""
taxonomy = mappings["taxonomy"].get(data_type, {})
return [subtype for subtype in taxonomy]
|
82db0ad6c9ac679be806678eaf1b5b21cc9d95c4
| 23,034 |
def load_file(path):
"""Load contents of a file"""
with open(path) as inf:
data = inf.read()
return data
|
9398d26379532d6c3ed407335ade608e9f52d18a
| 23,041 |
import string
import random
def strings(n, chars=string.ascii_letters):
""" Return random string of N characters, sampled at random from `chars`.
"""
return ''.join([random.choice(chars) for i in range(n)])
|
bc7e2cab22b4d0a98b3e93a7199c1ec2b326ee68
| 23,043 |
def search_caches(key, cache_list, raise_error=True):
"""Find UUID if it is in the cache_list dicts
Parameters
----------
key : str
the UUID we're looking for
cache_list : mapping or list of mapping
caches that the objects are stored in (will be searched in order of
the list). Mapping is {uuid: object}
raise_error : bool
whether to raise a KeyError if UUID not found; default True. If
False, object not found returns None
Returns
-------
object or None
the object with the given UUID, or ``None`` if the object is not
found and ``raise_error`` is ``False``.
"""
if key is None:
return None # some objects allow UUID to be None
if not isinstance(cache_list, list):
cache_list = [cache_list]
obj = None
for cache in cache_list:
if key in cache:
obj = cache[key]
break
if obj is None and raise_error:
raise KeyError("Missing key: " + str(key))
return obj
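# Usage sketch (illustrative UUIDs and objects, not part of the original record):
# >>> search_caches("uuid-1", [{"uuid-1": "obj1"}, {"uuid-2": "obj2"}])
# 'obj1'
# >>> search_caches("missing", {"uuid-1": "obj1"}, raise_error=False) is None
# True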
|
e2996e37604e26c937a046dc5f88674060aba738
| 23,045 |
def grayscale(rgb):
"""Converts image to grayscale.
"""
return rgb.dot([0.299, 0.587, 0.114])
|
baf64556a5cdce8ad49023e96271f1358f618db7
| 23,048 |
import re
def get_name_slug(name: str) -> str:
"""Get the stub of the organization's name.
Arguments:
name {str} -- Organization name.
Returns:
str -- Organization name stub.
"""
return '-'.join(re.split(r'\W', name.lower()))
|
ef3fce6346a7aabfcebcc6a6e72d1e718e0ed4d2
| 23,049 |
def is_task_list(fn):
"""Check if a function is a task list.
Return:
boolean: if a function is a task list.
"""
return getattr(fn, '__garcon__', {}).get('list')
|
6546df08c4b6736bc3b08a77e0064191ff64efe7
| 23,052 |
from typing import Dict
def count_vertically_most_common_bits(numbers) -> Dict[int, Dict[str, int]]:
"""
For each position in a list of binary numbers, count the occurrences of 0 and 1.
The result is saved into a nested dictionary.
E.g. ['11', '01'] -> {0: {'0': 1, '1': 1}, 1: {'0': 0, '1': 2}}
Args:
numbers (list): list of numbers in binary format
Returns:
Dict[int, Dict[str, int]]: keys are numbered positions,
values are dictionaries, with 2 keys ('0' and '1') and counts as values.
"""
bits_in_lines = {}
for digit in [*range(len(numbers[0]))]:
bits_in_lines[digit] = {"0": 0, "1": 0}
for number in numbers:
for i, bit in enumerate(number):
bits_in_lines[i][bit] += 1
return bits_in_lines
|
9b7b1b4ea56091294d49f60212b30d0fd819f2d3
| 23,064 |
def has_permission_to_view(page, user):
"""
Check whether the user has permission to view the page. If the user has
any of the page's permissions, they have permission. If the page has no set
permissions, they have permission.
"""
if page.permissions.count() == 0:
return True
for perm in page.permissions.all():
perm_label = '%s.%s' % (perm.content_type.app_label, perm.codename)
if user.has_perm(perm_label):
return True
return False
|
a61337ac64e7f300a1439fb04c55ab5d89d6303d
| 23,065 |
import torch
def get_v_coords(p_range, v_size, v_indices):
"""
Args:
p_range: [x_min, y_min, z_min, x_max, y_max, z_max]
v_size: [vx, vy, vz]
v_indices : [M, 4] -> [bs, z_i, y_i, x_i]
Returns:
v_coords: [M, 4] -> [bs, x, y, z]
"""
with torch.no_grad():
v_size = torch.tensor(v_size).unsqueeze(0).to(v_indices.device)
min_range = torch.tensor(p_range[0:3]).unsqueeze(0).to(v_indices.device)
v_xyz_idx = v_indices[:, [3, 2, 1]]
v_bs = v_indices[:, [0]].float()
v_xyz = (v_indices[:, [3, 2, 1]].float() + 0.5) * v_size + min_range
v_coords = torch.cat([v_bs, v_xyz], dim = 1)
return v_coords
|
9d2870720f902d343577a85d797b72a277fe7673
| 23,067 |
def alternate(seq):
"""
Splits *seq*, placing alternating values into the returned iterables
"""
return seq[::2], seq[1::2]
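# Usage sketch (illustrative sequence, not part of the original record):
# >>> alternate([1, 2, 3, 4, 5])
# ([1, 3, 5], [2, 4])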
|
ef4ed2b352e411f0fb3af1774e066c74f2ff9c28
| 23,069 |
def get_identity_credentials(aip):
"""Returns a dictionary containing a mapping from publickey to identity"""
agent_map = aip.get_agent_identity_to_uuid_mapping()
agent_credential_map = {}
for agent in agent_map:
agent_credential = aip.get_agent_keystore(agent_map[agent]).public
agent_credential_map[agent_credential] = agent
return agent_credential_map
|
3201345db020d3f8e88c83c330900627a0b9bdb4
| 23,071 |
def clut8_rgb565(i):
"""RBG565 CLUT for wasp-os.
This CLUT implements the same palette as :py:meth:`clut8_888` but
outputs RGB565 pixels.
.. note::
This function is unused within this file but needs to be
maintained alongside the reference clut so it is reproduced
here.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 16-bit colour in RGB565 format
"""
if i < 216:
rgb565 = (( i % 6) * 0x33) >> 3
rg = i // 6
rgb565 += ((rg % 6) * (0x33 << 3)) & 0x07e0
rgb565 += ((rg // 6) * (0x33 << 8)) & 0xf800
elif i < 252:
i -= 216
rgb565 = (0x7f + (( i % 3) * 0x33)) >> 3
rg = i // 3
rgb565 += ((0x4c << 3) + ((rg % 4) * (0x33 << 3))) & 0x07e0
rgb565 += ((0x7f << 8) + ((rg // 4) * (0x33 << 8))) & 0xf800
else:
i -= 252
gr6 = (0x2c + (0x10 * i)) >> 2
gr5 = gr6 >> 1
rgb565 = (gr5 << 11) + (gr6 << 5) + gr5
return rgb565
|
1f651c381e1995ac80300f60bd686b296abcdb17
| 23,073 |
import threading
import atexit
def call_repeatedly(func, interval, *args, **kwargs):
"""
Call a function at interval
Returns both the thread object and the loop stopper Event.
"""
main_thead = threading.current_thread()
stopped = threading.Event()
def loop():
while not stopped.wait(interval) and main_thead.is_alive(): # the first call is in `interval` secs
func(*args, **kwargs)
return
timer_thread = threading.Thread(target=loop, daemon=True)
timer_thread.start()
atexit.register(stopped.set)
return timer_thread, stopped.set
|
65c33039212463f13dd2f72e0be41f921bf98d0c
| 23,074 |
def _to_space_separated_string(l):
"""
Converts a container to a space-separated string.
INPUT:
- ``l`` -- anything iterable.
OUTPUT:
String.
EXAMPLES::
sage: import sage.geometry.polyhedron.misc as P
sage: P._to_space_separated_string([2,3])
'2 3'
"""
s = '';
for x in l:
if len(s)>0: s += ' '
s += repr(x)
return s
|
1a14fee5bdc86c52c1c9687f922c0d077bf859bb
| 23,079 |
def build_from_clause(sources):
"""Given a list of table names, connects them with JOINs"""
from_clause = [sources[0]]
for join_to in sources[1:]:
from_clause.append('JOIN {} ON ({}. = {}.)'.format(join_to, sources[0],
join_to))
return '\n'.join(from_clause)
|
8d07ad5ae09a75bd6d56467f8e24e86825fa884d
| 23,082 |
def download_boiler(info):
"""
Boiler plate text for On-Demand Info for downloads
:param info: values to insert into the boiler plate
:param info: dict
:return: formatted string
"""
boiler = ('\n==========================================\n'
' {title}\n'
'==========================================\n'
'Total number of ordered scenes downloaded through ESPA order interface order links: {tot_dl}\n'
'Total volume of ordered scenes downloaded (GB): {tot_vol}\n')
return boiler.format(**info)
|
ccdfa8c27590634413a8ff0e9c8572c8f00fbd76
| 23,084 |
from typing import Tuple
def intersectCmp(a: dict, b: dict) -> Tuple[bool, str]:
"""Return true if the common keys have the same values.
E.g.: Taking {"foo": 0, "bla": "test"} and
{"foo": 1, "bar": true, "bla": "test"}, the intersected keys are
"foo" and "bla", and the values are not equal.
If there are no common keys, returns False.
The same comparison applies to nested dictionaries
Args:
a, b: Dictionaries to intersect and compare.
Returns:
A tuple with the comparison result and a diff text, if different.
"""
common_keys = a.keys() & b.keys()
if not common_keys:
return False, "got %s, wanted %s" % (a.keys(), b.keys())
for k in common_keys:
x = a[k]
y = b[k]
if isinstance(x, dict) and isinstance(y, dict):
cmp, diff = intersectCmp(x, y)
if not cmp:
return False, diff
else:
if x != y:
return False, "key %s: %s != %s" % (k, x, y)
return True, ""
|
f03b888d9c0a833e440f7ed4f178edb9aa2da52c
| 23,086 |
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
|
302b32f350267c13ebe992f980b941d1cd638769
| 23,088 |
def illuminant_scotopic_luminance(L_A, CCT):
"""
Returns the approximate scotopic luminance :math:`L_{AS}` of the
illuminant.
Parameters
----------
L_A : numeric
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.
CCT : numeric
Correlated color temperature :math:`T_{cp}` of the illuminant.
Returns
-------
numeric
Approximate scotopic luminance :math:`L_{AS}`.
Examples
--------
>>> illuminant_scotopic_luminance(318.31, 6504.0) # doctest: +ELLIPSIS
769.9376286...
"""
L_AS = 2.26 * L_A * ((CCT / 4000) - 0.4) ** (1 / 3)
return L_AS
|
33f2969722ab34d823d5aa59a7f8e72e2f5c625a
| 23,091 |
def linearization(X, Jfun, P):
"""Transform a covariance matrix via linearization
Arguments:
X: the point to linearize about, (n,) numpy array
Jfun: function which takes the state and returns the (n x n) Jacobian of
the function, f, which we want to transform the covariance by. It
should return an (n x n) matrix
df1dx1 df1dx2 ... df1dxn
df2dx1 df2dx2 ... df2dxn
... ... ... ...
dfndx1 dfndx2 ... dfndxn
P: covariance matrix to transform
Returns:
P_prime: transformed covariance matrix
"""
A = Jfun(X)
P_prime = A.dot(P.dot(A.T))
return P_prime
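# Usage sketch (assumes numpy is available; values are illustrative, not part of the original record):
# >>> import numpy as np
# >>> X = np.array([1.0, 2.0])
# >>> Jfun = lambda x: np.array([[2 * x[0], 0.0], [0.0, 1.0]])  # Jacobian of f(x) = [x0**2, x1]
# >>> P = np.eye(2)
# >>> linearization(X, Jfun, P)
# array([[4., 0.],
#        [0., 1.]])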
|
c24c4d0815842cc70ac31f0ba9f505bc0d743036
| 23,097 |
def ensure_bytes(value):
"""Helper function to ensure all inputs are encoded to the proper value utf-8 value regardless of input type"""
if isinstance(value, bytes):
return value
return value.encode('utf-8')
|
12a0f933c0db1d01c8682dc2c8f73db53d816880
| 23,104 |
from typing import Union
from datetime import datetime
def datetime_to_iso_8601(obj: Union[datetime, str, int]) -> str:
"""
Convert a datetime to ISO 8601. For use when serializing the credentials dict.
:param obj: a datetime object
:return: the ISO 8601 representation of the datetime
"""
if isinstance(obj, datetime):
return obj.isoformat()
raise TypeError(f"{type(obj)} is not serializable")
|
13cdd05c9dda84405c650c1275b0bad41f5eddc1
| 23,114 |
def getFootprintByReference(board, reference):
"""
Return a footprint by with given reference
"""
for f in board.GetFootprints():
if f.GetReference() == reference:
return f
raise RuntimeError(f"Footprint with reference '{reference}' not found")
|
e0bf71531834cf6318aaa37e5bc70a4c17c1363b
| 23,116 |
def checkNodeInObstacle(node, img):
"""
To check the color of the image at a particular Node
:param node: node to check
:type node: Node type
:param img: the image to check in
:type img: np.array
:return: Boolean of True or False
:rtype: Boolean
"""
if img[node.y, node.x][0] == 0 and img[node.y, node.x][1] == 0 and img[node.y, node.x][2] == 0:
return True
return False
|
0701fef8bbe1dba57486ea50e0eab22b3aef51cb
| 23,121 |
def reciprocal_rank(predicted_bin_list, k):
""" Reciprocal rank = 1/rank of first 'hit', i.e. first 1 in predicted_bin_list[:k]. If there is no hit,
it is 0."""
predicted_bin_list_k = predicted_bin_list[:k]
# Keep only 1s and 0s, discard 2s (2s are useful only for dcg).
predicted_bin_list_k = [1 if entry>0 else 0 for entry in predicted_bin_list_k]
# Get the index of the first 1
try:
# +1 as index starts with 0.
rr = 1 / (predicted_bin_list_k.index(1) + 1)
return rr
except ValueError:
return 0
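# Usage sketch (illustrative relevance list, not part of the original record):
# >>> reciprocal_rank([0, 0, 1, 2, 0], 5)   # first hit at rank 3
# 0.3333333333333333
# >>> reciprocal_rank([0, 0, 0], 3)         # no hit
# 0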
|
38f2ad4f40225c7ed75f39f79bc0ff56e4c70862
| 23,122 |
def span(text):
"""
Wraps text around formatting tag
(That's how the web editor handles font sizes for some reason)
"""
return '''<span style="font-size: 16px;">'''+text+"</span>"
|
1c82dda9907879ad5367b8bc281b983b50cef747
| 23,124 |
def seqToGenbankLines(seq):
""" chunk sequence string into lines each with six parts of 10bp, return as a list
>>> seqToGenbankLines("aacacacatggtacacactgactagctagctacgatccagtacgatcgacgtagctatcgatcgatcgatcgactagcta")
['aacacacatg gtacacactg actagctagc tacgatccag tacgatcgac gtagctatcg', 'atcgatcgat cgactagcta']
"""
# first chunk into 10bp parts
parts = [seq[i:i+10] for i in range(0, len(seq), 10)]
# put into lines of 6*10 bp
lines = []
for i in range(0, len(parts), 6):
lines.append(" ".join(parts[i:i+6]))
return lines
|
f0e290cf3d666980edc18acc50523f45ab18e24a
| 23,126 |
import math
def recreate_2DFES(FES, icount, coords, xinc, xmin, xmax, yinc, ymin, ymax, E):
"""
Receives and returns the arrays that recreate the FES.
Parameters:
-----------
FES : Array of floats
Energy values corresponding to x location on x dimension
icount : Array of integers
Stores number of counts sampled at each location
coords : tuple of float
location of the walker
xinc : float
increment of grid
xmin : float
minimum value in grid
xmax : float
maximum value in grid
yinc : float
increment of grid
ymin : float
minimum value in grid
ymax : float
maximum value in grid
E : float
Energy value to be stored
Returns:
--------
FES : Array of floats
Energy values corresponding to x location on x dimension
(updated)
icount : Array of integers
Number of counts sampled at each location (updated)
"""
xindex = int(round((round(coords[0],
int(abs(math.log10(xinc)))) +
(0 - xmin)) / xinc))
yindex = int(round((round(coords[1],
int(abs(math.log10(yinc)))) +
(0 - ymin)) / yinc))
if (coords[0] > xmin and coords[0] < xmax and
coords[1] > ymin and coords[1] < ymax):
FES[yindex, xindex] = ((FES[yindex, xindex] *
(icount[yindex, xindex]) + E) /
(icount[yindex, xindex] + 1))
icount[yindex, xindex] = icount[yindex, xindex] + 1
return (FES, icount)
|
ad5f0938903a32c9fdf264a9a42c6d1eb316adf2
| 23,128 |
def _get_from_email_(message: dict) -> str:
"""
Returns the email address of the from message
:param message: a dict that represents a message
:return: an string containing the email or an empty string
"""
if message.get("@msg_from"):
email = message["@msg_from"].get("emailAddress")
else:
email = ""
return email
|
14c364470cb3ad0ed46d3d6e19f81e0e0d7dffd5
| 23,130 |
def get_poem_title(poem_container_soup):
"""Read in a soup object containing a poem and return the poem's title"""
poem_title = ""
title_soup = poem_container_soup.findAll("span", { "class" : "mw-headline" } )[0]
title = ''.join(title_soup.findAll(text=True))
return title
|
e6fff5425d052c09dead9d1865f5c3652c0c8f6b
| 23,131 |
from typing import List
def get_task_names(path: str, use_compound_names: bool = False) -> List[str]:
"""
Gets the task names from a data CSV file.
:param path: Path to a CSV file.
:param use_compound_names: Whether file has compound names in addition to smiles strings.
:return: A list of task names.
"""
index = 2 if use_compound_names else 1
with open(path) as f:
task_names = f.readline().strip().split(',')[index:]
return task_names
|
f97af21dbb2f8cdeb6e24de25f529ad875310135
| 23,132 |
from typing import List
def is_doubled(arr: List[str]) -> bool:
"""
Checks whether a segment array of strings is doubled. That is,
the first half contains the same elements as the second half.
:param arr: List of strings.
:return: True if array is doubled, False otherwise.
"""
if len(arr) % 2 != 0:
return False
first = 0
second = int(len(arr) / 2)
while second < len(arr):
if arr[first] != arr[second]:
return False
first += 1
second += 1
return True
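# Usage sketch (illustrative segments, not part of the original record):
# >>> is_doubled(["ab", "cd", "ab", "cd"])
# True
# >>> is_doubled(["ab", "cd", "ab"])
# False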
|
3e13dc4c035fa31136e20f30e9c3913c10f90a26
| 23,135 |
def get_weight_op(weight_schedule):
"""Returns a function for creating an iteration dependent loss weight op."""
return lambda iterations: weight_schedule(iterations)
|
c3f4a01159a6a4b3ed309bf094b1821a542ada32
| 23,137 |
def get_average_quality(qualities):
""" Calculates average quality as Phred quality score
Parameters
----------
qualities: str
Read qualities for a certain position
Returns
-------
float
Average quality
"""
sum_quality = 0
for q in qualities:
sum_quality += 1 - 10 **-((ord(q) - 33) / 10.0)
return sum_quality / len(qualities)
|
821f4353d7371ba8a2f1e4f749978a75cb6bd851
| 23,139 |
def calc_lcoe_om(fom, vom, cf=1):
"""
:param fom: Fixed operation and maintenance costs as CURR/KWY
:param vom: Variable cost in the form of CURR/ KWH
:param cf: Capacity factor assumed for the plant, default is 1
:return: LCOE O&M component in CURR per KWh
"""
fixed = fom / (cf * 8600)
om_lcoe = fixed + vom
return om_lcoe
|
b847a8cdbfbffb4d8270fea7703cf3250cbff491
| 23,141 |
def logistic_rhs(t, x, r=2., k=2.):
"""
RHS evaluation of logistic ODE,
returns
f(t, x) = r * x * (1 - x/k)
"""
return r * x * (1. - x / k)
|
eb65d625a3ae1b544032e6f0efe0eb500aa256cf
| 23,146 |
from datetime import datetime
def createTrunk(name):
"""
Creates a trunk name for data in data_vault corresponding
to the current date.
Arguments:
name (str) : the name of the client.
Returns:
(*str) : the trunk to create in data_vault.
"""
date = datetime.now()
trunk1 = '{0:d}_{1:02d}_{2:02d}'.format(date.year, date.month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(name, date.hour, date.minute)
return ['', str(date.year), '{:02d}'.format(date.month), trunk1, trunk2]
|
c4b1967468159cc13a551afcb142b05a510174ad
| 23,153 |
import re
def remove_state_keys(state, keys_regex):
"""Remove keys from state that match a regex"""
regex = re.compile(keys_regex)
return {k: v for k, v in state.items() if not regex.findall(k)}
|
d1ec89b5da23f866cb8bec8d03a14cc2deba8b5f
| 23,160 |
def get_value_assigned_to_user(user_data, key):
"""
Try to retrieve (attribute) value assigned to an user.
In practise it will be certificate or key. KeyError will be raised in case
of improper payload format or when the attribute for given key does not
exist.
"""
d = user_data["user"]
return d[key]
|
f6e5155612e048ae3d52aa6759e96bb823ebbaca
| 23,163 |
def get_end_year(season):
"""
Returns the end year given a season in YYYY-YY format
"""
second_part = season.split("-")[1]
first_part = "20" if second_part == "00" else season.split("-")[0][:2]
year = f"{first_part}{second_part}"
return int(year)
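# Usage sketch (illustrative seasons, not part of the original record):
# >>> get_end_year("2015-16")
# 2016
# >>> get_end_year("1999-00")
# 2000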
|
13b1094c47bbfdd388a2df627f3565bc3c326a04
| 23,173 |
def format_name(name):
"""Remove non alphanumeric/whitespace characers from user input or
restaurant data
"""
return ''.join(chr for chr in name if chr.isalnum() or chr.isspace())
|
edbeaa643f1b721287aa722cd491d61a53bf7dff
| 23,175 |
def emulator_default_visibility(emulator):
"""The default visibility for this emulator."""
return emulator["default_visibility"]
|
d279920f4c401b8bf68bf1224432badec4658ebe
| 23,179 |
def is_even(n_digits) -> bool:
"""
Check if the number is even or not
:param n_digits:
:return: Bool
"""
if n_digits % 2 == 0:
return True
return False
|
3ada9b181ab3283fd8d5c3a67fde5004129cfc4c
| 23,184 |
def predict_with_neuron(model_class, X_train, y_train, X_valid,
lrate, epochs, on_epoch_end_callback=None):
"""
Args:
X_train (np.ndarray of shape (N, m): Features of the training set
y_train (np.ndarray of shape (N,): Target values of the training set
X_valid (np.ndarray of shape (V, m): Features of the validation set
lrate (float): learning rate
epochs (int): number of epochs
on_epoch_end_callback (function): A function that accepts as an
argument an integer (the index of current iterator) and a
LinearNeuron object. This function can be used as a callback in
order to perform some action at the end of every epoch
Returns:
The predictions of the trained neuron for X_valid and a np.ndarray
vector with the parameters of the trained neuron.
"""
dims = X_train.shape[1]
model = model_class(dims)
model.fit(X_train, y_train, lrate, epochs, on_epoch_end_callback)
predictions = model.predict(X_valid)
parameters = model.theta
return predictions, parameters
|
7c7b36474e46b4b4b8ee152f97e82b2e8e4770e9
| 23,185 |
def get_namespace(node):
"""Return the namespace of the given node
If the node has not namespace (only root), ":" is returned.
Else the namespace is returned
:param node: the node to query
:type node: str
:returns: The top level namespace.
:rtype: str
:raises: None
"""
ns = node.rpartition('|')[2].rpartition(':')[0]
return ns or ':'
|
a608866c712f3d190cece4f3fd5ebd5cfda040b3
| 23,188 |
def diag_line(ax, **linekwds):
""" Draw a diagonal line x=y"""
linekwds.setdefault('ls', ':') #Dotted ...
linekwds.setdefault('color', 'k') #black
linekwds.setdefault('linewidth', 1)
# Bisecting line
ax.plot(ax.get_xlim(),
ax.get_ylim(),
**linekwds)
return ax
|
0f333533f788d96a1dd845de7b1af8ee632584fb
| 23,195 |
def split_sentence(sentence: str) -> list:
"""
Takes a sentence in IPA and parses it to individual words by breaking according to
the " # " IPA string pattern.
:sentence: sentence to parse
:returns: list of individual words
:rtype: list
"""
words = sentence.split(" # ")
return words
|
71df9a977c16fab57c373b5772e62d3760f51d15
| 23,196 |
def merge_arg(cmd_arg, ini_arg):
"""
Merge command line argument and configure file argument.
The cmd_arg has higher priority than ini_arg.
Only non-empty arguments will be considered.
"""
if isinstance(cmd_arg, (list, tuple)):
cmd = cmd_arg[0]
return cmd if cmd else ini_arg
else:
return cmd_arg if cmd_arg else ini_arg
|
f896463fad7a00096e9a1e3b730ad96614a6e966
| 23,199 |
def print_same_line(s: str, fill_num_chars: int, done: bool = False) -> int:
"""A helper to repeatedly print to the same line.
Args:
s: The text to be printed.
fill_num_chars: This should be `0` on the first call to
print_same_line() for a series of prints to the same output line. Then
it should be the return value of the previous call to
print_same_line() repeatedly until `done` is True, at which time the
cursor will be moved to the next output line.
done: On the final call to print_same_line() for a given line of output,
pass `True` to have the cursor move to the next line.
Returns:
The number of characters that were written, which should be passed as
`fill_num_chars` on the next call. At the end of printing over the same
line, finish by calling with `done` set to true, which will move to the
next line."""
s += " " * (fill_num_chars - len(s))
if not done:
print("\r" + s, end="")
else:
print("\r" + s)
return len(s)
|
e9926b538473dbfba11fab0d121b58dd845e5d4c
| 23,204 |
def load_config(config, parser):
"""Load configuration settings from the configuration file"""
for name, value in parser.items('config'):
config[name] = value
return config
|
06ac67d0045417cc1c27b6678fd63cd581454f07
| 23,206 |
def strip_control(in_bytes: bytes) -> str:
"""Strip control characters from byte string"""
return in_bytes.strip(b"\x00").decode()
|
4cefa25b58e8ba68a20aca3c10ecc8aebb2697a0
| 23,209 |
def _get_line_element_value(element, line, current_exception):
"""
Given an element to search for in a line of text,
return the element's value if found.
Otherwise, raise the appropriate exception.
"""
if element in line:
return line[line.rfind('>')+1:]
else:
raise current_exception('Couldn\'t find ' + element + ' in '
+ line)
|
6843472532300410f8d35011600e2ca1c1522f73
| 23,212 |
import re
def search(regex, fullpath):
"""
Return True if and only if the given regex matches any line of the given file.
"""
p = re.compile(regex)
for line in open(fullpath):
if p.search(line):
return True
return False
|
cde96d6cb976d25aca953467abe4420030fecc65
| 23,214 |
def get_line_count(filename):
"""
count number of lines in file.
taken from
https://stackoverflow.com/a/27518377
:param filename: file name
:return: number of lines in file
"""
def _make_gen(reader):
b = reader(1024 * 1024)
while b:
yield b
b = reader(1024 * 1024)
f = open(filename, 'rb')
f_gen = _make_gen(f.raw.read)
return sum(buf.count(b'\n') for buf in f_gen)
|
c9222f24e22bc61f0fefcfcc3eb367bb96ed5d96
| 23,219 |
def read_lines_from_text_file(file_path):
"""Read lines from a text file."""
with open(file_path) as f:
lines = [line.strip() for line in f.readlines()]
return lines
|
95a1592a20d4e83a62def2f8aa8f20633e1024a6
| 23,223 |
def match(tokens, rule):
"""Checks if a token stream matches a rule.
Expects the rule part of the rule tuple (rule[1]).
Returns 0 if it doesn't match, 1 if it matches the begin, and 2 if it
matches entirely.
"""
for r in rule:
if len(tokens) > len(r):
continue
for i in range(len(tokens)):
if not tokens[i] or tokens[i].bnf() != r[i]:
break
else: # executed if the loop ends without break
return 2 if len(tokens) == len(r) else 1
return 0
|
9eb9dccddaf31017388fc1e93400349b5f0d2fa0
| 23,225 |
def pyramidal_nums(n):
"""
Returns a list of all pyramidal numbers less than or equal to n
A pyramidal number is defined as: f(num) = (num**3 - num) / 6
"""
res = [1]
for i in range(3, n):
p = (i**3 - i) // 6
if p < n:
res.append(p)
else:
return res
|
2a4c2625014ea0f19d99cef9fc97ccafaf91eff7
| 23,229 |
def get_directions(filename):
"""gets the directions from the file
Args:
filename: name of the file
Returns:
a list containing lists of the directions eg. return[0] is the list of
directions from the first line of the file. return[1] is the list of
directions from the second line of the file.
"""
text_file = open(filename, "r")
input_strs = text_file.readlines()
text_file.close()
directions = []
for input_str in input_strs:
directions.append(input_str.split(','))
return directions
|
8e904e405e47b645ffdc444a2a2bd970a7a84e3e
| 23,231 |
def _offset_to_tzname(offset):
"""
Converts an offset in minutes to an RFC 3339 "time-offset" string.
>>> _offset_to_tzname(0)
'+00:00'
>>> _offset_to_tzname(-1)
'-00:01'
>>> _offset_to_tzname(-60)
'-01:00'
>>> _offset_to_tzname(-779)
'-12:59'
>>> _offset_to_tzname(1)
'+00:01'
>>> _offset_to_tzname(60)
'+01:00'
>>> _offset_to_tzname(779)
'+12:59'
"""
offset = int(offset)
if offset < 0:
tzsign = '-'
else:
tzsign = '+'
offset = abs(offset)
tzhour = offset / 60
tzmin = offset % 60
return '%s%02d:%02d' % (tzsign, tzhour, tzmin)
|
9e94b2f7ab70a001db2ebcf569c0a8305230a322
| 23,234 |
def is_attendee_or_speaker(user, presentation):
"""
Return True if the user is either a speaker or attendee.
:param user: User instance
:param presentation: Presentation instance
"""
speakers = [x.user for x in presentation.speakers()]
registrants = presentation.proposal.registrants.all()
return user in speakers or user in registrants
|
57a40b65608983fc61b6735b77b4d2f75b8a9d20
| 23,242 |
import re
def display_to_origin(display):
"""
from the display value, an stencila article version,
trim it to be the SWH origin value
e.g. for display value
https://elife.stencila.io/article-30274/v99/
return https://elife.stencila.io/article-30274/
"""
if not display:
return None
match_pattern = re.compile(r"^(https://elife.stencila.io/.*?/).*$")
return match_pattern.sub(r"\1", display)
|
1ff22328590dd2926a83406f82a2fd920b7e2f90
| 23,246 |
def _non_empty_lines(output):
"""Helper to turn a string into a list of not
empty lines and returns it.
"""
return [line for line in
output.splitlines() if line.strip()]
|
3034775bb1d629321f13417b2f843986dcdb6408
| 23,247 |
def _prefix_expand(prefix):
"""Expand the prefix into values for checksum computation."""
retval = bytearray(ord(x) & 0x1f for x in prefix)
# Append null separator
retval.append(0)
return retval
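# Usage sketch (illustrative prefix, not part of the original record):
# >>> _prefix_expand("bc")
# bytearray(b'\x02\x03\x00')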
|
463930264c3ada545ce03e35e8e2502caf9348c9
| 23,248 |
def get_branch(g, node, visited=None):
"""Return the full list of nodes that branch *exclusively*
from the given node. The starting node is included in
the list.
"""
if visited is None:
visited = set()
visited.add(node)
branch = [node]
for succ in g.successors(node):
for p in g.predecessors(succ):
if p not in visited:
break
else:
branch.extend(get_branch(g, succ, visited))
return branch
|
21fde89ed3cc5eb9d4883e68e759a38386e32fce
| 23,249 |
import re
def smart_truncate(text, length=100, suffix='...'):
"""Truncates `text`, on a word boundary, as close to
the target length as it can come.
"""
slen = len(suffix)
pattern = r'^(.{0,%d}\S)\s+\S+' % (length - slen - 1)
if len(text) > length:
match = re.match(pattern, text)
if match:
length0 = match.end(0)
length1 = match.end(1)
if abs(length0 + slen - length) < abs(length1 + slen - length):
return match.group(0) + suffix
else:
return match.group(1) + suffix
return text
|
3cce932b3a4e32c3aa83ddddc61f4dd660b333e1
| 23,250 |