content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
---|---|---|
def _validate_structure(structure):
"""Validates the structure of the given observables collection.
The collection must either be a dict, or a (list or tuple) of dicts.
Args:
structure: A candidate collection of observables.
Returns:
A boolean that is `True` if `structure` is either a list or a tuple, or
`False` otherwise.
Raises:
ValueError: If `structure` is neither a dict nor a (list or tuple) of dicts.
"""
is_nested = isinstance(structure, (list, tuple))
if is_nested:
is_valid = all(isinstance(obj, dict) for obj in structure)
else:
is_valid = isinstance(structure, dict)
if not is_valid:
raise ValueError(
'`observables` should be a dict, or a (list or tuple) of dicts'
': got {}'.format(structure))
return is_nested
|
e65f1b0e5a21282cf6e08a80a76ef5dd9a2176be
| 33,035 |
import re
import functools
def rename_column_headers(df, pattern,
repl):
"""Renames column headers of dataframe by regex replacement.
Args:
df: A pandas dataframe.
pattern: Regex pattern to replace.
repl: Replacement string to insert where old string was.
Returns:
Dataframe with renamed columns.
"""
def _regex_replace_fn(x, pattern, repl):
return re.sub(pattern=pattern, repl=repl, string=x)
return df.rename(
columns=functools.partial(_regex_replace_fn, pattern=pattern, repl=repl))
|
0255b5f37e42c54ca2822789a3f144511ab8f376
| 33,037 |
def way_roy(routing, start, end):
"""Return the route from the start to the end as a list for the Roy-Warshall algorithm"""
route = []
current_node = start
while current_node != end:
route.append(current_node)
current_node = routing[current_node][end] # Follow the routing matrix
route.append(end)
return route
|
26070d5d9b9d599586ebe2c6cc447fb804d764ea
| 33,039 |
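A minimal usage sketch for `way_roy` above, assuming the routing matrix is a dict of dicts where `routing[u][v]` gives the next hop from `u` toward `v`; the graph below is hypothetical and only for illustration:

# Hypothetical routing table: routing[u][v] is the next node on the path u -> v.
routing = {
    'A': {'B': 'B', 'C': 'B', 'D': 'B'},
    'B': {'A': 'A', 'C': 'C', 'D': 'C'},
    'C': {'A': 'B', 'B': 'B', 'D': 'D'},
    'D': {'A': 'C', 'B': 'C', 'C': 'C'},
}
print(way_roy(routing, 'A', 'D'))  # ['A', 'B', 'C', 'D']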
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
top_items = sorted_items[:topn] if topn > 0 else sorted_items
return {feature_names[idx]: sc for idx, sc in top_items}
|
9d6f1f8e64ccf0c8143e74e58814e00a3455dcac
| 33,040 |
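A usage sketch for `extract_topn_from_vector` above, assuming `sorted_items` is a list of `(index, score)` pairs already sorted by descending score, as a typical TF-IDF extraction step would produce:

feature_names = ['cat', 'dog', 'fish', 'bird']   # vocabulary, index-aligned
sorted_items = [(1, 0.9), (3, 0.5), (0, 0.2)]    # (index, tf-idf score), descending
print(extract_topn_from_vector(feature_names, sorted_items, topn=2))
# {'dog': 0.9, 'bird': 0.5}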
def format_program_client_stats(row, prefix):
"""
Given a program in the facility DF (specified by string prefix),
format the client stats (gender, pets, ADA, EMS calls/visits).
Parameters:
===========
row: pandas.Series
The row of the df to format
prefix: str
The prefix for all the stats entries (e.g., 'trailers_', 'isolation_', etc)
Returns
=======
An HTML string of the formatted client stats.
"""
men = row[prefix + "MALE"] + row[prefix + "TRANSGENDER_F_TO_M"]
women = row[prefix + "FEMALE"] + row[prefix + "TRANSGENDER_M_TO_F"]
nonbinary = (
row[prefix + "DECLINED"] + row[prefix + "OTHER"] + row[prefix + "UNDEFINED"]
)
pets = row[prefix + "totalPets"]
ada = row[prefix + "totalAda"]
ems_calls = row[prefix + "EMS_CALL"]
ems_visits = row[prefix + "EMS_VISIT"]
return f"""
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Women: {women}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Men: {men}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Nonbinary/other/declined: {nonbinary}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Pets: {pets}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Clients with ADA needs: {ada}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
EMS calls (last 24 hours): {ems_calls}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
EMS visits (last 24 hours): {ems_visits}
</p>
"""
|
7aaa67e293d818dc9bccd3c117e17fb16f8a7716
| 33,042 |
def observer(*fields):
""" Observer decorator
The `observer` decorator takes `*args` which represent django
field names that should be observed for mutations.
The `ObserverMixin` is responsible for monitoring the fields
for mutation & acting on it but the decorator takes the list
of fields to observe & adds them to the wrapped function as
a private `_observed_fields` property.
"""
def observer_wrapper(func):
""" Add the hidden property with the fields to observe """
assert func.__name__.startswith('_observe_'), \
            'Observed method names must begin with "_observe_", not %s' % func.__name__
# pylint: disable=protected-access
func._observed_fields = fields
return func
return observer_wrapper
|
7e49023470e677a3c56ae293daf47c628216c023
| 33,048 |
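A usage sketch for the `observer` decorator above; the `ObserverMixin` mentioned in the docstring is not shown, so this only demonstrates the attribute the decorator attaches (the `Article` class and handler body are hypothetical):

class Article:
    @observer('title', 'body')
    def _observe_content(self, instance):
        # hypothetical handler that the ObserverMixin would call on mutation
        print('title or body changed')

print(Article._observe_content._observed_fields)  # ('title', 'body')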
from typing import Tuple
def time_units(total_seconds: int) -> Tuple[int, int, int]:
"""Convert a given number of seconds to hours, minutes and seconds.
Parameters
----------
total_seconds : int
Total number of seconds to convert.
Returns
-------
int, int, int
        Three integers: the seconds, minutes and hours of the conversion, returned in that order.
"""
hours = total_seconds // 3600
minutes = (total_seconds // 60) % 60
seconds = total_seconds % 60
return seconds, minutes, hours
|
09ce9f37d218288eb8061a7dd88e6898480ea8c4
| 33,049 |
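A quick usage check for `time_units` above; note that the tuple is returned in (seconds, minutes, hours) order:

seconds, minutes, hours = time_units(3725)
print(hours, minutes, seconds)  # 1 2 5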
def row_count(filename):
""" Counts the rows in a given file """
count = 0
with open(filename, 'r') as ofp:
for _ in ofp:
count = count + 1
# Remove header row from count
count = count - 1 if count > 0 else count
return count
|
35ab79abd4548277ac9d372605cfaca8164202c1
| 33,052 |
def sample_array_str(self):
"""
Create a human readable string for a SampleArray.
:param self:
:return: str
"""
return f"SampleArray(name={self.name}, container_type={self.container_type}, contents={self.contents})"
|
90248d3fc14af0a644b18f1416f205957aa65145
| 33,054 |
from typing import Any
def ensure_field(dictlike: dict, fieldname: str) -> Any:
"""Ensure the required field is found in the data structure."""
sentinel = object()
value = dictlike.get(fieldname, sentinel)
if value is sentinel:
raise UserWarning('{!r} is a required field'.format(fieldname))
return value
|
424e41e16581c60da0bdb99f25fb2d75f082d62b
| 33,055 |
import torch
def prepare_gt(input_img, gt_bboxes, gt_classes):
"""
args:
        - input_img: image as an HxWxC array (e.g. a numpy array; the code reads input_img.shape)
- gt_bboxes - list of bounding boxes
- gt_classes - list of category ids
return:
gt[0] = tensor of bboxes of objects in image scaled [0,1], in (CENTER, w, h) format
gt[1] = tensor of class ids in image
"""
gt = [torch.FloatTensor(gt_bboxes), torch.IntTensor(gt_classes)]
height, width, _ = input_img.shape
for idx, bbox in enumerate(gt[0]):
new_bbox = [0] * 4
new_bbox[0] = (bbox[0] + (bbox[2] / 2)) / width
new_bbox[1] = (bbox[1] + (bbox[3] / 2)) / height
new_bbox[2] = bbox[2] / width
new_bbox[3] = bbox[3] / height
gt[0][idx] = torch.FloatTensor(new_bbox)
return gt
|
3ce26456cacb724058d5268134c3b351d4349423
| 33,059 |
def iter_to_dict_frequency(iterable):
"""
    creates a dictionary from a list and the values
are the counts of each unique item
"""
dict_frequency = {}
for item in iterable:
dict_frequency.setdefault(item, 0)
dict_frequency[item]+=1
return dict_frequency
|
266d6d1520151c1c2bda297578200021c28da70d
| 33,061 |
def parsedcommand(obj):
"""
Decorator to flag documented expression commands available to users.
Used with the usage() method.
"""
obj.parsedcommand = True
return obj
|
6bd6e06c61cd2f6443bfc9bf4e176c83691eae46
| 33,064 |
def convert_nothing(x):
""" useful utility for not converting anything"""
return x
|
7e900dedbe20cd154bea977bd3a887cda478735f
| 33,069 |
import requests
def get_registered_mars(management_url: str) -> dict:
"""
View all registered mar files.
Parameters:
management_url (str): TorchServe management url
Return:
registered_mars (dict)
"""
management_url = management_url.rstrip("/")
registered_mars_url = f"{management_url}/models"
res = requests.get(registered_mars_url)
return res.json()
|
b5033339c744aafe2a0a5e836962c0d91db6b224
| 33,070 |
import ast
def string_to_dict(string):
"""Function to convert json strings to dictionary"""
return ast.literal_eval(string)
|
c7ab9907e213bb3154350397c2e96b4d20edc293
| 33,074 |
def get_adverse_outcome_lookahead_label_key(time_window_hours: int) -> str:
"""Returns the lookahead label key for the provided time window in hours."""
return f"adverse_outcome_within_{time_window_hours}h"
|
a65b4857d4a3ec1c1cd86fdceddfabd3fcfb5ec4
| 33,076 |
def kill_min_items(classes):
"""Kill all references to min_items=1."""
# NOTE! This changes all constr list to normal List
for i, c in enumerate(classes):
for j, line in enumerate(c.lines):
c.lines[j] = line.replace(', min_items=1', '')
classes[i] = c
return classes
|
27a385b2c78200158f03f91b90577bfdd94feb9c
| 33,079 |
def _chunks(l, chunk_size):
"""return chunks of chunk_size for list l
    >>> list(_chunks([0, 1, 2, 3, 4], 2))
[[0, 1], [2, 3], [4]]
"""
return (l[i:i + chunk_size] for i in range(0, len(l), chunk_size))
|
2d45088536810dc0f52b38a3d2c2b86414daf862
| 33,080 |
import hashlib
def hash_color(input_string):
"""
Derives a hex color code from the MD5 hash of an input string.
Returns the resulting hex color code as a string.
"""
    digest_chars = list(hashlib.md5(input_string.encode("utf-8")).hexdigest())  # encode str -> bytes for hashlib on Python 3
color = ""
while len(color) < 6:
color += digest_chars.pop(0)
color += digest_chars.pop(-1)
return color
|
0d04cca8b9491e5e99b05504f27401bd6d62c9b5
| 33,087 |
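A usage sketch for `hash_color` above (relying on the `.encode("utf-8")` call added so it runs on Python 3 strings); the result is a deterministic 6-character hex code for a given input:

color = hash_color("example-user")   # hypothetical input string
assert len(color) == 6
print(f"#{color}")                   # the same input always yields the same color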
def check_args(args):
"""Check values of parameters. Returns None, or an error message."""
message = ""
if not 0 <= args.min_netmask <= 32:
message = (
message + "\n Option 'min_netmask' must be an integer between 0 and 32"
)
if not 0.0 <= args.network_discovery_timeout <= 60.0:
message = message + "\n Option 'network_timeout' must be between 0.0 and 60s"
if not 1 <= args.network_discovery_threads <= 32000:
message = message + "\n Option 'threads' must be between 1 and 32000"
if message == "":
return None
return message
|
c48a3eb7a04548d71dc0f968de609a09d692e458
| 33,090 |
def fixedsplit(text, separator=None, maxsplit=-1):
"""Split a string and return a fixed number of parts"""
parts = text.split(separator, maxsplit)
maxparts = maxsplit + 1
missing = maxparts - len(parts)
if missing > 0:
parts = parts + (missing * [""])
return parts
|
fecbb9e2d195a4fe04ccf63d80df872cd82941a5
| 33,092 |
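A short example of `fixedsplit` above, showing how missing parts are padded with empty strings so unpacking always succeeds:

key, value, rest = fixedsplit("name=Alice", "=", maxsplit=2)
print(key, repr(value), repr(rest))  # name 'Alice' ''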
def get_keypress_event_trigger_key(event: str) -> int:
"""Find the key number which triggers particular keypress event."""
comparison_str = "e.which == "
if comparison_str not in event:
return -1
comparison_start = event.index(comparison_str) + len(comparison_str)
comparison_end = event.find(")", comparison_start)
key = int(event[comparison_start:comparison_end])
return key
|
73178e6c30affe62fa77677fe894953b0bb371ea
| 33,095 |
def generate_color_brew(n):
"""
Generates an evenly distributed range
of hue values in the HSV colour scale.
Arguments:
n -- an int with the number of hue values
you want to obtain
Returns:
a python list with those hue values
"""
hue_step = 360 / float(n)
return [color * hue_step / 360.0 for color in range(n)]
|
69bfc64bc209fcb916c77ee6d3681ce0c3ccc815
| 33,096 |
import torch
def downsample(vox_in, n, use_max=True):
"""
Downsample a 3-d tensor n times
Inputs:
- vox_in (Tensor): HxWxD tensor
- n (int): number of times to downsample each dimension
- use_max (bool): use maximum value when downsampling. If set to False
the mean value is used.
Output:
- vox_out (Tensor): (H/n)x(W/n)x(D/n) tensor
"""
dimy = vox_in.size(0) // n
dimx = vox_in.size(1) // n
dimz = vox_in.size(2) // n
vox_out = torch.zeros((dimy, dimx, dimz))
for x in range(dimx):
for y in range(dimy):
for z in range(dimz):
subx = x * n
suby = y * n
subz = z * n
subvox = vox_in[suby : suby + n, subx : subx + n, subz : subz + n]
if use_max:
vox_out[y, x, z] = torch.max(subvox)
else:
vox_out[y, x, z] = torch.mean(subvox)
return vox_out
|
28c28a8e22bae39c8474c8d0e406059a74ec9b60
| 33,100 |
from typing import List
import shlex
import difflib
def diff_commands(cmd1: str, cmd2: str) -> str:
"""Return a unified diff of two shell commands."""
def diffable(cmd: str) -> List[str]:
args = shlex.split(cmd)
pretty = []
i = 0
while i < len(args):
if args[i] in ("-o", "-iquote", "-isystem", "-MF"):
pretty.append(args[i] + " " + args[i + 1])
i += 2
else:
pretty.append(args[i])
i += 1
return pretty
args1 = diffable(cmd1)
args2 = diffable(cmd2)
return "\n".join(tuple(difflib.unified_diff(args1, args2, n=999))[2:])
|
a6366a2bc2841541eebf707b8423bd2d409e7b78
| 33,107 |
def ClampValue(input, min, max):
"""
Clamp Value to min/max
:param input: Input
:param min: Minimum Value
:param max: Maximum Value
:return: Clamped Output
"""
if input > max:
return max
elif input < min:
return min
else:
return input
|
00b63ac1fd6fb009d0f5c3abdee6f387ee21a5da
| 33,111 |
from typing import Dict
from typing import List
def _create_default_qubit_map(n_qubits: int) -> Dict[int, List[int]]:
"""Creates a qubit map that maps each qubit to itself."""
qubit_map = {}
for i in range(n_qubits):
qubit_map[i] = [i, 1]
return qubit_map
|
a12e9d2ed183748b3e92e24385de648a90a51bcb
| 33,113 |
from pathlib import Path
import tempfile
import atexit
import shutil
def create_temp_dir(remove_at_exit: bool = True) -> Path:
"""Create a temporary directory.
Args:
remove_at_exit (bool, optional): If True, delete directory when
application exists. Defaults to True.
Returns:
Path: The path to the created directory.
"""
temp_dir = Path(tempfile.mkdtemp())
if remove_at_exit:
atexit.register(lambda: shutil.rmtree(temp_dir, ignore_errors=True))
return temp_dir
|
76fd070c764f748b48d45c31821bc6049128e8ae
| 33,120 |
import requests
def URL_is_reachable(url, expected_response=200):
"""== Verifies URL Passed in Returns a Given Response Code ==
- Pass in URL, and optionally an expected response code (if something other than ``200`` is expected).
- Returns either ``True`` or ``False``.
== Calling ==
| *Args* | ``url`` (str) | Fully-qualified URL (including protocol). |
| *Args* | ``expected_response`` (int) | _Optional_ return code if other than ``200``. |
| *Returns* | ``boolean`` | Either True or False. |
| *Raises* | exception | Returns ``False`` on exception. |
=== Example in Robot ===
| ``${is_reachable} = URL is Reachable https://www.google.com``
"""
try:
req_return = requests.get(url)
if req_return.status_code == expected_response:
return True
else:
return False
except:
return False
|
9e145e4bcf9313d97c1247fa94f3e1d0469f5ab7
| 33,122 |
import binascii
def parse_object_id(value: str) -> bytes:
"""
Parse an object ID as a 40-byte hexadecimal string, and return a 20-byte
binary value.
"""
try:
binary = binascii.unhexlify(value)
if len(binary) != 20:
raise ValueError()
except ValueError:
raise ValueError("blob ID must be a 40-byte hexadecimal value")
return binary
|
503b0cd21c445d1ce851dbdc8f47b493bc17b1f8
| 33,125 |
def get_pos(tokenlist, pos):
""" filter a list to return the words that match the provided part of speech"""
matching_pos_words = []
i = 0
for (word, pos_tag) in tokenlist:
try:
if pos_tag.startswith(pos):
matching_pos_words.append((word, i))
except:
print('error', word, pos_tag)
i += 1
return matching_pos_words
|
ffc508d95d7430aabf99d10a1903133093aaee22
| 33,131 |
def reverse(xs):
"""Returns a reversed sequence"""
return type(xs)(reversed(xs))
|
0cb99f16b2f46fc3d4b5dc478a45be99d5d7201e
| 33,132 |
def is_int(n):
"""Returns True if 'n' is an integer.
Args:
n (anything): The variable to check.
Returns:
        bool: True if it is an integer (or can be cast to one).
"""
try:
int(n)
return True
except (ValueError, TypeError):
return False
|
1a79e76e120be8cdfb838af4e970441d72c8e932
| 33,136 |
def _full_best_response_policy(br_infoset_dict):
"""Turns a dictionary of best response action selections into a full policy.
Args:
br_infoset_dict: A dictionary mapping information state to a best response
action.
Returns:
A function `state` -> list of (action, prob)
"""
def wrap(state):
infostate_key = state.information_state_string(state.current_player())
br_action = br_infoset_dict[infostate_key]
ap_list = []
for action in state.legal_actions():
ap_list.append((action, 1.0 if action == br_action else 0.0))
return ap_list
return wrap
|
d1e2f5869fb3607aaffe05ff3446568f4665bbda
| 33,139 |
from typing import Optional
def _validate_and_convert_float(value: float) -> Optional[float]:
"""
Validate that a value is a float, or a number that can be converted to a float.
If the value is valid, this method will return the value as float. Otherwise, this
method returns None
:param value: Value to validate and convert
:return: Value as float if value is valid, None otherwise
"""
valid = True
if not isinstance(value, (float, int)):
valid = False
if isinstance(value, bool):
valid = False
if valid:
return float(value)
return None
|
14911cf27623e6ffebc4d93b36761884beb73cfa
| 33,141 |
import torch
def IntTensor(values, device='cuda:0'):
"""
Returns a Tensor of type torch.int containing the given values
Parameters
----------
values : list
the values of the tensor
device : str
the device to store the tensor to
Returns
-------
Tensor
an integer precision tensor
"""
return torch.tensor(values, dtype=torch.int, device=device)
|
bde0853a436969b5d463f3ee28394473fc2b3ab0
| 33,142 |
from typing import Callable
import math
def _geometric_binary_search(
func: Callable[[float], float],
target: float,
iterations: int = 24,
reverse: bool = False
) -> float:
"""Perform a binary search using geometric centers.
Do a binary search to find the value ``n`` that makes the function ``func``
return ``target`` when ``n`` is used as the argument. By default, it is
assumed that smaller values of ``n`` will cause ``func`` to produce smaller
outputs. If smaller values of ``n`` produce larger outputs, set ``reverse``
to True.
This implementation of binary search uses the geometric mean instead of the
arithmetic mean to determine the center of the search space. This is
because the values that are being searched are weighted towards zero.
:param func: A Callable which accepts a float and returns a float. This
must be a one-to-one function.
:param target: A float representing the target output which we are trying
to make func produce.
:param iterations: An integer representing the number of iterations to run
the binary search. The default of 24 should be sufficient for most
applications.
:param reverse: A bool representing the relationship between the input and
output values of func.
:return: A float representing value n which makes the function func produce
target when called as its argument.
"""
lower_bound = 2 ** -iterations
upper_bound = 2 ** iterations
assert lower_bound <= upper_bound
for _ in range(iterations):
guess = math.sqrt(lower_bound * upper_bound)
answer = func(guess)
if (not reverse and answer > target) or (reverse and answer < target):
upper_bound = guess
else:
lower_bound = guess
return math.sqrt(lower_bound * upper_bound)
|
fa61dd9f1129474d43915f47344005a7eda0aa96
| 33,147 |
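A usage sketch for `_geometric_binary_search` above: invert a monotonic function over its default search range of roughly [2**-24, 2**24], here solving n * n == 30:

import math

root = _geometric_binary_search(lambda n: n * n, 30.0)
print(root, math.sqrt(30.0))  # both approximately 5.477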
def convert_index(idx, m, n):
"""
Convert 1D index into 2D
:param idx: 1D index
:type idx: int
:param m: number of rows
:type m: int
:param n: number of columns
:type n: int
:return: 2D index
:rtype: tuple[int]
"""
return idx // n, idx % n
|
8cfa7659dd87c8454b279e96370da5840d807ee9
| 33,148 |
import yaml
def configuration(configuration_path):
"""Load our configuration."""
with open(configuration_path, 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
|
5fab251f28ca9eca955cd396b92a6c5c7ae8ee97
| 33,149 |
def moving_average(x, n, type='simple'):
""" compute an n period moving average.
type is 'simple' | 'exponential'
"""
if type == 'simple':
ma = x.rolling(window = n, center= False).mean()
else:
ma = x.ewm(span = n).mean()
return ma
|
e391ad258f6caa8cbf743e7d1283eff105c55867
| 33,150 |
def divide(value1: int, value2: int) -> float:
"""
Used to divide the number of cards to check that nothing was lost.
    Returns 1.0 when the two values are equal (which covers 0/0) and 0.0 on division by zero.
"""
if value1 == value2: # good for 0/0
return 1.0
else:
try:
div_value = value1 / float(value2)
except ZeroDivisionError:
div_value = 0.
return div_value
|
afc70043f3a60d2cbca1fce5bc0f49ac6fbd046c
| 33,154 |
def typed_property(name, expected_type):
"""Common function used to creating arguments with forced type
:param name: name of attribute
:param expected_type: expected type of attribute value
:return: property attribute
"""
private_name = '_' + name
@property
def prop(self):
return getattr(self, private_name)
@prop.setter
def prop(self, value):
if not isinstance(value, expected_type):
raise TypeError('Expected {}'.format(expected_type))
setattr(self, private_name, value)
return prop
|
d1cc4dd8ea01d2d32efeffe28dea3236b8ab30c2
| 33,156 |
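A usage sketch for `typed_property` above, wiring type-checked attributes into a hypothetical class:

class Point:
    x = typed_property('x', int)
    y = typed_property('y', int)

    def __init__(self, x, y):
        self.x = x  # assignments go through the property setter, so the type is enforced
        self.y = y

p = Point(1, 2)
try:
    p.y = "oops"
except TypeError as exc:
    print(exc)  # Expected <class 'int'>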
def dimension(value, arg):
"""
Dimension integers
If value, append arg, otherwise output nothing
"""
if value:
return str(value) + " " + arg
return ""
|
f16030e793a1dfd336da4a4faeaaa00d20920e64
| 33,158 |
def extract_sub_name(subreddit: str) -> str:
"""Extract the name of the sub without prefix."""
if subreddit.startswith("/r/"):
return subreddit[3:]
if subreddit.startswith("r/"):
return subreddit[2:]
return subreddit
|
bcaab5cc95a09899b0b3059e3c27ef0690d9dadd
| 33,162 |
def calculate_accuracy_overall(actual_labels, predicted_labels):
"""
Calculate accuracy percentage for all labels (classes).
"""
correct = sum(1 for i in range(len(actual_labels)) if actual_labels[i] == predicted_labels[i])
return correct / len(actual_labels) * 100.0
|
07067541fb13ec11b998cbf4776273c05f831264
| 33,163 |
def compare_values(value1, value2, relative, absolute):
"""
Compare two values with respect to a relative and an absolute deviation.
:param value1: First value
:param value2: Second value
:param relative: Relative deviation (0..1)
:param absolute: Absolute deviation (e.g. 1, -5.7, 100)
    :return: True if value1 is within the valid deviation of value2, False if not
"""
mi = min(value1, value2)
ma = max(value1, value2)
if ((ma * (1 - relative)) - absolute) < mi:
return True
else:
return False
|
9b5e7aa10e18c7b0947f7c3cdec5a8d532c8dea3
| 33,164 |
def iterate_squarings(x, powers_to_calculate):
"""
Repeatedly square x.
The values in the "powers_to_calculate" (an iterator),
which must be increasing, will be returned.
"""
powers_calculated = {}
powers_to_calculate = sorted(powers_to_calculate)
# Repeatedly square x
previous_power = 0
for current_power in powers_to_calculate:
for _ in range(current_power - previous_power):
x = pow(x, 2)
powers_calculated[current_power] = x
previous_power = current_power
return powers_calculated
|
08db56b59d5f5d632b1032fcbac3ef9bf2855ba6
| 33,166 |
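A small worked example for `iterate_squarings` above using plain integers; after k squarings the stored value is x ** (2 ** k):

print(iterate_squarings(2, [1, 3, 5]))
# {1: 4, 3: 256, 5: 4294967296}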
import gc
def list_with_attribute(classname, attributename, attributevalue):
"""Gather all instances in specified class with a particular attribute"""
my_list = []
for obj in gc.get_objects():
if isinstance(obj, classname):
if getattr(obj, attributename) == attributevalue:
my_list.append(obj.fname)
return my_list
|
91ca3c2104843d78d4b993a702d2db238b027c8b
| 33,171 |
import unicodedata
def is_fullwidth(char):
"""Check if a character / grapheme sequence is fullwidth."""
return any(
unicodedata.east_asian_width(_c) in ('W', 'F')
for _c in char
)
|
2c264f09c5873fc24f31de842a0191b6829997cc
| 33,174 |
import math
def rise_pres(mach: float, beta:float, gamma=1.4) -> float:
"""
Calculate rise in pressure after oblique shock
Parameters
----------
mach:float
Mach number
beta: float
oblique shock angle
gamma: float
isentropic exponent
Returns
-------
    p_ratio: float
        static pressure ratio across the oblique shock (p2/p1)
"""
m_sin_2 = mach * mach * math.sin(beta) * math.sin(beta)
return (2.0 * gamma / (gamma + 1.0) * m_sin_2) - ((gamma-1.0) / (gamma + 1.0))
|
0845f9e047b0a372ca5b4fffd96b181d5367c82f
| 33,176 |
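A worked check for `rise_pres` above: a normal shock is the special case beta = 90 degrees, and at Mach 2 in air (gamma = 1.4) the classic static-pressure ratio is 4.5:

import math

print(rise_pres(2.0, math.pi / 2))  # approximately 4.5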
def get_name(soup):
"""Extract the name from the given tree."""
return soup.find('h2').getText()
|
46773bb657bffb7624bb82fd43a398f2a72ce24f
| 33,178 |
def version_code_table() -> dict[int, tuple[str]]:
"""A dictionary that contains the code for a specific version.
Returns:
dict[int, tuple[str]]: A dictionary that contains data in the form
(version number: 3-line code tuple)
"""
table = {
7: ('000010', '011110', '100110'),
8: ('010001', '011100', '111000'),
9: ('110111', '011000', '000100'),
10: ('101001', '111110', '000000'),
11: ('001111', '111010', '111100'),
12: ('001101', '100100', '011010'),
13: ('101011', '100000', '100110'),
14: ('110101', '000110', '100010'),
15: ('010011', '000010', '011110'),
16: ('011100', '010001', '011100'),
17: ('111010', '010101', '100000'),
18: ('100100', '110011', '100100'),
19: ('000010', '110111', '011000'),
20: ('000000', '101001', '111110'),
21: ('100110', '101101', '000010'),
22: ('111000', '001011', '000110'),
23: ('011110', '001111', '111010'),
24: ('001101', '001101', '100100'),
25: ('101011', '001001', '011000'),
26: ('110101', '101111', '011100'),
27: ('010011', '101011', '100000'),
28: ('010001', '110101', '000110'),
29: ('110111', '110001', '111010'),
30: ('101001', '010111', '111110'),
31: ('001111', '010011', '000010'),
32: ('101000', '011000', '101101'),
33: ('001110', '011100', '010001'),
34: ('010000', '111010', '010101'),
35: ('110110', '111110', '101001'),
36: ('110100', '100000', '001111'),
37: ('010010', '100100', '110011'),
38: ('001100', '000010', '110111'),
39: ('101010', '000110', '001011'),
40: ('111001', '000100', '010101')
}
return table
|
091685145bb8d7a44ae76c3a2208f29c01581b03
| 33,179 |
import ast
def build_tree(script):
"""Builds an AST from a script."""
return ast.parse(script)
|
a37d24ce3808bfa0af3a5e8e4c1fb8a2d281625e
| 33,184 |
def get_maxima(spectrum):
"""
Crude function that returns maxima in the spectrum.
:param spectrum: tuple of frequency, intensity arrays
:return: a list of (frequency, intensity) tuples for individual maxima.
"""
res = []
for n, val in enumerate(spectrum[1][1:-2]):
index = n+1 # start at spectrum[1][1]
lastvalue = spectrum[1][index-1]
nextvalue = spectrum[1][index+1]
if lastvalue < val and nextvalue < val:
print('MAXIMUM FOUND AT: ')
print((spectrum[0][index], val))
res.append((spectrum[0][index], val))
return res
|
d8fec410b4a959c27ab03d7540790b85a5a1766b
| 33,186 |
def indent(text, indent):
"""Indent Text.
Args:
text(str): body of text
indent(str): characters with which to indent
Returns:
        text indented with the given characters
"""
if '\n' not in text:
return "%s%s" % (indent, text)
else:
text = text.splitlines(True)
return indent.join(text)
|
225575457fe308e3462c0e87181eb88a6e4da939
| 33,187 |
def ukr_E(X, B):
"""UKR reconstruction error."""
E = ((X - B.T.dot(X))**2).sum() / B.shape[0] # (Frobenius norm)^2
return E
|
bd807018e162bdbb99cfe3d297daa0b5272aefa2
| 33,190 |
def min_dist(q, dist):
"""
Returns the node with the smallest distance in q.
Implemented to keep the main algorithm clean.
"""
min_node = None
for node in q:
        if min_node is None:
min_node = node
elif dist[node] < dist[min_node]:
min_node = node
return min_node
|
99c4e868748598a44f79ee3cb876d7ebc3abae08
| 33,193 |
def filter_localization_probability(df, threshold=0.75):
"""
Remove rows with a localization probability below 0.75
Return a ``DataFrame`` where the rows with a value < `threshold` (default 0.75) in column 'Localization prob' are removed.
Filters data to remove poorly localized peptides (non Class-I by default).
:param df: Pandas ``DataFrame``
:param threshold: Cut-off below which rows are discarded (default 0.75)
:return: Pandas ``DataFrame``
"""
df = df.copy()
localization_probability_mask = df['Localization prob'].values >= threshold
return df.iloc[localization_probability_mask, :]
|
7036c730db4c0a649aa25bcf59a962f89ce2710c
| 33,195 |
def mean_riders_for_max_station(ridership):
"""
Fill in this function to find the station with the maximum riders on the
first day, then return the mean riders per day for that station. Also
    return the mean ridership overall for comparison.
Hint: NumPy's argmax() function might be useful:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html
"""
overall_mean = ridership.mean()
max_station = ridership[0, :].argmax()
mean_for_max = ridership[:, max_station].mean()
return overall_mean, mean_for_max
|
936f6a8f7ac2c1cebc1c53516963253c9c51ee29
| 33,196 |
def _read_first_line(file_path):
"""
Returns the first line of a file.
"""
with open(file_path, 'r') as file_:
first_line = file_.readline().strip()
return first_line
|
035f22e1e35aa2f945d6ee6d8435d44fee17cc01
| 33,197 |
def _month_year_to_tuple(month_year: str) -> tuple[int, int]: # Month, Year
"""
Parses user-preferred combination of Month/Year string and returns tuple
:param month_year: string Month/Year combination space separated (i.e. 10 2021)
:return: tuple[int, int] Month, Year
"""
m, y = month_year.split(" ")
return int(m), int(y)
|
202d7863767374fc043204be4588443f6296d3e9
| 33,198 |
def _parse_cubehelix_args(argstr):
"""Turn stringified cubehelix params into args/kwargs."""
if argstr.startswith("ch:"):
argstr = argstr[3:]
if argstr.endswith("_r"):
reverse = True
argstr = argstr[:-2]
else:
reverse = False
if not argstr:
return [], {"reverse": reverse}
all_args = argstr.split(",")
args = [float(a.strip(" ")) for a in all_args if "=" not in a]
kwargs = [a.split("=") for a in all_args if "=" in a]
kwargs = {k.strip(" "): float(v.strip(" ")) for k, v in kwargs}
kwarg_map = dict(
s="start", r="rot", g="gamma",
h="hue", l="light", d="dark", # noqa: E741
)
kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}
if reverse:
kwargs["reverse"] = True
return args, kwargs
|
a7c7405a3a668e6b7925aa02a1fafc9e5e774f41
| 33,204 |
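A usage sketch for `_parse_cubehelix_args` above with a seaborn-style cubehelix spec string; the trailing `_r` marks a reversed palette:

args, kwargs = _parse_cubehelix_args("ch:s=.25,r=-.5,l=.75_r")
print(args)    # []
print(kwargs)  # {'start': 0.25, 'rot': -0.5, 'light': 0.75, 'reverse': True}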
import re
def rm_custom_emoji(text):
"""
    Strip custom emoji IDs so they are not read aloud.
    :param text: the original text
    :return: the text with custom emoji IDs removed
"""
pattern = r'<:[a-zA-Z0-9_]+?:>'
return re.sub(pattern, '', text)
|
ac0a1cfb59868c6bcd96541f5bcc75de966d8a93
| 33,206 |
def open_views_for_file(window, file_name):
"""Return all views for the given file name."""
view = window.find_open_file(file_name)
if view is None:
return []
return [v for v in window.views() if v.buffer_id() == view.buffer_id()]
|
d265e42b99b38606646e0f87b456c8b44d6fcb8c
| 33,207 |
def get_bond_type_counts(bond_types):
""" Returns a count on the number of bonds of each type. """
count = {}
for bt in bond_types:
if bt in count.keys():
count[bt] += 1
else:
count[bt] = 1
return count
|
80835f800de7e7cb59f8c0dd05779ef7fab6eba6
| 33,211 |
def encoded_capacity(wmb, data, num_bits):
"""
Returns the total capacity in bits of the bitmask + data
:param wmb: weight mask bits -- bit vectors to indicate non-zero values in original weight matrix M
:param data: Data corresponding to the non-zero elements of matrix M
:param num_bits: number of bits per data value to use
"""
return (data.size()[0]*num_bits + wmb.size()[0])
|
08442eb84c303962ea538995c6920842aa4ff1b3
| 33,213 |
def raise_power(num, pow):
"""Raise the number to the given power"""
# Negative powers are 1 over the positive version
if pow < 0:
result = raise_power(num, -pow)
return 1 / result
# Base cases
if pow == 0:
return 1
if pow == 1:
return num
# Recurse and multiply until reaching 1
return num * raise_power(num, pow - 1)
|
038efd037e19fa22149d866beccc0b4bdd27d448
| 33,214 |
def partition_train_validation_test(data):
"""
Partition a dataset into a training set, a validation set and a testing set
:param data: input dataset
:return: training, validation and testing set
"""
# 60% : modulus is 0, 1, 2, 3, 4, or 5
data_train = [item for item in data if item['hash'] % 10 <= 5]
# 20% : modulus is 6 or 7
data_valid = [item for item in data if item['hash'] % 10 in [6, 7]]
# 20% : modulus is 8 or 9
data_test = [item for item in data if item['hash'] % 10 in [8, 9]]
return data_train, data_valid, data_test
|
4defd18e0dc5ddeb7309b274b707676516901acb
| 33,218 |
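A usage sketch for `partition_train_validation_test` above, assuming each item carries a precomputed integer under the 'hash' key:

data = [{'id': i, 'hash': i} for i in range(20)]   # hypothetical items
train, valid, test = partition_train_validation_test(data)
print(len(train), len(valid), len(test))           # 12 4 4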
def solution(n: int = 600851475143) -> int:
"""
Returns the largest prime factor of a given number n.
>>> solution(13195)
29
>>> solution(10)
5
>>> solution(17)
17
>>> solution(3.4)
3
>>> solution(0)
Traceback (most recent call last):
...
ValueError: Parameter n must be greater than or equal to one.
>>> solution(-17)
Traceback (most recent call last):
...
ValueError: Parameter n must be greater than or equal to one.
>>> solution([])
Traceback (most recent call last):
...
TypeError: Parameter n must be int or castable to int.
>>> solution("asd")
Traceback (most recent call last):
...
TypeError: Parameter n must be int or castable to int.
"""
try:
n = int(n)
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int.")
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one.")
prime = 1
i = 2
while i * i <= n:
while n % i == 0:
prime = i
n //= i
i += 1
if n > 1:
prime = n
return int(prime)
|
2c081a09422ef35c81a6173df6e5cf032cf425e1
| 33,221 |
def interpolate_line(x1, y1, x2, y2):
"""
This functions accepts two points (passed in as four arguments)
and returns the function of the line which passes through the points.
Args:
x1 (float): x-value of point 1
y1 (float): y-value of point 1
x2 (float): x-value of point 2
y2 (float): y-value of point 2
Returns:
callable: the function of the line
"""
if x1 == x2:
raise ValueError("x1 and x2 must be different values")
def f(x):
slope = (y2 - y1) / (x2 - x1)
return slope * (x - x1) + y1
return f
|
71b26e50fb21f22333df7b20ddacf7bc376789cc
| 33,222 |
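A quick example of `interpolate_line` above, building the line through (0, 1) and (2, 5) and evaluating it:

f = interpolate_line(0.0, 1.0, 2.0, 5.0)
print(f(0.0), f(1.0), f(2.0))  # 1.0 3.0 5.0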
def dir_names(dirrepo, panel_id):
"""Defines structure of subdirectories in calibration repository.
"""
dir_panel = '%s/%s' % (dirrepo, panel_id)
dir_offset = '%s/offset' % dir_panel
dir_peds = '%s/pedestals' % dir_panel
dir_plots = '%s/plots' % dir_panel
dir_work = '%s/work' % dir_panel
dir_gain = '%s/gain' % dir_panel
dir_rms = '%s/rms' % dir_panel
dir_status = '%s/status' % dir_panel
return dir_panel, dir_offset, dir_peds, dir_plots, dir_work, dir_gain, dir_rms, dir_status
|
fee6d54795593af4e84497310aae5549d2579c68
| 33,223 |
def is_sequence(item):
"""Whether input item is a tuple or list."""
return isinstance(item, (list, tuple))
|
f00d191f68994043dbab0c483a8af58d102a2a11
| 33,227 |
def remove_at(bar, pos):
"""Remove the NoteContainer after pos in the Bar."""
for i in range(len(bar) - pos):
bar.remove_last_entry()
return bar
|
3814c2d825d5aca8815e62e398a940149f287835
| 33,230 |
def calc_gof(resdf, simvar, datavar):
"""Calculate goodness-of-fit measures for given sim & data vars"""
resdf.loc['error'] = abs(resdf.loc[simvar] - resdf.loc[datavar])
maeom = resdf.loc['error'].mean()/resdf.loc[datavar].mean()
mape = (resdf.loc['error']/resdf.loc[datavar]).mean()
r2 = (resdf.loc[simvar].corr(resdf.loc[datavar])) ** 2
return maeom, mape, r2
|
bf7f290fb6012b09df56c12d137db8914c75a253
| 33,232 |
def fc_params(in_features: int, out_features: int, bias: bool = True):
"""
Return the number of parameters in a linear layer.
Args:
in_features: Size of input vector.
out_features: Size of output vector.
bias: If true count bias too.
Returns:
The number of parameters.
"""
m = out_features + 1 if bias else out_features
return in_features * m
|
0138ae52f101975aba2fd6f38452471a09a2c8e1
| 33,234 |
def _to_float(maybe_float):
"""Safe cast to float."""
try:
return float(maybe_float)
except ValueError:
return maybe_float
|
fa46e030d83dd879b09949d310092cca3c75abca
| 33,237 |
def get_fuel_cost_part2(x1, x2):
"""Get the fuel that would cost move from x1 to x2, when:
Moving from each change of 1 step in horizontal position costs 1 more unit of fuel than the last. This can be easily
computed as a triangular number/binomial coefficient.
"""
steps = abs(x1-x2)
return steps*(steps+1)/2
|
16a975c6ba9bcc9af9a85c857d86e7436b80863b
| 33,238 |
def azimu_half(degrees: float) -> float:
"""
Transform azimuth from 180-360 range to range 0-180.
:param degrees: Degrees in range 0 - 360
:return: Degrees in range 0 - 180
"""
if degrees >= 180:
degrees = degrees - 180
return degrees
|
068f1e059a64f0070d927cee05777a15bc22b01a
| 33,240 |
import torch
def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2
"""
update the current affine parameter A2 based on last affine parameter A1
A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2, results in the composed affine parameter A3=(A2A1, A2*b1+b2)
:param cur_af: current affine parameter
:param last_af: last affine parameter
:return: composed affine parameter A3
"""
cur_af = cur_af.view(cur_af.shape[0], 4, 3)
last_af = last_af.view(last_af.shape[0],4,3)
updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)
dim =3
updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])
updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)
updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)
return updated_af
|
e9a5127ebec4cb404b419446a17b8e949a4d8c04
| 33,246 |
def is_empty(list):
"""Determine if empty."""
return not list
|
9659f43b2724c1e0f3186be8229e903f3f4fec3c
| 33,247 |
from typing import Dict
from typing import Any
def _make_serving_version(service: str, version: str) -> Dict[str, Any]:
"""Creates description of one serving version in API response."""
return {
'split': {
'allocations': {
version: 1,
}
},
'id': service
}
|
a75fa6bdc03ee67d6f4bb6714d94c489072cfa66
| 33,250 |
def signum(x):
"""Sign of `x`: ``-1 if x<0, 0 if x=0, +1 if x>0``."""
if x < 0:
return -1
elif 0 < x:
return +1
else:
return 0
|
858f49fe9d271b18e3a20d157bc793e613b6d73a
| 33,251 |
def detxy2kxy(xdet, ydet, xstart, ystart, x0, y0, fx, fy, xstep, ystep):
"""
Conversion from detector coordinates (xd, yd) to momentum coordinates (kx, ky).
**Parameters**\n
xdet, ydet: numeric, numeric
Pixel coordinates in the detector coordinate system.
xstart, ystart: numeric, numeric
The starting pixel number in the detector coordinate system
along the x and y axes used in the binning.
x0, y0: numeric, numeric
The center pixel position in binned image used in calibration.
fx, fy: numeric, numeric
Scaling factor along the x and y axes (in binned image).
xstep, ystep: numeric, numeric
Binning step size along x and y directions.
"""
xdet0 = xstart + xstep * x0
ydet0 = ystart + ystep * y0
kx = fx * ((xdet - xdet0) / xstep)
ky = fy * ((ydet - ydet0) / ystep)
return (kx, ky)
|
44d353a5c5b5cabeb5a4b9aba8b4a07fc6a3ac2c
| 33,254 |
def get_item_from_dict_by_key(dict_name,
search_term,
search_in,
return_content_of="item"):
"""
Return all items in a dict with a certain field match.
It will normally return the content of the field
'item' which is expected to contain a Q-item.
It is, however, possible to overwrite the name
of the field whose contents should be returned.
@param dict_name: the dictionary to look in
    @param search_term: the value to match
@param search_in: the field in which to look for matching value
@param return_content_of: the field whose content to return
"""
results = []
matches = [x for x in dict_name if x[search_in] == search_term]
if len(matches) == 0:
return []
else:
for match in matches:
results.append(match[return_content_of])
return results
|
e142ad6a017834bc2e3200d5c5350e2bea06c919
| 33,259 |
from typing import List
import itertools
def permutations(raw: str) -> List[str]:
"""Return a list of all unique permutations of a given input string.
In case of an empty string (`''`) a list with an empty string will be returned (`['']`).
Parameters
----------
raw: str
Input string from which the permutations are being generated
Returns
-------
permutations: List[str]
A list of permutation strings based on the input string
"""
return [*set(''.join(tup) for tup in itertools.permutations(raw))]
|
7de84dde64431722fe25b170c6769e9eabc57858
| 33,268 |
def get_colour(temperature: float) -> tuple[float, float, float, float]:
"""Get colour from temperature.
Args:
temperature (float): Temperature in range [-1, 1]
Return
tuple: RGBA tuple
"""
# blending constant
colour_blend = 0.85
alpha_blend = 0.9
# if temperature < 0, the colour is blue
if temperature < 0:
channel = -temperature * colour_blend + (1 - colour_blend)
alpha = -temperature * alpha_blend + (1 - alpha_blend)
return (0, 0, channel, alpha)
# otherwise, the colour is red
channel = temperature * colour_blend + (1 - colour_blend)
alpha = temperature * alpha_blend + (1 - alpha_blend)
return (channel, 0, 0, alpha)
|
dff04ed5023885b127b44632a2f5d49441e8b68e
| 33,269 |
def has_feature(td, feature):
"""
Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.
"""
if (feature in td.get('DISTRO_FEATURES', '') or
feature in td.get('IMAGE_FEATURES', '')):
return True
return False
|
b438b3b84a134c0fbe0c38decd8caedbd1e1087c
| 33,272 |
def is_xml_file(f):
"""Tries to guess if the file is a BNC xml file"""
f.seek(0)
line = f.readline().strip()
while line == '':
line = f.readline().strip()
if line.find('<bncDoc ') != -1:
return True
else:
return False
|
e54318fc5679cc0709db0924f0444be347e7089a
| 33,273 |
def possible_negation_suffix(text: str) -> bool:
"""
Checks if the texts contains a possible negation suffix
:param text: string containing a token
:return: True if the texts ends with a possible negation suffix, False if not
"""
suffixes = ("less",)
# length is mentioned so it doesn't consider "less" as containing the suffix
return text.endswith(suffixes) and len(text) >= 5
|
0cb8c2f81d29e6b836c2b4a2da2198613bb894cd
| 33,275 |
def count_object_methods(obj:object):
"""
get the number of callable object methods
Args:
obj (object): any object
Returns:
int: the number of object class methods
"""
return len([k for k,v in obj.__dict__.items() if callable(v)])
|
20fa1375441b30e119181b25c4d001604d5a6796
| 33,277 |
def t(s):
"""Force Windows line endings to Unix line endings."""
return s.replace("\r\n", "\n")
|
399ee6bccb2207afb79a0a8f74330be541baea6e
| 33,278 |
from typing import OrderedDict
def linear_set_generator(random, args):
"""
    Generates continuous values for a random sample of a representation.
This function requires that a bounder is defined on the EvolutionaryAlgorithm.
See Also
--------
inspyred.ec
Parameters
----------
random : Random
args : dict
representation: set containing the possible values
        max_size: int, default: 9
        variable_size: bool, default: True
Returns
-------
    OrderedDict
        A mapping from sampled element indices to linear values.
        If variable_size is True the sample size is up to max_size,
        otherwise it equals max_size.
"""
bounder = args.get("_ec").bounder
representation = args.get('representation')
max_size = args.get('max_size', 9)
variable_size = args.get('variable_size', True)
if variable_size:
size = random.randint(1, max_size)
else:
size = max_size
indices = random.sample(range(len(representation)), size)
values = random.uniform(next(bounder.lower_bound), next(bounder.upper_bound), len(indices))
return OrderedDict({i: v for i, v in zip(indices, values)})
|
39fef7e79d83d6c281e290e4387829d6f3343410
| 33,280 |
import textwrap
def reindent(content, indent):
"""
Reindent a string to the given number of spaces.
"""
content = textwrap.dedent(content)
lines = []
for line in content.split('\n'):
lines.append(indent + line)
return '\n'.join(lines)
|
b93b59a286e65eff286492315ad35f73ea86a1bd
| 33,282 |
import six
def construct_getatt(node):
"""
Reconstruct !GetAtt into a list
"""
if isinstance(node.value, six.text_type):
return node.value.split(".", 1)
elif isinstance(node.value, list):
return [s.value for s in node.value]
else:
raise ValueError("Unexpected node type: {}".format(type(node.value)))
|
180d3f6ffba403213daf7b61b0381bcf758591df
| 33,285 |
def distanceSquared(a,b):
"""Squared L2 distance"""
if len(a)!=len(b): raise RuntimeError('Vector dimensions not equal')
sum=0
for i in range(len(a)):
sum = sum + (a[i]-b[i])*(a[i]-b[i])
return sum
|
c1875902fba462d2c9a822d66acb1dc2be27c7b8
| 33,287 |
def flatten_nested_covariates(covariate_definitions):
"""
Some covariates (e.g `categorised_as`) can define their own internal
covariates which are used for calculating the column value but don't appear
in the final output. Here we pull all these internal covariates out (which
may be recursively nested) and assemble a flat list of covariates, adding a
`hidden` flag to their arguments to indicate whether or not they belong in
the final output
We also check for any name clashes among covariates. (In future we could
rewrite the names of internal covariates to avoid this but for now we just
throw an error.)
"""
flattened = {}
hidden = set()
items = list(covariate_definitions.items())
while items:
name, (query_type, query_args) = items.pop(0)
if "extra_columns" in query_args:
query_args = query_args.copy()
# Pull out the extra columns
extra_columns = query_args.pop("extra_columns")
# Stick the query back on the stack
items.insert(0, (name, (query_type, query_args)))
# Mark the extra columns as hidden
hidden.update(extra_columns.keys())
# Add them to the start of the list of items to be processed
items[:0] = extra_columns.items()
else:
if name in flattened:
raise ValueError(f"Duplicate columns named '{name}'")
flattened[name] = (query_type, query_args)
for name, (query_type, query_args) in flattened.items():
query_args["hidden"] = name in hidden
return flattened
|
8f9d92ba1c7baa1a0d6c7153d2bf2b577d49ff70
| 33,288 |
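A usage sketch for `flatten_nested_covariates` above with a hypothetical nested definition: "age_band" depends on an internal "age" covariate supplied via `extra_columns`, which ends up flagged as hidden:

covariates = {
    "age_band": (
        "categorised_as",
        {
            "extra_columns": {
                "age": ("age_as_of", {"reference_date": "2020-01-01"}),
            },
        },
    ),
}
flat = flatten_nested_covariates(covariates)
for name, (query_type, query_args) in flat.items():
    print(name, query_type, query_args["hidden"])
# age age_as_of True
# age_band categorised_as False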
def get_field_name(data, original_key, alternative_value):
"""
    check which column name is used in the BioSamples record; if neither provided column name is found, return ''
:param data: one BioSamples record in JSON format retrieved from API
:param original_key: field name to be checked
:param alternative_value: alternative field name to be checked
:return: either original key or alternative value if found in the data, if not found return ''
"""
# biosamples record always has characteristics section
if original_key not in data['characteristics']:
if alternative_value in data['characteristics']:
return alternative_value
else:
return ''
return original_key
|
07b9f01e5d1e0fe58a654c4ea55287a744cd291f
| 33,290 |
def generate_tikz_foot(tikzpic=True):
"""
generates end of tex-file
:param tikzpic: if True include end{tikzpicture} before end{document}
:return: tex-foot
"""
if tikzpic:
tikzCode = '''
\\end{tikzpicture}
\\end{document}'''
else:
tikzCode = '''
\\end{document}
'''
return tikzCode
|
efac23b349cce2f9031cfa2eb5edbb48d47047fd
| 33,292 |
def remove_quotes(val):
"""Helper that removes surrounding quotes from strings."""
if val[0] in ('"', "'") and val[0] == val[-1]:
val = val[1:-1]
return val
|
11a8c26e5b261e75a08ae9b11d9c4e39f07da4c3
| 33,293 |