content (string, lengths 39-9.28k) | sha1 (string, length 40) | id (int64, 8-710k) |
---|---|---|
def mk_id(identifier):
"""Make an id from a string such as 'C0326'."""
return identifier
|
de196c51e566a6c7eb2277952d513a50075a5997
| 569,078 |
def getEndJoints(jnt):
"""
Recurse through children and return all joints that have no joint children.
Args:
jnt (PyNode): A joint node
"""
result = []
children = jnt.listRelatives(children=True, typ='joint')
if not children:
result.append(jnt)
else:
for child in children:
result.extend(getEndJoints(child))
return result
|
7d24cdef7b9d8b4e31eda2a83ec52c022a91774e
| 101,717 |
def map_diameter(c: int) -> float:
""" Compute the diameter """
return 1 / 3 * (c + 1) * (c - 1)
|
fb3f19478901f40b52af11c6d200f125d3716112
| 687,700 |
def sentence_error(source, target):
"""
Evaluate whether the target is identical to the source.
Args:
source (str): Source string.
target (str): Target string.
Returns:
        int: 0 if the target is equal to the source, 1 otherwise.
"""
return 0 if target == source else 1
|
02cfc350763b176d0296007ef44321674ef65d27
| 53,351 |
def middle(seq):
"""Return middle item of a sequence, or the first of two middle items."""
return seq[(len(seq) - 1) // 2]
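# Illustrative usage: with an even-length sequence the first of the two
# middle items is returned, e.g. middle([1, 2, 3, 4]) -> 2.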
|
736b6aed9a8e03f0581f2d7892c5f370f0886285
| 19,268 |
import re
def exclude_filter(excl_filter, paths):
"""
    Matches a set of paths against an exclude filter, removing those that match.
param: excl_filter: The filter to match.
param: paths: The set of paths to match against the filter.
returns: A set of paths which do not match the filter.
"""
misses = set()
for p in paths:
if re.search(excl_filter, p) is None:
misses.add(p)
return misses
|
4332ab8c75e71592ace91a614f73ce260a3895a0
| 40,721 |
def convert_range_to_list(node_range):
"""
Convert a number range to a list.
Example input: Input can be like one of the format: "1-3", "1-2,6", "2, 8"
Example output: [1, 2, 3]
"""
return sum(
(
(list(range(*[int(j) + k for k, j in enumerate(i.split("-"))])) if "-" in i else [int(i)])
for i in node_range.split(",")
),
[],
)
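# How the range trick works (illustrative): for "1-3", enumerate yields
# (0, "1") and (1, "3"), so int(j) + k gives [1, 4] and range(1, 4) expands
# to [1, 2, 3]. For example, convert_range_to_list("1-3,6") -> [1, 2, 3, 6].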
|
11942990fdc709d3f53c2b045feb799c9f56d5a4
| 201,396 |
def read_info_file(info_file, include_labels=False):
"""
    It reads an info txt file and returns the list of image paths and, optionally, the list of label paths.
The info file has an entry in each line in the form: IMAGE_PATH LABEL_PATH.
:param info_file: the path to the info txt file .
:param include_labels: if True the list of labels is returned together with the list of images. If False just the
list of images is returned.
:return: the list of image paths and the list of label paths (if include_labels is True).
"""
# open the info file and convert to a list of lines.
with open(info_file) as f:
lines = [line.rstrip('\n') for line in f]
images = []
labels = []
# Add each file path to the respective list
    for line in lines:
        parts = line.split(" ")
        images.append(parts[0])
        labels.append(parts[1])
if include_labels:
return images, labels
else:
return images
|
32e24530cfc3ed4a794e773e5d89054ab0accf66
| 506,897 |
import unicodedata
def strip_accents(eval_ctx, value):
"""Strip non ASCII characters and convert them to ASCII."""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode("utf-8")
|
c3287d533d8e66adfe892f9d7f07311331257b05
| 470,435 |
import re
def load_tolerances(fname):
""" Load a dictionary with custom RMS limits.
Dict keys are file (base)names, values are RMS limits to compare.
"""
regexp = r'(?P<name>\w+\.png)\s+(?P<tol>[0-9\.]+)'
dct = {}
with open(fname, 'r') as f:
for line in f:
match = re.match(regexp, line)
if match is None:
continue
dct[match.group('name')] = float(match.group('tol'))
return dct
|
60af52ec49cadfdb5d0f23b6fa5618e7cc64b4c2
| 27,897 |
def tensor_image_converter(tensor):
"""
Converts PyTorch Tensor to Numpy Array (Image)
"""
tensor = tensor.squeeze()
if len(tensor.shape) > 2:
tensor = tensor.permute(1, 2, 0)
img = tensor.detach().cpu().numpy()
return img
|
cae23789e08c2a6266c1307c3ae2c490da08b0e6
| 399,913 |
def calculate_number_of_conditions(conditions_length, max_conditions):
"""
Every condition can hold up to max_conditions, which (as of writing this) is 10.
Every time a condition is created, (max_conditions) are used and 1 new one is added to the conditions list.
This means that there is a net decrease of up to (max_conditions-1) with each iteration.
This formula calculates the number of conditions needed.
x items in groups of y, where every group adds another number to x
Math: either math.ceil((x-1)/(y-1))
or math.floor((x+(y-1)-2)/(y-1)) == 1 + (x-2)//(y-1)
:param int conditions_length: total # of conditions to handle
:param int max_conditions: maximum number of conditions that can be put in an Fn::Or statement
:return: the number (int) of necessary additional conditions.
"""
num_conditions = 1 + (conditions_length - 2) // (max_conditions - 1)
return num_conditions
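# Worked example (illustrative): 19 conditions with max_conditions=10 need
# 1 + (19 - 2) // 9 = 2 additional conditions, matching ceil((19 - 1) / (10 - 1)).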
|
547bbdf43256d1a0b8514bb0d3fd8cb93f29c1dd
| 112,805 |
def get_integer_selection(description, min_value, max_value):
"""
Asks the user to select an integer between min_value and max_value
"""
print(description)
# Initially there is no valid selection
valid_selection = False
selection = min_value - 1 # invalid selection
# Continue to prompt user while there is no valid selection
while not valid_selection:
try:
selection = int(input(f"Please enter a number between {min_value} and {max_value}\n"))
except ValueError:
            # Inform the user that the entered value was not an integer
print("Invalid selection! Please enter an Integer")
continue
# Check whether the given value is in the range
if min_value <= selection <= max_value:
valid_selection = True
else:
print("Invalid selection! Number not between the specified bounds")
# Return the Selection
return selection
|
f0f5bbea39f72106c0d848e14dc322f5ac97957d
| 557,444 |
def expand_box(box, img_shape, scale=None, padding=None):
"""Expand roi box
Parameters
----------
box : list
[x, y, w, h] order.
img_shape : list
[width, height]
scale : float, optional
Expand roi by scale, by default None
padding : int, optional
Expand roi by padding, by default None
Returns
-------
expanded roi: list
[x, y, w, h] order.
"""
x, y, w, h = box
wmax, hmax = img_shape
if scale is not None:
xo = max([x - (scale - 1) * w / 2, 0])
yo = max([y - (scale - 1) * h / 2, 0])
wo = w * scale
ho = h * scale
elif padding is not None:
xo = max(x - padding, 0)
yo = max(y - padding, 0)
wo = w + padding * 2
ho = h + padding * 2
else:
xo, yo, wo, ho = x, y, w, h
if xo + wo >= wmax:
wo = wmax - xo - 1
if yo + ho >= hmax:
ho = hmax - yo - 1
return [int(xo), int(yo), int(wo), int(ho)]
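# Illustrative usage: doubling a 10x10 box at (50, 50) inside a 100x80 image.
# expand_box([50, 50, 10, 10], [100, 80], scale=2)  # -> [45, 45, 20, 20]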
|
3fd9c97b8baa70a89b898d3e9d14e8c930d0045e
| 17,029 |
def factorial2(num):
"""
return factorial without recursion
:param num: the number
:return: factorial
"""
product = 1
for n in range(2, num + 1):
product *= n
return product
|
c4dd33751fc14c0238fefa395e804837db0588e6
| 508,454 |
def task_accuracy_metrics(reward_list):
""" Accuracy as percentage of examples that received rewards """
accuracy = sum(reward_list)*100/float(len(reward_list))
#print("Total Reward: %s, Accuracy: %s %%"%(sum(reward_list),accuracy))
return accuracy
|
43b3cca9f9fa4c723117df1f2bca332790232e61
| 516,195 |
import socket
import fcntl
import struct
def get_ip_address(network_iface):
"""
Returns the ip address of a specific network interface. Returns None if
it could not be determined.
"""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # 0x8915 is SIOCGIFADDR: query the interface's IPv4 address.
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(), 0x8915, struct.pack('256s', network_iface[:15].encode('utf-8'))
        )[20:24])
    except OSError:
        return None
|
10f4e88e158fc8e260fbf4adf21182dc0e419480
| 165,168 |
import re
def match_scene_name(scene_name):
"""
Args:
scene_name (str): FloorPlanXX-<random_seed(int) | default>
    Returns a [scene_name, seed] list (seed may be 'default') if the
    format matches, otherwise None."""
m = re.match("^FloorPlan[0-9]+-([0-9]+|default)$", scene_name)
if m is not None:
return m.group().split("-")
return None
|
d8d2de675e102984007e735992fb9c0696d4845c
| 20,023 |
def compile_vprint_function(verbose):
"""Compile a verbose print function
Args:
verbose (bool): is verbose or not
Returns:
[msg, *args]->None: a vprint function
"""
if verbose:
return lambda msg, *args: print(msg.format(*args))
return lambda *_: None
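# Illustrative usage:
# vprint = compile_vprint_function(verbose=True)
# vprint("loaded {} records", 42)  # prints "loaded 42 records"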
|
d2fe3b93b09011f63df54eb162e270303c328cb9
| 695,513 |
def find_elements(node, xpath, allow_zero=True, allow_multiple=True):
"""Attempt to find child elements in a node by xpath. Raise exceptions if conditions are violated. Return a
(possibly empty) list of elements."""
all_elements = node.findall(xpath)
if (len(all_elements) == 0 and not allow_zero) or (len(all_elements) > 1 and not allow_multiple):
raise AssertionError(f'Found {len(all_elements)} instances of {xpath} in {node}, which is not allowed')
return all_elements
|
2255fcd63f35837c647dd6dca81ab648d59addc8
| 703,996 |
def create_parameter(model, parameter_id, value, constant=True, units="dimensionless"):
"""
    Creates a new parameter for a libsbml.Model. It is not necessary to use
    the return value: the parameter is added to the input model as a side
    effect of model.createParameter().
Parameters
----------
model : libsbml.Model
        Model for which the parameter will be created.
parameter_id : str
Id for the new parameter.
constant : {True, False}
True if the new parameter can only be constant. And False if not.
False does not mean that the parameter has to change but rather that
it is allowed to.
value : float
Value of the parameter.
units : str
Units of the parameter.
Returns
-------
parameter : libsbml.Parameter
Parameter defined for the model.
"""
parameter = model.createParameter()
parameter.setId(parameter_id)
parameter.setName(parameter_id)
parameter.setConstant(constant)
parameter.setValue(value)
parameter.setUnits(units)
return parameter
|
fa7206e53941aa8bfe81f5f1ee1cd764c3c12668
| 675,418 |
def file_line(mark):
"""Format a mark as <file>:<line> information."""
result = mark.name
if mark.line:
result += ':' + str(mark.line)
return result
|
d4f466c5f627df42de19df39b49bc63e82f7769f
| 253,410 |
def slqs_sub(x_entr, y_entr):
"""
Computes SLQS Sub score from two entropy values.
:param x_entr: entropy value of x
:param y_entr: entropy value of y
:return: SLQS Sub score
"""
score = y_entr - x_entr
return score
|
2afd4e805c0fbd295bfb63b588634244cf27deef
| 462,387 |
def get_wind_direction(degree):
"""Convert wind degree to direction."""
DEGREES = [-11.25, 11.25, 33.75, 56.25,
78.75, 101.25, 123.75, 146.25,
168.75, 191.25, 213.75, 236.25,
258.75, 281.25, 303.75, 326.25, 348.75]
DIRECTIONS = ['N', 'NNE', 'NE', 'ENE',
'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW',
'W', 'WNW', 'NW', 'NNW']
# North wind correction.
if degree > 348.75:
degree -= 360
for i in range(len(DIRECTIONS)):
left_border = DEGREES[i]
right_border = DEGREES[i + 1]
if left_border < degree <= right_border:
return DIRECTIONS[i]
|
110a08df997a7895becd7db49a38ca0f3b6655e7
| 557,528 |
def first(seq):
"""Return first item from a sequence."""
if isinstance(seq, (list, tuple)):
return seq[0]
return next(seq)
|
50e228586d17709c5ae35cff321fa7fb6b8efb03
| 298,806 |
import torch
def complex_mult_torch(X, Y):
""" Computes the complex multiplication in Pytorch when the tensor last dimension is 2: 0 is the real component and 1 the imaginary one"""
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
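# A minimal sanity check (illustrative): (1 + 2i) * (3 + 4i) = -5 + 10i.
# X = torch.tensor([[1.0, 2.0]]); Y = torch.tensor([[3.0, 4.0]])
# complex_mult_torch(X, Y)  # -> tensor([[-5., 10.]])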
|
2ede27dcc061d25b15f818544ae5b6e55196dad7
| 562,661 |
from typing import Optional
import traceback
def format_exc(exc: Optional[int] = None) -> str:
"""Format a traceback."""
return traceback.format_exc(exc)
|
3d3df2adaca14828f393a4cb0e6a5bfcbae158e8
| 627,310 |
import json
def fetch_records(board_file):
"""Import records from servo_ina file.
    Board files are JSON files containing a list of tuples with
    the INA data:
    (name, rs, sweetberry_num, net_name, channel)
Args:
board_file: board file
Returns:
list of tuples as described above.
"""
data = None
with open(board_file) as f:
data = json.load(f)
return data
|
9c3fd8a6b3016faca72de5df96fafe5f76fb9810
| 435,534 |
def gram_size(term):
"""
Convenience func for getting n-gram length.
"""
return len(term.split(' '))
|
8684294afafcb68237e268e05229235bfee00e4c
| 192,079 |
def find_swift_version_copt_value(copts):
"""Returns the value of the `-swift-version` argument, if found.
Args:
copts: The list of copts to be scanned.
Returns:
The value of the `-swift-version` argument, or None if it was not found in
the copt list.
"""
# Note that the argument can occur multiple times, and the last one wins.
last_swift_version = None
count = len(copts)
for i in range(count):
copt = copts[i]
if copt == "-swift-version" and i + 1 < count:
last_swift_version = copts[i + 1]
return last_swift_version
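# Illustrative usage; the last occurrence wins:
# find_swift_version_copt_value(["-swift-version", "4", "-swift-version", "5"])  # -> "5"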
|
53d2ae0aa4f0e20373df441f1dc6c326525141f2
| 247,008 |
import uuid
def url_to_uuid5(url):
"""Generate a UUID5 for a given URL."""
    # uuid5 expects a str name in Python 3; passing bytes raises TypeError.
    return str(uuid.uuid5(uuid.NAMESPACE_URL, url))
|
688aaac5d7ad564eabb0de2adc23cf94c820bdc9
| 464,919 |
from typing import List
import torch
def average_checkpoints(
checkpoint_paths: List[str],
):
"""
Average a list of checkpoints' state_dicts.
Parameters
----------
checkpoint_paths
A list of model checkpoint paths.
Returns
-------
The averaged state_dict.
"""
avg_state_dict = {}
for per_path in checkpoint_paths:
state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"]
for key in state_dict:
if key in avg_state_dict:
avg_state_dict[key] += state_dict[key]
else:
avg_state_dict[key] = state_dict[key]
del state_dict
num = torch.tensor(len(checkpoint_paths))
for key in avg_state_dict:
avg_state_dict[key] = avg_state_dict[key] / num.to(avg_state_dict[key])
return avg_state_dict
|
06dee6e4cc1a26e09fddb781855427a94a3d5cbf
| 395,261 |
def bracket(v, low=0, high=255):
"""Simply brackets a value between 0 and 255."""
return max(low, min(high, v))
|
e921d8a9fc792bb4dc6eebad32993b220bdb4d51
| 364,105 |
def latexify(ticklabels):
"""Manually set LaTeX format for tick labels."""
return [r"$" + str(label) + "$" for label in ticklabels]
|
83b310381b4363ac15e3802112aea2d5b6201f85
| 395,866 |
from collections import OrderedDict
def load_vocab(fin):
"""
Load vocabulary from vocab file created by word2vec with
``-save-vocab <file>`` option.
Args:
fin (File): File-like object to read from.
Returns:
OrderedDict: Mapping from a word (``bytes``) to the number of
appearance in the original text (``int``). Order are preserved from
the original vocab file.
"""
vocab = OrderedDict()
for line in fin:
v, c = line.strip().split()
vocab[v] = int(c)
return vocab
|
864f1de89e9dfe0a6b0ec0215e1bafeb5ed3d634
| 206,109 |
def block_search(dict_or_list, block_name, default=None):
"""Find block_name in dict_or_list where block_name in the form 'keya.keyb.keyc'"""
current = dict_or_list
if block_name is None or block_name == "" or block_name == ".":
return current
for part in block_name.split("."):
if isinstance(current, list):
try:
idx = int(part)
except ValueError:
return default
if 0 <= idx < len(current):
current = current[idx]
else:
return default
elif isinstance(current, dict):
current = current.get(part)
if current is None:
return default
return current
|
bf0f5188ac092ad9b0850d4c67ad8103322910b6
| 463,927 |
import re
def clean_kql_query(query_string: str) -> str:
"""
Return kql query stripped of comments and newline characters.
Parameters
----------
query_string : str
Input query
Returns
-------
str
Cleaned query.
"""
    remove_comments = re.sub(r"(//[^\"\'\n]+)", " ", query_string, flags=re.MULTILINE).strip()
# get rid of newlines and returns
return re.sub(r"(\s*\n\s*)", " ", remove_comments)
|
3be00366335e8f82d60935273ae128c76e4bded2
| 260,515 |
def compute_score_of(winner: list) -> int:
"""Compute score of winning deck."""
return sum((idx + 1) * card for idx, card in enumerate(winner[::-1]))
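# Illustrative usage, assuming a card-game deck where the bottom card is
# worth its face value times 1, the next times 2, and so on:
# compute_score_of([3, 2, 10, 6, 8, 5, 9, 4, 7, 1])  # -> 306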
|
36c238bf5a3aeed431adfb586b4cf089f668148a
| 331,859 |
def fmt_enum_repr(fmt: str, enum_type, enum_val):
"""Append repr of a given enum type to a format string.
Arguments:
fmt - Format string
enum_type - Enum Type to construct.
enum_val - Enum value.
Returns:
formatted string
"""
return fmt.format(repr(enum_type(enum_val)))
|
eba614c155e7609d41ac706a27594c70fb840115
| 76,910 |
def title_contains(title):
""" An expectation for checking that the title contains a case-sensitive
substring. title is the fragment of title expected
returns True when the title matches, False otherwise
"""
def _predicate(driver):
return title in driver.title
return _predicate
|
e1372da4bd9ce34ccc02fbd15aa6b76695cb5cd2
| 356,827 |
import math
def _brevity_penalty(candidate, references):
"""Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a terse candidate of the length 12. The brevity penalty is 1.
>>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]
>>> candidate = ['a'] * 12
>>> _brevity_penalty(candidate, references)
1.0
In case a candidate translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> candidate = ['a'] * 12
>>> _brevity_penalty(candidate, references)
0.2635...
The length of the closest reference is used to compute the penalty. If the
length of a candidate is 12, and the reference lengths are 13 and 2, the
    penalty is applied because the candidate length (12) is less than the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> candidate = ['a'] * 12
>>> _brevity_penalty(candidate, references)
0.92...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> candidate = ['a'] * 12
>>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> candidate = ['a'] * 7
>>> _brevity_penalty(candidate, references)
0.86...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> candidate = ['a'] * 7
>>> _brevity_penalty(candidate, references)
1.0
"""
c = len(candidate)
ref_lens = (len(reference) for reference in references)
r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
if c > r:
return 1
else:
return math.exp(1 - r / c)
|
b56c53730b2a90581a89f8cffa51f34c5d643984
| 680,563 |
import torch
def one_hot(y, K, smooth_eps = 0): # pylint: disable=invalid-name
"""One-hot encodes a tensor with optional label smoothing.
Args:
y: A tensor containing the ground-truth labels of shape (N,), i.e. one label
for each element in the batch.
K: The number of classes.
smooth_eps: Label smoothing factor in [0, 1] range.
Returns:
A one-hot encoded tensor.
"""
assert 0 <= smooth_eps <= 1
assert y.ndim == 1, "Label tensor must be rank 1."
    # Build the identity on y's device so CUDA label tensors can index it.
    y_hot = torch.eye(K, device=y.device)[y] * (1 - smooth_eps) + (smooth_eps / (K - 1))
    return y_hot
|
ee47f9c778d875834c49c098ded4936edb104887
| 13,113 |
def area_from_district(district_name):
"""Strip a trailing " District" from a string."""
return district_name.strip().split(" District")[0]
|
4cb6700f9592a914bb75da88e538f3bd22b9528c
| 271,872 |
import typing
def find(predicate: typing.Callable,
iterable: typing.Iterable):
"""
Finds the first element in an iterable matching a predicate.
:param predicate: predicate that returns true for any match.
:param iterable: iterable to iterate over.
:return: the first match; None if there is not one available.
"""
for element in iterable:
if predicate(element):
return element
return None
|
14fc2b6b9ca2af1265f12b918c43bf436282d6b2
| 266,647 |
def _make_result_path(result):
"""Create a path for the result, using the fspath and test id"""
result_path = ""
if result.get("metadata") and result["metadata"].get("fspath"):
fspath = result["metadata"]["fspath"]
if fspath.startswith("../"):
fspath = fspath[3:]
if fspath.startswith("./"):
fspath = fspath[2:]
result_path = "{}::".format(fspath)
result_path += result["test_id"]
return result_path
|
825f506b4923245cf65197479e4c2f6cc8546f71
| 131,966 |
def binary_string_to_num(bvalue):
""" convert from binary string (as list of chars)to int """
return int("".join(bvalue), 2)
|
d560ab52896ebdf550403a15be39a5139703e48f
| 523,084 |
import math
def truncate_middle(content, max_length, middle="..."):
"""
    Truncates the middle part of the string if the total length is too long.
For example:
truncate_middle('testabcdecho', 8) == 'tes...ho'
:param content: The string that must be truncated.
:type: str
:param max_length: The maximum amount of characters the string can have.
:type max_length: int
:param middle: The part that must be added in the middle if the provided
content is too long.
    :type middle: str
:return: The truncated string.
:rtype: str
"""
if len(content) <= max_length:
return content
if max_length <= len(middle):
raise ValueError(
"The max_length cannot be lower than the length if the " "middle string."
)
total = max_length - len(middle)
start = math.ceil(total / 2)
end = math.floor(total / 2)
left = content[:start]
right = content[-end:] if end else ""
return f"{left}{middle}{right}"
|
16f9ec8e6210ab2820a78328e2f21ec6d636b787
| 664,347 |
def iqr(series):
"""
    Extract the interquartile range (25%-75%).
    Input
    ---
    series : pandas series
    Returns
    ---
    interquartile range (25%-75%)
"""
Q1 = series.quantile(0.25)
Q3 = series.quantile(0.75)
IQR = Q3 - Q1
return IQR
|
1a5f8348dcece3a9b297a34f95f092469c2fda4f
| 426,462 |
def triple_split(triple):
"""Split target triple into parts.
"""
arch, vendor, os = triple.split('-', 2)
if '-' in os:
os, env = os.split('-', 1)
else:
env = ''
return arch, vendor, os, env
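# Illustrative usage:
# triple_split("x86_64-unknown-linux-gnu")  # -> ('x86_64', 'unknown', 'linux', 'gnu')
# triple_split("x86_64-apple-darwin")       # -> ('x86_64', 'apple', 'darwin', '')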
|
bdfbe48fdd32fc7a5a52cf3750d4d83e37549d32
| 98,393 |
def gray_code(N):
""" Generate a Gray code for traversing the N qubit states. """
if N <= 0 or type(N) is not int:
raise ValueError("Input for gray code construction must be a positive integer.")
if N == 1: # Base case
return ["0", "1"]
else:
sub_code = gray_code(N-1)
return ["0" + x for x in sub_code] + ["1" + x for x in sub_code[::-1]]
|
148a10bb355592b7b5913e5791c90b35e2470898
| 538,930 |
def scd_session_cm(pytestconfig) -> bool:
"""True iff SCD auth1 user is authorized for constraint management"""
return pytestconfig.getoption('scd_auth1_cm')
|
ca5926db377d6b9512f4e34ae296dc2d18a00903
| 529,044 |
def image_index(pathname):
"""
Helper function for submission. Takes the path to an image, e.g.
"./image_folder/15.jpg", and returns the image name, in this example
"15.jpg".
"""
return pathname[pathname.rfind("/")+1:]
|
ce2c047ee8367cf6457ca9316eb8487314101c28
| 576,714 |
import hashlib
def get_path_from_args(args):
""" Returns a unique hash for an argparse object. """
args_str = str(args)
path = hashlib.md5(args_str.encode()).hexdigest()
return path
|
e09669d5e09375083af25c03ca250776352295b5
| 456,842 |
def sum_digits(s):
"""
    Assumes s is a string.
Returns an int that is the sum of all of the digits in s.
If there are no digits in s it raises a ValueError exception.
"""
sum_of_digits = 0
count = 0
for item in s:
        if item in '0123456789':
sum_of_digits += int(item)
count += 1
if not count:
raise ValueError
return sum_of_digits
|
40fb5868eccf3d6840177273ce2084da5238d3cd
| 493,503 |
def performanceCalculator(count, avg, std, maxv, countref, avgref, stdref, maxvref):
"""
===========================================================================
Performance calculator function
===========================================================================
Calculate performance based on reference values.
If some value is None return None
**Args**:
* count : actual number of samples -- (int)
* avg : actual duration average -- (float)
        * std : actual duration standard deviation -- (float)
* maxv : actual duration max value -- (float)
* countref : reference number of samples -- (int)
* avgref : reference duration average -- (float)
        * stdref : reference duration standard deviation -- (float)
* maxvref : reference duration max value -- (float)
**Returns**:
performance value indicator. [0-1] -- (float)
"""
    if avgref is None or stdref is None or maxvref is None:
        return None
if stdref < 0.01: stdref = 0.01
f = (1-((avg - avgref) / (stdref*2)))*0.9
if f > 1: f=1
if f < 0: f=0
return f
|
92f98baa720f19a1e6f5dbcdb610b38036c49c6c
| 681,655 |
def parse_csv_data(csv_filename) -> list:
"""Reads the csv file provided and formats it into a list of strings
    Arguments:
csv_filename {csv} - a file containing covid data including the following parameters:
areaCode,areaName,areaType,date,cumDailyNsoDeathsByDeathDate,
hospitalCases,newCasesBySpecimenDate
Returns:
        covid_csv_data {list} - a list of rows, each a list of string fields
"""
covid_csv_data = [] # declare covid_csv_data as empty list
# iterates through every line in the csv file, and appends to covid_csv_data
with open(csv_filename, encoding="utf-8") as csv_file:
for row in csv_file:
            line = row.strip().split(",")
covid_csv_data.append(line)
return covid_csv_data
|
8e457c9756370959db7a087fa73bac8398bb01c7
| 348,636 |
import re
def dict_from_sdkconfig(path):
"""
Parse the sdkconfig file at 'path', return name:value pairs as a dict
"""
regex = re.compile(r"^([^#=]+)=(.+)$")
result = {}
with open(path) as f:
for line in f:
m = regex.match(line)
if m:
result[m.group(1)] = m.group(2)
return result
|
ac52b59f2179033ee265d3ea0b8dbdaabfc7740e
| 619,199 |
def compute_poi_email_ratio(poi_messages, all_messages):
""" FEATURE
given a number messages to/from POI (numerator)
and number of all messages to/from a person (denominator),
return the ratio of messages to/from that person
that are from/to a POI
"""
ratio = 0.
if type(poi_messages) is int and type(all_messages) is int and poi_messages > 0 and all_messages > 0:
ratio = float(poi_messages) / float(all_messages)
return ratio
|
982408618f8b56bffabf0030e31fe2e9dd37c6b0
| 515,804 |
import inspect
def get_functions(mod):
"""
Get a list of functions for the given module.
:param mod: The module to inspect for functions.
:type mod: types.ModuleType
:return: The list of functions.
:rtype: list[types.FunctionType]
"""
return [o[0] for o in inspect.getmembers(mod, inspect.isfunction)]
|
78a7807760086b65aefdc45be7c7c39d95e5ba9d
| 101,316 |
def get_kwargs(accident_data, field):
"""
Build kwargs from accident data for a specific field.
Default is one pair: value = field_value_as_string
"""
if field == '\xef\xbb\xbfAccident_Index':
return {'acc_index': accident_data[field]}
if field == 'Date':
return {'date': accident_data['Date'], 'time': accident_data['Time']}
if field == 'Junction_Control':
return {'junction_control': accident_data[field], 'special_conditions': accident_data['Special_Conditions_at_Site']}
return {'value': accident_data[field]}
|
f509746c61fb5a28f297dfed410609d94f79e128
| 285,580 |
def extract_ip_from_oid(oid):
"""Given a dotted OID string, this extracts an IPv4 address from the end of it (i.e. the last four decimals)"""
return ".".join(oid.split(".")[-4:])
|
e34325d518891552ca8f113c742ed4bd2d3a9f5b
| 368,573 |
import re
def inject_parameters(file_contents: str, parameters: dict):
"""
Replace placeholders in a file with the provided dictionary
:param file_contents: String containing expected file contents
:param parameters: Dictionary of parameters {placeholder: value}
:return: Parsed/injected file
"""
if not parameters:
return file_contents
else:
for key, val in parameters.items():
file_contents = re.sub(rf'\[{key}]', val, file_contents, flags=re.IGNORECASE)
remaining_placeholders = re.findall("|".join([rf'\[{key}]' for key in parameters.keys()]), file_contents)
if remaining_placeholders:
raise ValueError(f"Unable to replace some placeholder values: {', '.join(remaining_placeholders)}")
return file_contents
|
80c9023e57cdf4962cdd940a8de6c732fba637f9
| 548,157 |
import uuid
import requests
def get_dns_records(params):
"""Return dictionary of all DNS records in Dreamhost account."""
url = "https://api.dreamhost.com/" + \
"?key=" + str(params["api_key"]) + \
"&cmd=dns-list_records" + \
"&unique_id=" + str(uuid.uuid4()) + \
"&format=json"
r = requests.get(url)
r.raise_for_status()
records = r.json()
if "data" in records:
return records["data"]
else:
raise ValueError("Error getting records from Dreamhost.")
|
5904177877b5daecffe1c6cfe8f76bce367e59e8
| 362,135 |
def get_clipped_freq(records_l, records_r, records_s, minclip=4):
"""
Get the number of reads that are clipped over the breakpoints in a meaningful way. For the left and right
breakpoints, look for clipping in the SV. For the single breakpoint, look for clipping on either side of the read.
:param records_l: Records over the left breakpoint.
:param records_r: Records over the right breakpoint.
:param records_s: Records over the single breakpoint.
:param minclip: Minimum number of bases clipped to count the read as clipped.
:return: A tuple of two elements: Proportion of reads clipped over the left and right breakpoints, and proportion
of reads clipped over the single breakpoint (in that order).
"""
if len(records_l) > 0 or len(records_r) > 0:
clipped_lr = (
len([True for key in records_l if records_l[key].clip_r >= minclip]) +
len([True for key in records_r if records_r[key].clip_l >= minclip])
) / (len(records_l) + len(records_r))
else:
clipped_lr = 0
if len(records_s) > 0:
clipped_s = (
len([True for key in records_s if records_s[key].clip_l >= minclip or records_s[key].clip_r >= minclip]) /
len(records_s)
)
else:
clipped_s = 0
return clipped_lr, clipped_s
|
e3fe7374db78d4db2303aec2807e9e7d218cbd77
| 617,019 |
def getSyndrome(H, v):
"""
    Calculates the syndrome given a parity-check matrix and a codeword.
    :param H: parity-check matrix (numpy matrix)
    :param v: codeword as a numpy column vector
    :return: the syndrome, reshaped to a 1x11 row vector
"""
s = (H*v) % 2
return s.reshape(1, 11)
|
f3f44ce537f3d740a48b9565cfa3e4fc48067236
| 97,336 |
def remove_empty_str(org_list):
"""Remove empty strings from list."""
return [i for i in org_list if i != ""]
|
95fa23a32516bc6bdaa0846d54ca500aeb41a17c
| 234,338 |
def escape_eid(eid):
"""Replace slashes with underscores, to avoid recognizing them
as directories.
"""
return eid.replace('/', '_')
|
505ad3fd245ee9744d9b4ad05547c57373e898fd
| 201,669 |
def latest(scores):
"""Return the last score of the player"""
return scores[-1]
|
c9a1d3e55a64e002fc637449f0a0017378b02759
| 396,178 |
def get_path_from_finish(current):
"""Traces back the path through parent-nodes"""
backwards = []
while current:
backwards.append(current.location)
current = current.parent
backwards.reverse()
return backwards
|
212748572ec0cd7e8ab3df09162d95d3169c95b4
| 390,752 |
from typing import Dict
def calc_post_depths_from_thread_structure(thread_structure: Dict) \
-> Dict[str, int]:
"""Calculates the nested depth of each post in a thread.
We determine post depth from the provided `structure.json` files in the
dataset because this is easier than following the chain of a post's
parents to the source post of a thread.
Args:
thread_structure: The parsed JSON dict from one of the dataset's
`structure.json` files.
Returns:
A dictionary mapping post IDs to their nested depth. The source
post of a thread always has depth `0`, first level replies `1`, etc.
Example:
If the `thread_structure` would look like the following::
{
'foo': {
'bar': [],
'baz': {
'boogy': []
},
'qux': []
}
}
The parsed post depths would be::
{
'foo': 0,
'bar': 1,
'baz': 1,
'boogy': 2,
'qux': 1
}
"""
post_depths = {}
def walk(thread: Dict, depth: int) -> None:
for post_id, subthread in thread.items():
post_depths[post_id] = depth
if isinstance(subthread, Dict):
walk(subthread, depth + 1)
walk(thread_structure, 0)
return post_depths
|
11687977dfa107b9b94d8c097e750825e98a479e
| 83,558 |
def is_request_to_send(data):
""" Check if a packet is a request-to-send packet. """
return len(data) == 45 and ord(data[25]) == 0xb4 and ord(data[26]) == 0x00
|
93d07ecdb1919a39385acce9cd8cf09869268ec1
| 262,099 |
def get_segment_token_offsets(segment_token_list, token_map):
"""
given a list of token node IDs, returns the index of its first and last
elements. this actually calculates the int indices, as there are weird
formats like RS3, which use unordered / wrongly ordered IDs.
Parameters
----------
segment_token_list : list of str
sorted list of token IDs (i.e. the tokens
that this segment spans)
token_map : dict of (str, int)
a map from token IDs to token indices
Returns
-------
first_token_index : int
index of the first token of the segment
last_token_index : int
index of the last token of the segment
"""
token_indices = [token_map[token_id] for token_id in segment_token_list]
# we need to foolproof this for nasty RS3 files or other input formats
    # with unordered or wrongly ordered IDs
return min(token_indices), max(token_indices)
|
5f0aabb5f3462b53856ec62ffd9284a7d51fa981
| 265,632 |
def _safe_wrap_in_html_tag(content, tag_name):
"""
Adds an html tag around content but handles new line characters
to prevent incorrectly nested tags later
:param content: The content to wrap in the tag
:param tag_name: The name of the tag to use e.g. 'em', 'strong'
:return: the HTML tagged string
"""
raw_lines = content.split('\n')
tagged_lines = []
for line in raw_lines:
if line.strip():
tagged_line = '<{tag_name}>{line}</{tag_name}>'.format(tag_name=tag_name, line=line)
tagged_lines.append(tagged_line)
else:
tagged_lines.append(line)
return '\n'.join(tagged_lines)
|
878533f011012353a2bccfdf2536d7b48bb829f9
| 149,149 |
def dump_byte_array(aaa):
""" Stringify a byte array as hex. """
out = ''
for bbb in aaa:
pair = "%02x" % bbb
out += pair
return out
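# Illustrative usage (Python 3 bytes yield ints when iterated):
# dump_byte_array(b'\x01\xff')  # -> '01ff'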
|
dd77810c637fee08ef781442519e22b50c51d6f2
| 525,011 |
def intcode_one(parameter_one, parameter_two, parameter_three, code_list):
"""Adds element in the first parameter's index with element in second parameter's index. Places sum in third parameter's index. Returns True. """
code_list[parameter_three] = code_list[parameter_one] + code_list[parameter_two]
return True
|
195909f53e4a454fe0a93df89fb9493a3f38f0d1
| 542,551 |
def timestamp_to_bytestring(val, padding=8):
"""Convert Unix timestamp to bytes"""
result = []
while val != 0:
result.append(chr(val & 0xFF))
val = val >> 8
return ''.join(reversed(result)).rjust(padding, '\0')
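# Worked example (illustrative): timestamp_to_bytestring(0x0102) returns
# '\x00' * 6 + '\x01\x02' (big-endian, left-padded to 8 characters).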
|
a238d2dfa261d0c9e83ded4d902210b11ffc7757
| 412,724 |
import inspect
def is_valid_resource(test_item, api_name=None):
""" Filter function used to determine whether the given objects within a module represent
valid ModelResources for this API version"""
if inspect.isclass(test_item):
if test_item.api_name == api_name and "DefaultResource" not in test_item.__name__:
return True
return False
|
69b996835a289877b7a96f7e84e37ca24f172ebb
| 529,645 |
def check_polygon(nums: list[float]) -> bool:
"""
Takes list of possible side lengths and determines whether a
two-dimensional polygon with such side lengths can exist.
Returns a boolean value for the < comparison
of the largest side length with sum of the rest.
Wiki: https://en.wikipedia.org/wiki/Triangle_inequality
>>> check_polygon([6, 10, 5])
True
>>> check_polygon([3, 7, 13, 2])
False
>>> check_polygon([1, 4.3, 5.2, 12.2])
False
>>> nums = [3, 7, 13, 2]
>>> _ = check_polygon(nums) # Run function, do not show answer in output
>>> nums # Check numbers are not reordered
[3, 7, 13, 2]
>>> check_polygon([])
Traceback (most recent call last):
...
ValueError: Monogons and Digons are not polygons in the Euclidean space
>>> check_polygon([-2, 5, 6])
Traceback (most recent call last):
...
ValueError: All values must be greater than 0
"""
if len(nums) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
if any(i <= 0 for i in nums):
raise ValueError("All values must be greater than 0")
copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1])
|
114ae7120e661e19f32a7424895e37493f530ee6
| 65,415 |
import math
def A12_5_3_3(Pv, A, Fy, GammaRTv = 1.05):
"""
A.12.5.3.3 Beam shear strength check
Pv = representative shear strength
A = total cross-sectional area
GammaRTv = partial resistance factor for beam
    shear strength, 1.05
"""
# Pv = representative shear strength
_Pv = A * Fy / (2 * math.sqrt(3))
# Tubular members subjected to beam shear forces
# should satisfy:
_V = _Pv / GammaRTv # (A.12.5-13)
#
return _V
#
|
cec124e0c7b71f6fafbae9512a8f489297176bdf
| 279,667 |
def image_filename(im_num=0, pos_num=0, channel_num=0, z_num=0):
""" create a filename based on the image number, position, channel and z
Micro-manager format:
img_channel000_position001_time000000002_z000.tif
"""
filename = "img_channel{0:03d}_position{1:03d}_time{2:09d}_z{3:03d}.tif"
return filename.format(channel_num, pos_num, im_num, z_num)
|
5e8e6af29c0bed4a4c26e5741d1e3c322f48c83d
| 380,681 |
def anchor_ctr_inside_region_flags(anchors, stride, region):
"""Get the flag indicate whether anchor centers are inside regions."""
x1, y1, x2, y2 = region
f_anchors = anchors / stride
x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
return flags
|
5f1e3b764145a9ee8abb709cd43f7f99c8515eb7
| 691,059 |
def get_ancestor(taxid, tree, stop_nodes):
"""Walk up tree until reach a stop node, or root."""
t = taxid
while True:
if t in stop_nodes:
return t
elif not t or t == tree[t]:
return t # root
else:
t = tree[t]
|
f7841bc5104f96cd66122165a0646b70fc3fd33e
| 700,914 |
def add_dicts(base, new):
"""Add newly dict items to a base set. New items replace existing
ones."""
if new:
base.update(new)
return base
|
e5e076c477705986353eb930b5a738bef00842bf
| 549,752 |
def _band_power(mean_ds, f, center, bandwidth=2):
"""Sum DS values over a band of frequencies."""
low_f = center - bandwidth
high_f = center + bandwidth
f_idx = (low_f <= f) & (f <= high_f)
return mean_ds[f_idx].sum(axis=0)
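# A minimal sketch, assuming numpy arrays for f and mean_ds:
# import numpy as np
# f = np.array([8, 9, 10, 11, 12, 13]); mean_ds = np.ones(6)
# _band_power(mean_ds, f, center=10)  # sums 8..12 Hz inclusive -> 5.0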
|
5c11b912908f677f123b99c67018b0bcaee9658a
| 55,042 |
def tidy(word):
"""Filter out non-printable characters such as unicode characters."""
word = word.strip()
word = ''.join([i for i in word if i.isprintable()])
return word
|
306802618ee3d0fe0dcc0358b168bc281cf395a2
| 375,839 |
import json
def get_device(token):
"""
Read the device configuration from device.json file.
    :return: dict - the device configuration, or None if it cannot be read
    """
    try:
        with open("/tmp/{}/device.json".format(token), "r") as f:
            return json.load(f)
    except (OSError, ValueError):
        return None
|
5675113a87ee2d4abf8ce630cd167113c25e3626
| 30,020 |
def calculateBounds(sentences):
"""Given a list of sentence strings, calculate their bounds as character offsets from 0
"""
curbound, bounds = 0, []
for s in sentences:
bounds.append((curbound, curbound + len(s)))
curbound += len(s) + 1
commabounds = [','.join((str(b) for b in bnds)) for bnds in bounds]
strbounds = '\n'.join(commabounds)
return strbounds
|
934e1e95538f3a846384897efe00f745639cb1b7
| 523,699 |
def difference(d1, d2):
"""Difference of two dicts.
Return elements of the first dict not existing in the second dict."""
ret = {}
for k in d1:
if k not in d2:
ret[k] = d1[k]
return ret
|
3358b5d836c1551a83200c5184801193610c8bdf
| 326,227 |
def amz_group_grant(uri, permission):
"""
Returns XML Grant for group with URI.
:param uri: group URI
:param permission: permission value
"""
grant = (
'<Grant>'
'<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xsi:type="Group">'
'<URI>%s</URI>'
'</Grantee>'
'<Permission>%s</Permission>'
'</Grant>' % (uri, permission))
return grant
|
6db69daa14e2c24544e4a2639a7b9229a15135d4
| 73,156 |
def get_digits(n):
"""
Get the array of digits for n
"""
digits = []
while n > 0:
digits.append(n % 10)
n //= 10
return digits
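# Illustrative usage: get_digits(503) -> [3, 0, 5] (least-significant digit first).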
|
30f8f6b32b13190dabe723eda9cb874ec24b674b
| 449,787 |
import math
def p75(lis, exception=0.0, cycles=False):
"""Calculates the average of the upper half of a list.
lis is the list that is averaged.
exception is returned if there is a divide by zero error. The
    default is 0.0 because the main usage is in percentage calculations.
cycles is whether or not the data set is cycle times. If it is, it
takes the bottom half of the list, rather than the upper half,
because the lower half is the faster cycle times.
"""
# Removes the Nones from the list because if they are part of the
# list, it returns an error.
lis = [item for item in lis if item is not None]
if len(lis) == 0:
return exception
else:
        # If the cycles specification is true, it takes the lower half of
# the list, which are the faster cycle times.
if cycles is True:
# 'math.ceil()' rounds the float up to be an int.
upper_half = lis[:math.ceil(len(lis) / 2)]
else:
# 'math.floor()' rounds the float down to be an int.
upper_half = lis[-math.floor(len(lis) / 2):]
return sum(upper_half) / len(upper_half)
|
514c952c133c4448d3be27e91bac9141910cbe10
| 496,265 |
def load_points(filename):
"""
Splits the contents of the sensors data file into a list of tuples (points)
:param filename: relative path to the datafile
:return: list of tuples of floats (coordinates)
"""
with open(filename, "r") as file:
# read the whole file (1 line) without the first 2 and last 2 brackets
string = file.readline()[2:-3]
# split by points and split each point into 3 numbers
return [tuple([float(x) for x in point.split(",")]) for point in string.split("},{")]
|
0b642f6981dbe36d48d35348239d3dc52e1803e1
| 354,799 |
import yaml
def read_conda_environments(env_paths: list):
"""
Read in a list of paths to yaml conda environment definitions.
Used to create package whitelists for conda-mirror
:param env_paths: A list of paths to conda environment definition yaml files
:return: a list of packages defined in the environment.
The package is a dictionary with a "name" member and optional
"version" and "build" members.
"""
packages = []
for env_path in env_paths:
with open(env_path, "rt") as f:
env = yaml.safe_load(f)
for dep in env["dependencies"]:
# dep is in format <name>=<version>=<build>
# but version and build are optional
pkg_vals = dep.strip().split('=')
pkg = {"name": pkg_vals[0]}
if len(pkg_vals) > 1:
pkg["version"] = pkg_vals[1]
if len(pkg_vals) > 2:
pkg["build"] = pkg_vals[2]
packages.append(pkg)
return packages
|
8b8ac992334d8c71446becf30c970143acd23304
| 603,175 |
def route53_to_zone_file(records):
"""Return a list of Zone File lines from a list of AWS::Route53::RecordSet"""
max_name_len = max([len(record["Name"]) for record in records], default=1)
return [
"{name:{max_name_len}}\t{ttl}\tIN\t{type:5}\t{data}".format(
name=record["Name"], max_name_len=max_name_len,
ttl=record["TTL"], type=record["Type"],
data=" ".join(record["ResourceRecords"]))
for record in records]
|
6f6078f9a1b42b806b5690432fce42ade9e5d94a
| 527,027 |
def _find_from_file(full_doc, from_file_keyword):
"""
Finds a line in <full_doc> like
<from_file_keyword> <colon> <path>
and return path
"""
path = None
for line in full_doc.splitlines():
if from_file_keyword in line:
parts = line.strip().split(':')
if len(parts) == 2 and parts[0].strip() == from_file_keyword:
path = parts[1].strip()
break
return path
|
0deffb34143494dc5850ff2a217b9c0e31b48f37
| 206,081 |
from typing import List
from typing import Dict
def grid_values(puzzle: str, boxes: List[str], replace: bool = True) -> Dict[str, str]:
"""This function maps each puzzle unit to its box unit.
Args:
puzzle (str): String of puzzle
boxes (List[str]): A list of box units
        replace (bool): An option to replace dots with the numbers 1-9. Defaults to True.
Returns:
Dict[str,str]: A dictionary of box units with its puzzle value.
"""
assert len(puzzle) == 81
return {key : ( '123456789' if value =='.' and replace else value) for key,value in zip(boxes,puzzle)}
|
649954daed0b15c294ea50d43e8199114566b4b4
| 482,754 |
from typing import List
def split_content_in_lines(file_content:str)->List[str]:
"""
    Splits a block of text into lines, keeping only non-empty lines that do not start with #.
    :param file_content: the raw text block
    :return: the list of kept lines, each stripped of surrounding whitespace
"""
file_content = file_content.strip()
if len(file_content) <= 0:
return []
collected_entries = []
for line in file_content.split("\n"):
line = line.strip()
if len(line) <= 0:
continue
if line[0] == "#":
continue
collected_entries.append(line)
return collected_entries
|
ed58454e916a84288814b906ea14e223914320b8
| 238,699 |
from typing import MutableMapping
from contextlib import suppress
def delete_recursive_dictionary_keys(dct_to_change: MutableMapping, list_of_keys_to_remove: list) -> MutableMapping:
"""
Removes the specified keys from the specified dict.
Args:
dct_to_change: Dictionary to modify
list_of_keys_to_remove: List of keys to remove
Returns: dct_to_change with any specified keys removed
"""
if not isinstance(dct_to_change, MutableMapping) or not isinstance(list_of_keys_to_remove, list):
raise AttributeError("delete_recursive_dictionary_keys expects a dict and a list as args")
for key in list_of_keys_to_remove:
with suppress(KeyError):
del dct_to_change[key]
for value in list(dct_to_change.values()):
if isinstance(value, MutableMapping):
delete_recursive_dictionary_keys(value, list_of_keys_to_remove)
return dct_to_change
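# Illustrative usage:
# delete_recursive_dictionary_keys({'a': 1, 'b': {'a': 2}}, ['a'])  # -> {'b': {}}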
|
e73f19521edcbcdbb21b2ced1911aea55d5af5ca
| 660,458 |
from typing import Generator
def get_rel_dict_rows(rel_dict:dict) -> Generator:
"""Converts dict of relationships into a generator of rows that can be passed to
a csv.DictWriter"""
return ({'Source':src, 'Target':tar, 'Relationship Type':val} for (src,tar),val in rel_dict.items())
|
cc530e9b89f0c17050f1dde25cd9fef9e79a239f
| 319,657 |
def col_to_dict(col, include_id=True):
"""Convert SchemaColumn to dict to use in AddColumn/AddTable actions."""
ret = {"type": col.type, "isFormula": col.isFormula, "formula": col.formula}
if include_id:
ret["id"] = col.colId
return ret
|
d7c10eb07daaf1af14c73f51192f5d1a5b7bd19b
| 32,354 |