content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---
import torch
def squared_deltaE(lab1, lab2):
"""Squared Delta E (CIE 1976).
lab1: Bx3xHxW
lab2: Bx3xHxW
return: Bx1xHxW
"""
return torch.sum((lab1 - lab2) ** 2, 1, keepdim=True)
|
8540ac9c8274d9a292167141146dbd0ac8bb6386
| 42,061 |
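A minimal usage sketch for the snippet above (assumes PyTorch is installed; shapes are illustrative):
import torch
lab1 = torch.zeros(2, 3, 8, 8)  # batch of 2, 3 Lab channels, 8x8 pixels
lab2 = torch.ones(2, 3, 8, 8)
de = squared_deltaE(lab1, lab2)
print(de.shape)               # torch.Size([2, 1, 8, 8])
print(de[0, 0, 0, 0].item())  # 3.0 -- (1-0)**2 summed over the 3 channels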
def principal_from_interaction(interaction):
"""
principal_from_interaction(interaction: IInteraction) -> IPrincipal
Find the primary :class:`IPrincipal` for the *interaction*. The primary
principal is the principal of the first participation.
"""
return next(iter(interaction.participations)).principal
|
8aba1a9c63ef08e1e2aff105476750e7a2c9c06c
| 42,062 |
import re
def is_divider(item: str) -> bool:
"""Return true if the string contains a divider character."""
return re.match(r'^(,|:)', item) is not None
|
3160a586b83dd3415026918936bee27ac0c7548f
| 42,066 |
def py6S_sensor(mission):
"""
Py6S satellite_sensor name from satellite mission name
"""
switch = {
'Sentinel2':'S2A_MSI',
'Landsat8':'LANDSAT_OLI',
'Landsat7':'LANDSAT_ETM',
'Landsat5':'LANDSAT_TM',
'Landsat4':'LANDSAT_TM'
}
return switch[mission]
|
87e943ea55be17b9a19a3908191a798520180c04
| 42,072 |
def _get_timezone_name(timezone):
"""Return the name of ``timezone``."""
return timezone.tzname(None)
|
e33ea45ac5cee435b528e3e8a794e646bb569791
| 42,078 |
def object_name_to_label(object_class):
"""convert from object name in S3DIS to an int"""
object_label = {
'ceiling': 1,
'floor': 2,
'wall': 3,
'column': 4,
'beam': 5,
'window': 6,
'door': 7,
'table': 8,
'chair': 9,
'bookcase': 10,
'sofa': 11,
'board': 12,
'clutter': 13,
'stairs': 0,
}.get(object_class, 0)
return object_label
|
46417ff4033b50079d9a323bb790362368e2abd2
| 42,080 |
def power_of_two(n):
"""Check if value is a power of two."""
    if n < 1:
        return False  # also prevents infinite recursion on 0 and negatives
    elif n == 1:
        return True  # 2**0 == 1 is a power of two; base case of the recursion
    elif n % 2 != 0:
        return False
    else:
        return power_of_two(n / 2.0)
|
8a435ac95f6d8b2b8006788400b7bd04615f8b5e
| 42,085 |
def to_bytes(seq):
"""convert a sequence to a bytes type"""
b = bytearray()
    for item in seq:
        # bytearray.append() takes an int in Python 3; map 1-char strings through ord()
        b.append(item if isinstance(item, int) else ord(item))
return bytes(b)
|
433a9af70ee429ec6b978d8a3e642654ca224631
| 42,088 |
def hsl_to_rgb(hsl_array):
"""!
@brief Convert hsl array [h, s, l] to rgb array [r, g, b].
    @details HSL where h is in the range [0, 359] and s, l are in the range [0.0, 100.0].
RGB where r, g, b in the set [0, 255].
Formula adapted from https://www.rapidtables.com/convert/color/hsl-to-rgb.html
@param hsl_array HSL array [h, s, l].
@return RGB array [r, g, b].
"""
# Normalize 0 <= s <= 1 AND 0 <= l <= 1
h, s, l = hsl_array
s, l = s/100, l/100
r, g, b = None, None, None
if s == 0:
r = g = b = l # Color is grayscale/achromatic.
else:
color_range = s * (1 - abs(2 * l - 1))
x = color_range * (1 - abs(((h / 60) % 2) - 1))
m = l - (color_range / 2)
if 0 <= h < 60:
r, g, b = color_range, x, 0
elif 60 <= h < 120:
r, g, b = x, color_range, 0
elif 120 <= h < 180:
r, g, b = 0, color_range, x
elif 180 <= h < 240:
r, g, b = 0, x, color_range
elif 240 <= h < 300:
r, g, b = x, 0, color_range
elif 300 <= h < 360:
r, g, b = color_range, 0, x
r, g, b = (r+m), (g+m), (b+m)
r, g, b = round(r*255), round(g*255), round(b*255)
return [r, g, b]
|
0984c4036d09f8874f9a228ca7494096ddd6ef8f
| 42,090 |
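A quick sanity check of the conversion above against well-known colors (values follow from the standard HSL definition):
print(hsl_to_rgb([0, 100, 50]))    # [255, 0, 0] -- pure red
print(hsl_to_rgb([240, 100, 50]))  # [0, 0, 255] -- pure blue
print(hsl_to_rgb([0, 0, 50]))      # [128, 128, 128] -- achromatic gray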
def get_lambda_timeout(lambda_context) -> int:
"""Gets the remaining lambda execution time in seconds."""
return int(lambda_context.get_remaining_time_in_millis()/1000)
|
b66f485bc5394151757f662e4d9b2be7c7e379f3
| 42,091 |
def retrieve_nuts3(url: str) -> str:
"""Prepare nuts3 code from url."""
return url.split('/')[-2]
|
aaf1047a477ff59498509495749efc1c0ff25b0a
| 42,092 |
def scale_features(sample, header, scale_parameters, scale_method):
"""
Parameters
----------
sample : list
        List containing all the features to be scaled
header : list
Features name
scale_parameters : dict
Dict containing the scale parameters
- Mean
- Stdev
- Min
- Max
scale_method : str
Method to be used to scale the data
- Standard
- MinMax
Returns
-------
sample scaled
"""
scaled = []
if scale_method == "Standard":
for i, key in enumerate(header):
mean = scale_parameters[key]["mean"]
stdev = (
scale_parameters[key]["stdev"]
if scale_parameters[key]["stdev"] != 0
else 0.001
)
scaled.append((sample[i] - mean) / stdev)
elif scale_method == "MinMax":
for i, key in enumerate(header):
min_val = scale_parameters[key]["min"]
max_val = scale_parameters[key]["max"]
diff = (max_val - min_val) if max_val - min_val != 0 else 0.001
scaled.append((sample[i] - min_val) / diff)
else:
return sample
return scaled
|
f0a54417c9f17b3962279c4622db4d0f68058c12
| 42,094 |
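A small worked example of the scaler above; the feature names and parameters are hypothetical, chosen so the arithmetic is easy to follow:
header = ["age", "income"]
params = {
    "age": {"mean": 40, "stdev": 10, "min": 20, "max": 60},
    "income": {"mean": 50000, "stdev": 15000, "min": 20000, "max": 80000},
}
print(scale_features([50, 65000], header, params, "Standard"))  # [1.0, 1.0]
print(scale_features([50, 65000], header, params, "MinMax"))    # [0.75, 0.75]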
def is_tiff(data):
"""True if data is the first 4 bytes of a TIFF file."""
    return data[:4] == b'MM\x00\x2a' or data[:4] == b'II\x2a\x00'
|
7136d494f42169345ee09d58c1468330e22b6590
| 42,096 |
def read_string(fd, loc=None):
"""
Read a null-terminated string.
The file descriptor is set to one past the null.
    :param fd: file handle opened in binary mode
    :param loc: optional offset to seek to before reading
    :return: the decoded string
"""
if loc is not None:
fd.seek(loc)
b = b""
while True:
        bb = fd.read(1)
        if bb == b"\x00" or bb == b"":  # stop at the null terminator or at EOF
            break
b += bb
return b.decode("utf-8")
|
6eaed519162874c85f41d4525e450b4fff35ab71
| 42,104 |
def list2str(lst, short=True):
""" Returns a string representing a list """
try:
if short:
return ', '.join(lst)
else:
return str(lst)
    except TypeError:  # non-string elements can't be joined
if short:
return ""
else:
return "[]"
|
cdd8ccd778c9037832c96f5a399beab490ff6d6b
| 42,108 |
def script(content="", src="", scripttype="text/javascript"):
"""
returns a unicode text html script element.
>>> script('#this is a comment', scripttype="text/applescript")
'<script type="text/applescript">#this is a comment</script>'
"""
out_script = u'<script '
if scripttype:
out_script += u'type="%s" ' % scripttype
if src:
out_script += u'src="%s" ' % src
return out_script[:-1] + u'>%s</script>' % content
|
5d7bdd84fff1047677ea3f648c786d5fac3c833f
| 42,111 |
def identity(x):
"""Identity map."""
return x
|
175def70d5694d5b9c0ca513ceea6c31860a4f8f
| 42,115 |
from typing import Optional
from typing import Union
import json
def _jsify_dict(d: dict, stringify: Optional[bool] = False) -> Union[dict, str]:
"""
JavaScript-ify Python dicts: use camelCase string keys.
"""
def _snake2camel(s: str) -> str:
s_split = s.split("_")
return "".join([s_split[0]] + [e.capitalize() for e in s_split[1:]])
d2 = dict(
(_snake2camel(str(k)), _jsify_dict(v) if isinstance(v, dict) else v)
for k, v in d.items())
return d2 if not stringify else json.dumps(
d2, separators=(',', ':')).replace("\"", "'")
|
48f9694c4a99b46dc240e3d3458849eaededd9ab
| 42,117 |
def add_format_field(record, fieldname):
"""Return record with fieldname added to the FORMAT field if it doesn't
already exist.
"""
format = record.FORMAT.split(':')
if fieldname not in format:
record.FORMAT = record.FORMAT + ":" + fieldname
return record
|
bcc3ff87a80bb561462b11428eeba8561bcc100f
| 42,120 |
from typing import Dict
def create_simple_sample(
num_nodes: int = 1,
kedro_version: str = "0.17.2",
tagged: bool = True,
name_prefix: str = "node",
) -> Dict:
"""Create Sample data for examples and tests.
Parameters
--------
num_nodes : int
number of nodes to generate in the pipeline
kedro_version : str
kedro version to use in the pipeline.json format
tagged : bool
to tag the datasets or not
name_prefix : str
prefix to add to the name of each node
Returns
--------
kedro pipeline.json sample data as a dictionary
Examples
--------
>>> create_simple_sample(1)
{'kedro_version': '0.17.2', 'pipeline': [{'name': 'node1', 'inputs': ['output0'], 'outputs': ['output1'], 'tags': ['tag1']}]}
>>> create_simple_sample(1, name_prefix='first')
{'kedro_version': '0.17.2', 'pipeline': [{'name': 'first1', 'inputs': ['output0'], 'outputs': ['output1'], 'tags': ['tag1']}]}
>>> create_simple_sample(1, tagged=False)
{'kedro_version': '0.17.2', 'pipeline': [{'name': 'node1', 'inputs': ['output0'], 'outputs': ['output1'], 'tags': ['']}]}
"""
return {
"kedro_version": kedro_version,
"pipeline": [
{
"name": f"{name_prefix}{n}",
"inputs": [f"output{n-1}"],
"outputs": [f"output{n}"],
"tags": [f"tag{n}" if tagged else ""],
}
for n in range(1, num_nodes + 1)
],
}
|
de183b2a6bb0e1d66460b9836f32edc6a07e81d7
| 42,122 |
from typing import Mapping
from typing import List
def is_kept(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[bool]:
"""
Determine whether each variable should be kept after selection
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[bool]
An array of length equal to the width of the covariate matrix
True if the variable should be kept, False otherwise
"""
    features = sorted(feature_proportions)
    return [feature_proportions[feature] > thresholds[feature] for feature in features]
|
4d3d0b5d4bf3e555fb37bafcc1ee58c71b337d3a
| 42,126 |
def load_hyperedges(pc, table_name):
"""
Returns the hyperedge list from the Postgres table.
:param pc: (PostgresConnector) Object for communication.
:param table_name: (string) Where to retrieve the values from
:return: (list) Tuple values for (edge_id, term_id)
"""
with pc as open_pc:
open_pc.cursor.execute("SELECT * FROM {}".format(table_name))
data = open_pc.cursor.fetchall()
return data
|
60dedd0ad05a125dd1be39d38f059edeffe991b0
| 42,127 |
def time_delta(t1: int, t2: int) -> float:
"""
:param t1: first timestamp
:param t2: second timestamp
:return: time delta
"""
return (t2 - t1) / 3600000
|
e8606dab047ba6088d8d30b2729c85b03cba1766
| 42,128 |
def acquire_category(soup):
"""
Take a BeautifulSoup content of a book page.
Return the category of the book.
"""
table = soup.ul.find_all('a')
category = table[2].string
return category
|
ce0f28cab0809959d89f684d1d1e5ba060abb51a
| 42,133 |
import torch
def RescaleProbMap(batch_x, sparsity):
"""
Rescale Probability Map
given a prob map x, rescales it so that it obtains the desired sparsity
if mean(x) > sparsity, then rescaling is easy: x' = x * sparsity / mean(x)
if mean(x) < sparsity, one can basically do the same thing by rescaling
(1-x) appropriately, then taking 1 minus the result.
"""
batch_size = len(batch_x)
ret = []
for i in range(batch_size):
x = batch_x[i:i+1]
xbar = torch.mean(x)
r = sparsity / (xbar)
beta = (1-sparsity) / (1-xbar)
        # compute adjustment
le = torch.le(r, 1).float()
ret.append(le * x * r + (1-le) * (1 - (1 - x) * beta))
return torch.cat(ret, dim=0)
|
73bd378728f2034064366d6edab343847b3b52b0
| 42,134 |
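A sketch of the rescaling behaviour above (assumes PyTorch; the values are chosen so the arithmetic is easy to follow):
import torch
x = torch.full((1, 1, 2, 2), 0.8)       # mean(x) = 0.8
y = RescaleProbMap(x, 0.4)              # r = 0.4 / 0.8 = 0.5 <= 1, so y = 0.5 * x
print(round(torch.mean(y).item(), 4))   # 0.4 -- the requested sparsity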
def landeg(gL,gS,J,S,L):
""" Calculating the Lande factor g,
For fine structure: landeg(gL,gS,J,S,L)
For hyperfine structure: landeg(gJ,gI,F,I,J)
"""
return gL * (J * (J + 1) - S * (S + 1) + L * (L + 1)) / (2 * J * (J + 1)) + \
gS * (J * (J + 1) + S * (S + 1) - L * (L + 1)) / (2 * J * (J + 1))
|
e9b062a01f346b166ac9789f28d1c3cf057517e9
| 42,138 |
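A worked check of the Landé formula above (fine-structure case, gL = 1, gS close to 2):
gL, gS = 1.0, 2.0023
print(landeg(gL, gS, 0.5, 0.5, 0))               # ~2.0023 -- for an S_1/2 state g reduces to gS
print(round(landeg(1.0, 2.0, 0.5, 0.5, 1), 4))   # 0.6667 -- the textbook 2/3 for a P_1/2 state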
def epb(mjd):
"""
Converts Modified Julian Date to Besselian Epoch
Inputs:
- mjd Modified Julian Date (JD - 2400000.5)
Returns the Besselian Epoch.
Reference:
Lieske,J.H., 1979. Astron.Astrophys.,73,282.
History:
P.T.Wallace Starlink February 1984
2002-07-11 ROwen Converted EPB2D to Python.
"""
return 1900.0 + (mjd-15019.81352)/365.242198781
|
8757c02acb1a128fc2907fc67891e589160156e6
| 42,139 |
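Two quick checks of the conversion above, derived directly from the constants in the formula:
print(epb(15019.81352))        # 1900.0 exactly, by construction
print(round(epb(51544.5), 3))  # 2000.001 -- MJD of J2000.0 in Besselian years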
def split_sortby(sort_by):
""""Split the value of sortBy.
sortBy can have a trailing 'ASC' oder 'DESC'.
This function returns the fieldname and 'ASC' or 'DESC' as tuple.
"""
asc_desc = 'ASC'
if sort_by.lower().endswith('asc'):
sort_by_value = sort_by[:-3]
elif sort_by.lower().endswith('desc'):
sort_by_value = sort_by[:-4]
asc_desc = 'DESC'
else:
sort_by_value = sort_by
return sort_by_value, asc_desc
|
75c6f8ce9ad2edb02c844ccd9c0a2c0c6d22d306
| 42,143 |
from typing import Union
from pathlib import Path
from typing import Tuple
def split_filename(
filepath: Union[str, Path],
resolve: bool = False,
) -> Tuple[Path, str, str]:
"""split a filepath into the directory, base, and extension"""
filepath = Path(filepath)
if resolve:
filepath = filepath.resolve()
path = filepath.parent
_base = Path(filepath.stem)
ext = filepath.suffix
if ext == ".gz":
ext2 = _base.suffix
base = str(_base.stem)
ext = ext2 + ext
else:
base = str(_base)
return Path(path), base, ext
|
80f47eea7249ab22b709db3f02f9e7bde7a984f5
| 42,150 |
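The .gz branch is the interesting part of the helper above; a quick sketch of both cases (output shown for a POSIX system):
print(split_filename("/data/scan.nii.gz"))  # (PosixPath('/data'), 'scan', '.nii.gz')
print(split_filename("/data/scan.nii"))     # (PosixPath('/data'), 'scan', '.nii')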
import torch
def affine_to_linear(x):
"""Convert NxCxHxW tensor to Nx(C*H*W+1) tensor where last column is one"""
(N,C,H,W) = x.shape if len(x.shape)==4 else (1,*x.shape)
    return torch.cat((x.view(N, C * H * W), torch.ones(N, 1, dtype=x.dtype, device=x.device)), dim=1)
|
be168f7e6f5221b3e2acbbd104a4904dfd81f7b6
| 42,152 |
import torch
def perm_gpu_f32(pop_size, num_samples):
"""Use torch.randperm to generate indices on a 32-bit GPU tensor."""
return torch.randperm(pop_size, dtype=torch.int32, device="cuda")[
:num_samples
].long()
|
950b07a4d6493fa83cf75c368ac0faf4a3ab44fc
| 42,153 |
def _plain(hash_value, password):
"""Check if ``hash_value`` and ``password`` match, using plain method."""
return hash_value == password
|
8ce4fae1737e8281b28d171b05175d77566fe459
| 42,155 |
from typing import List
import re
def parse_active_site_data_line(line: str) -> List[str]:
"""
Parse active site data line.
Args:
line (str): a line from the active site data file.
Returns:
List[str]: a list containing identifiers and the sequence.
"""
identifiers, sequence = re.split(r",\s+", line.strip(">\n"))
return identifiers.split() + [sequence]
|
275c33c94a7b6442d2b209abec1ac70ff494a96e
| 42,160 |
def get_deep(x, path, default=None):
""" access value of a multi-level dict in one go.
:param x: a multi-level dict
:param path: a path to desired key in dict
:param default: a default value to return if no value at path
Examples:
x = {'a': {'b': 5}}
get_deep(x, 'a.b') returns 5
get_deep(x, ['a', 'b']) returns 5
get_deep(x, 'c', 5) returns 5
"""
if path is None or path == '':
path_keys = []
elif type(path) in (list, tuple):
path_keys = path
else:
path_keys = path.split('.')
v = x or {}
for k in path_keys:
try:
v = v.get(k)
except TypeError:
v = None
finally:
if v is None:
return default
return v
|
87290152f3c8cb7e5bf6daa4cf833fe5bb5b8ee4
| 42,162 |
import csv
def CreateFieldIdLookup(f):
"""Create a dictionary that specifies single variable analysis each var.
Args:
config_dir: directory of metadata, output by update_rappor.par
Returns:
A dictionary from field ID -> full field name
NOTE: Right now we're only doing single variable analysis for strings, so we
don't have the "type".
"""
field_id_lookup = {}
c = csv.reader(f)
for i, row in enumerate(c):
if i == 0:
expected = ['metric', 'field', 'field_type', 'params', 'field_id']
if row != expected:
raise RuntimeError('Expected CSV header %s' % expected)
continue
metric, field, field_type, _, field_id = row
if field_type != 'string':
continue
# Paper over the difference between plain metrics (single variable) and
# metrics with fields (multiple variables, for association analysis).
if field:
full_field_name = '%s.%s' % (metric, field)
else:
full_field_name = metric
field_id_lookup[field_id] = full_field_name
return field_id_lookup
|
ef349fb6fb5eb79bbb884c941b935f93caf44d78
| 42,164 |
import math
def is_hexagonal(number):
"""
Check given number to be a hexagonal number.
:param number: value to be checked to be a hexagonal number.
:returns: True when given value is a hexagonal number
see http://en.wikipedia.org/wiki/Hexagonal_number
>>> is_hexagonal(15)
True
>>> is_hexagonal(14)
False
"""
value = (math.sqrt(8 * number + 1) + 1) / 4.0
return value == int(value)
|
491bb2d9b5b1455463e18cf96c0a4ab30bce85ab
| 42,165 |
def _all_na_or_values(series, values):
"""
Test whether every element in the series is either missing or in values.
This is fiddly because isin() changes behavior if the series is totally NaN
(because of type issues)
Example: x = pd.DataFrame({'a': ['x', np.NaN], 'b': [np.NaN, np.NaN]})
x.isin({'x', np.NaN})
Args:
series (pd.Series): A data column
values (set): A set of values
Returns:
bool: True or False, whether the elements are missing or in values
"""
series_excl_na = series[series.notna()]
if not len(series_excl_na):
out = True
elif series_excl_na.isin(values).all():
out = True
else:
out = False
return out
|
f6c3f05a7dc2ad03047b1529cdcd00f6dd091899
| 42,168 |
def kwargs_from_parsed_args(args):
"""
Transforms argparse's parsed args object into a dictionary to be passed as
kwargs.
"""
return {k: v for k, v in vars(args).items() if v is not None}
|
8db36b1e151f8a5c6efcda9f0ce52fa9ade0f698
| 42,171 |
def _rescale_layout(pos, scale=1):
"""
Normalize the given coordinate list to the range [0, `scale`].
Parameters
----------
pos : array
Coordinate list
scale : number
The upperbound value for the coordinates range
Returns
-------
pos : array
The rescaled (normalized) coordinates in the range [0, `scale`].
Notes
-----
Changes `pos` in place.
"""
pos -= pos.min(axis=0)
pos *= scale / pos.max()
return pos
|
39de1a71f6d7807d2d46f1d3b6aafdd441ff719a
| 42,173 |
def active_class(var, prop, active):
"""Tag to return an active class if the var and prop test matches."""
try:
return active if var == prop else ''
except Exception:
return ''
|
8b199f594839e1e6e7bc692408631ccbe7e3074a
| 42,176 |
def _is_xml(s):
"""Return ``True`` if string is an XML document."""
return s.lower().strip().startswith('<?xml ')
|
f11ce0c32915d8dc8b5dcd070644557b1b9f9a4f
| 42,178 |
def recordToMessage(record, reqid):
"""
Format a log record as a list of interesting entries (a message) for the
daemon-client protocol.
"""
msg = [getattr(record, e) for e in ('name', 'created', 'levelno',
'message', 'exc_text')] + [reqid]
if not hasattr(record, 'nonl'):
msg[3] += '\n'
return msg
|
03c6ac362a3d0937583578ae845f3397474d7263
| 42,180 |
def find_disconnected(model):
"""
Return metabolites that are not in any of the reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
return [met for met in model.metabolites if len(met.reactions) == 0]
|
19c606affff99c01c47522d6903b6f8008cee8c6
| 42,182 |
import math
def calculate_entropy(item_count, total_items):
"""
Calculate the entropy present in a password/passphrase.
Assumes item_count number of items(characters/words) were chosen
uniformly from out of total_items number of items, calculated as:
entropy = log2(total number of possibilities during generation), or
entropy = log2(total_items**item_count), or
entropy = item_count * log2(total_items)
Keyword arguments:
item_count -- Number of items present in the generated word/phrase
In a password this is simply the length
In a passphrase this is the number of words
total_items -- Number of choices that were present
In a password this is the size of the choice alphabet
In a passphrase this is the number of words in the dictionary
"""
return item_count * math.log2(total_items)
|
1dbad575cf721314677598a5731aeac5f569b62f
| 42,185 |
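A worked example of the entropy formula above: a four-word passphrase drawn uniformly from a 7776-word Diceware-style list:
bits = calculate_entropy(item_count=4, total_items=7776)
print(round(bits, 1))  # 51.7 -- since log2(7776) is about 12.92 bits per word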
def get_request_ids(events, context):
"""
Get request IDs from a set of lambda log events
"""
ids = []
for event in events:
if ('extractedFields' in event):
fields = event['extractedFields']
if 'type' in fields and fields['type'] == 'END' and 'requestId' in fields:
ids.append(fields['requestId'])
# should always be at least one END event
assert len(ids) > 0, "No END events found in message stream."
# shouldn't be any dupes
assert len(ids) == len(set(ids)), "Found duplicate request ids"
return ids
|
c5cc433c497de3f23f48bbf89b078ec399fce27e
| 42,189 |
def atoi(text):
"""
Turn an int string into a number, but leave a non-int string alone.
"""
return int(text) if text.isdigit() else text
|
76b7a3fdd28333bdc30b45ed8a8d8f7ec361fa70
| 42,190 |
from typing import Tuple
def _lex_quoted(header: str) -> Tuple[str, str]:
"""
>>> _lex_quoted('"abc";a=10')
('"abc"', ';a=10')
>>> _lex_quoted('a=10')
('', 'a=10')
"""
if header[0] != '"':
return "", header
end_quote_pos = header.find('"', 1)
return header[: end_quote_pos + 1], header[end_quote_pos + 1 :]
|
ecf941475c3b37bc6d3edb246d827ab63be6173b
| 42,192 |
import torch
from collections import OrderedDict
def retrieve_out_channels(model, size):
"""
This method retrieves the number of output channels of a specific model.
Args:
model (nn.Module): The model for which we estimate the out_channels.
It should return a single Tensor or an OrderedDict[Tensor].
size (Tuple[int, int]): The size (wxh) of the input.
Returns:
out_channels (List[int]): A list of the output channels of the model.
"""
in_training = model.training
model.eval()
with torch.no_grad():
# Use dummy data to retrieve the feature map sizes to avoid hard-coding their values
device = next(model.parameters()).device
tmp_img = torch.zeros((1, 3, size[1], size[0]), device=device)
features = model(tmp_img)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
out_channels = [x.size(1) for x in features.values()]
if in_training:
model.train()
return out_channels
|
fc4c0caaad0a3f6d7763d4537abea72a877d97a4
| 42,194 |
def print_title(title, outf):
"""Prints a title to a file
The title will be marked up by an underline of equal signs as in the Setext
style of headers.
"""
print("\n\n%s" % title, file=outf)
print("=" * len(title), file=outf)
print("")
return None
|
097c1dc233a09b9231f6de0f433f09f4d74c849f
| 42,197 |
def hypergeometric_expval(n, m, N):
"""
Expected value of hypergeometric distribution.
"""
return 1. * n * m / N
|
f9440637bde88851624bae564f75dfda21b62808
| 42,203 |
import math
def drop_dead_intensity(t):
"""Intensity that can be maintained for a given time.
All athletes assumed to exhibit identical behavior. Originally
based on elite athletes.
Args:
t (float): Time to exhaustion in minutes.
Returns:
float: maximum sustainable intensity for this time, defined as a
ratio of max sustainable VO2 to VO2max.
"""
a1 = 0.2989558
a2 = 0.1894393
tau1 = 1 / 0.1932605
tau2 = 1 / 0.012778
c = 0.8
return a1 * math.exp(-t / tau1) + a2 * math.exp(-t / tau2) + c
|
b5048e8907c0147aa5a8390221e2c64eb7663546
| 42,206 |
import logging
def fetch_relationships(bucket, dataset, relation_types):
"""
Get a listing of relationship files on S3
Parameters
----------
bucket : str
dataset : str
    relation_types : List[str]
    Returns
    -------
    {str: (str, str)}
    A dict mapping each relation type to a (bucket, key) pair giving
    the location of the corresponding relationship file.
"""
relationship_locations = dict()
for rel_type in relation_types:
rel_file_key = "relationships/{}.csv".format(rel_type)
logging.info("Fetching relationship {}".format(rel_file_key))
relationship_locations[rel_type] = (bucket, rel_file_key)
return relationship_locations
|
4b4e158506aed62c20906212390d34ff1c033d32
| 42,210 |
def cria_matriz(num_linhas, num_colunas, valor):
""" (int, int, valor) -> matriz (lista de listas)
cria e retorna uma matriz comnum_linhas linhas e num_colunas
colunas em que cada elemento é igual ao valor dado.
"""
matriz = [] # lista vazia
for i in range(num_linhas):
#cria a linha i
linha =[] # lista vazia
for j in range(num_colunas):
linha.append(valor)
# adiciona linha à matriz
matriz.append(linha)
return matriz
|
a6e92131cd9af347b3a36d7bd651a7c3fb5347e3
| 42,212 |
def buildKey(ids, dataLine):
"""
Concatenate a set of fields together to build an overall key
This is a simple approach to determining k-anonymity, in which all
of the fields of interest are concatenated as a single key. The
ids coming in should be a list of indexes into the fields in the dataLine.
These will be concatenated in order to form a new key. Note that this
currently assumes that all of the data fields are strings.
"""
retKey = ''
for i in ids:
retKey += dataLine[i]
return retKey
|
b37d96b46916cb7db751b49d7351b1a2e0d9f8ae
| 42,219 |
def scan_deployed_policies(org_client):
"""
Return list of Service Control Policies deployed in Organization
"""
return org_client.list_policies(Filter='SERVICE_CONTROL_POLICY')['Policies']
|
53b96af745cd0f782158a6600bb7d6bedb836747
| 42,220 |
def get_zhang_aspect_ratio(aspect_ratio):
"""Compute an equivalent aspect ratio according to Zhang.
Parameters
----------
aspect_ratio : float
Aspect ratio of a cylindrical fiber.
Returns
-------
float
Equivalent aspect ratio for an ellipsoid.
References
----------
.. [1] Zhang, D.; Smith, D.E.; Jack, D.A.; Montgomery-Smith, S.,
'Numerical Evaluation of Single Fiber Motion for Short-Fiber-Reinforced Composite
Materials Processing.'
J. Manuf. Sci. Eng. 2011, 133, 51002.
https://doi.org/10.1115/1.4004831
"""
return (
0.000035 * aspect_ratio ** 3
- 0.00467 * aspect_ratio ** 2
+ 0.764 * aspect_ratio
+ 0.404
)
|
e83b751179c566683d8667b2682d4589d8f3f540
| 42,221 |
def app_label(value):
""" Template tag that returns the name of the app an item belongs too"""
return value._meta.app_label
|
40bb07baef805ba5ae7f4d59834d70ec3121310a
| 42,224 |
def get_context_attribute(self, request, context_name, namespace_name = None):
"""
Retrieves the value of the context attribute with the
provided name.
In case no attribute is found a none value is returned.
:type request: Request
:param request: The request to be used.
:type context_name: String
:param context_name: The name of the of the context attribute
to retrieve the value.
:type namespace_name: String
:param namespace_name: The name of the namespace to be used
for the context (session) variable to be retrieved.
:rtype: Object
:return: The value of the requested context attribute.
"""
# retrieves the context defaulting to a new and empty map
# in case an invalid session attribute is returned
context = self.get_session_attribute(request, "_context", namespace_name)
    if context is None: context = {}
    # returns the retrieved attribute value, defaulting to none
# in case it's not present in the context map
return context.get(context_name, None)
|
06241ff83389fdbcc61fc9c9b419cfab95106a6f
| 42,227 |
def _buckets_for_length(bucket_length, batch_size, max_eval_length, n_devices,
training):
"""Creates heuristically a set of bucket boundaries and sizes.
The middle boundary is set to `bucket_length` and the corresponding batch
size is set to `batch_size`. We also create buckets of 1/2 and 1/4 length
with 2x and 4x batch size, and buckets of 2x and 4x and larger length with
1/2 and 1/4 batch size respectively, and batch size 1 for the final one.
Args:
bucket_length: the length of the middle bucket.
batch_size: the batch size for the middle bucket.
max_eval_length: the longest bucket length if training=False.
n_devices: number of devices, batch sizes are divisible by that.
training: bool, whether we are training or evaluating.
Returns:
a pair of lists of integers, (bucket_boundaries, bucket_batch_sizes).
"""
bucket_boundaries = [bucket_length // 4, bucket_length // 2,
bucket_length, bucket_length * 2,
bucket_length * 4, bucket_length * 8,
bucket_length * 16]
if not training:
max_eval_length = max_eval_length or bucket_length * 32
# Set last bucket boundary to be max_eval_length, cut off boundaries
# that are larger than this.
bucket_boundaries = (
[b for b in bucket_boundaries if b < max_eval_length] +
[max_eval_length]
)
bucket_boundaries.append(max_eval_length)
bucket_batch_sizes = [batch_size * 4, batch_size * 2,
batch_size, batch_size // 2,
batch_size // 4, batch_size // 8,
batch_size // 16, 1]
if not training:
# The last bucket batch size is always 1, but the one-but-last is
# sized to accommodate the final length = bucket_boundaries[-1], which
# we changed for eval above -- so adjusting here too.
# Resize if needed, since bucket_batch_sizes may not be the same size
# anymore.
bucket_batch_sizes = bucket_batch_sizes[:len(bucket_boundaries)] + [1]
bucket_batch_sizes[-2] = batch_size // max_eval_length
# Make batch sizes divisible by n_devices.
bucket_batch_sizes = [max(b // n_devices, 1) * n_devices
for b in bucket_batch_sizes]
return (bucket_boundaries, bucket_batch_sizes)
|
4381af811f6f5d530806de49e217de014b6c9395
| 42,231 |
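To make the heuristic above concrete, here is what it produces for a middle bucket of length 32 and batch size 64 on a single device during training (the values follow directly from the code):
boundaries, batch_sizes = _buckets_for_length(
    bucket_length=32, batch_size=64, max_eval_length=None, n_devices=1,
    training=True)
print(boundaries)   # [8, 16, 32, 64, 128, 256, 512]
print(batch_sizes)  # [256, 128, 64, 32, 16, 8, 4, 1]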
def _boolify_envvar(val):
"""Interpret boolean environment variables.
True whenever set/exported, even if value is an empty string,
"null", or "none".
"""
falsey = ("false", "nil", "no", "off", "0")
return (val if val is not None else "false").lower() not in falsey
|
caae6f51cc99ef82f6bdccf1fc398f0c79a57035
| 42,232 |
from pathlib import Path
def file_ext(f):
""" File extension """
return Path(f).suffix.replace('.', '')
|
3e556bcd5d20727c2699da3d5b4e5785a4dcc8f1
| 42,233 |
def _EscapePosixShellArgument(arg):
"""Escapes a shell command line argument so that it is interpreted literally.
Args:
arg: The shell argument to escape.
Returns:
The escaped string.
"""
return "'%s'" % arg.replace("'", "'\\''")
|
c0a46321337621e8f8e5f949783fac3bdf2fc193
| 42,235 |
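The escaping above uses the usual POSIX single-quote trick (close the quote, emit an escaped quote, reopen); for example:
print(_EscapePosixShellArgument("don't panic"))  # 'don'\''t panic'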
from typing import Dict
from typing import List
from typing import Tuple
from bs4 import BeautifulSoup
from collections import OrderedDict
def load_docs_from_sgml(
file_path: str, encoding='utf-8'
) -> Dict[str, List[Tuple[str, str]]]:
"""
Loads documents from given SGML file.
Returns dict mapping document ids to list of segments [segments].
Each segment is a tuple (segment id, segment text).
"""
soup = None
with open(file_path, encoding=encoding) as _file:
soup = BeautifulSoup(_file, features='lxml')
all_docs: Dict[str, List[Tuple[str, str]]] = OrderedDict()
for curr_doc in soup.find_all('doc'):
curr_doc_id = curr_doc.attrs['docid']
        if curr_doc_id not in all_docs:
all_docs[curr_doc_id] = []
for curr_seg in curr_doc.find_all('seg'):
curr_seg_id = curr_seg.attrs['id']
curr_seg_text = curr_seg.get_text()
all_docs[curr_doc_id].append((curr_seg_id, curr_seg_text))
return all_docs
|
99074d183c2f66839db50394528453f4517685c9
| 42,236 |
def endianSwapU16(data):
    """Swaps pairs of bytes (16-bit words) in the given bytearray (length must be even)."""
    for b in range(0, len(data), 2):
        data[b], data[b + 1] = data[b + 1], data[b]
    return data
|
3a3db1e49de0a171aba856629e06b7084ae702bd
| 42,240 |
def _parse_html_table(table):
"""Return list of lists with cell texts.
:param table: beautifulsoup tag with table element
"""
rows = table.findAll("tr")
data = []
for row in rows:
tds = row.findAll("td")
ths = row.findAll("th")
if len(ths) > 0:
tmp = [th.text.strip() for th in ths]
else:
tmp = [td.text.strip() for td in tds]
data.append(tmp)
return data
|
073449286be9d2a91028b80bad1a73a475b06994
| 42,244 |
def to_map(labelset, map_unlabeled=True):
"""map set of labels to series of consecutive integers from 0 to n inclusive,
where n is the number of labels in the set.
This 'labelmap' is used when mapping labels from annotations of a vocalization into
a label for every time bin in a spectrogram of that vocalization.
If map_unlabeled is True, 'unlabeled' will be added to labelset, and will map to 0,
so the total number of classes is n + 1.
Parameters
----------
labelset : set
of labels used to annotate a Dataset.
map_unlabeled : bool
if True, include key 'unlabeled' in mapping. Any time bins in a spectrogram
that do not have a label associated with them, e.g. a silent gap between vocalizations,
will be assigned the integer that the 'unlabeled' key maps to.
Returns
-------
labelmap : dict
maps labels to integers
"""
if type(labelset) != set:
raise TypeError(f"type of labelset must be set, got type {type(labelset)}")
labellist = []
if map_unlabeled is True:
labellist.append("unlabeled")
labellist.extend(sorted(list(labelset)))
labelmap = dict(zip(labellist, range(len(labellist))))
return labelmap
|
0cca033beea95b9eddd084875ed08b149f62f36b
| 42,254 |
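A quick illustration of the mapping above; note that 'unlabeled' claims integer 0 when map_unlabeled is True:
print(to_map({'b', 'a'}, map_unlabeled=True))   # {'unlabeled': 0, 'a': 1, 'b': 2}
print(to_map({'b', 'a'}, map_unlabeled=False))  # {'a': 0, 'b': 1}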
from typing import List
def read_umls_file_headers(meta_path: str, filename: str) -> List[str]:
"""
Read the file descriptor MRFILES.RRF from a UMLS release and get column headers (names)
for the given file
MRFILES.RRF file format: a pipe-separated values
Useful columns:
column 0: name of one of the files in the META directory
column 2: column names of that file
Args:
meta_path: path to the META directory of an UMLS release
filename: name of the file to get its column headers
Returns:
a list of column names
"""
file_descriptors = f"{meta_path}/MRFILES.RRF" # to get column names
with open(file_descriptors) as fin:
for line in fin:
splits = line.split("|")
found_filename = splits[0]
column_names = (splits[2] + ",").split(
","
) # ugly hack because all files end with an empty column
if found_filename in filename:
return column_names
assert False, f"Couldn't find column names for file {filename}"
return None
|
788bca5a94e2f7c40f09bc1804d7c2e1e31f2c2b
| 42,257 |
def cate_init(*arg, **kwargs):
"""
No actual use, just demonstrates the signature of an Cate entry point callable.
:param arg: any arguments (not used)
:param kwargs: any keyword arguments (not used)
:return: any or void (not used)
"""
return arg, kwargs
|
b7b81c9c4c32a59acb3931c9f57cad8137f7a4ec
| 42,258 |
from datetime import datetime
def year_fraction(date):
"""Obtain the fraction of the year that a given date represents.
Args:
date (datetime): a datetime object.
Returns:
float representing the fraction of the year.
"""
year = date.year
this_year_start = datetime(year=year, month=1, day=1)
next_year_start = datetime(year=year+1, month=1, day=1)
days_elapsed = date.timetuple().tm_yday - 0.5
days_total = (next_year_start - this_year_start).days
return days_elapsed/days_total
|
7b600f38ad0862eff2568ed5f460487971010efb
| 42,259 |
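The half-day offset in the function above centres each day on its midpoint, so a mid-year date lands exactly on 0.5:
from datetime import datetime
print(year_fraction(datetime(2021, 7, 2)))  # 0.5 -- day 183 of 365, minus half a day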
def _GetHandlerFromRequest(request):
"""Safely extracts a request handler from a Request.
Args:
request: A webapp2.Request instance.
Returns:
The handler that corresponds to the given Request (which can be a class or
method), or None if there is no such handler (e.g. 404's).
"""
route = getattr(request, 'route', None)
if route is not None:
return getattr(route, 'handler', None)
|
8fe0b6d2f68931cba6ab618b38704d2f21455eea
| 42,266 |
def input_parameter_name(name, var_pos):
"""Generate parameter name for using as template input parameter names
in Argo YAML. For example, the parameter name "message" in the
container template print-message in
https://github.com/argoproj/argo/tree/master/examples#output-parameters.
"""
return "para-%s-%s" % (name, var_pos)
|
40a3d29274b141294e4b9cae83ddb84ae8e44188
| 42,270 |
def subscript(text: str) -> str:
"""
Return the *text* surrounded by subscript HTML tags.
Subscript text appears half a character below the normal line,
and is sometimes rendered in a smaller font.
Subscript text can be used for chemical formulas.
>>> subscript("foo")
'<sub>foo</sub>'
"""
return f"<sub>{text}</sub>"
|
48cfc245c863b569aef83743ca7d39a1e02878da
| 42,272 |
def confirm(s: str = ''):
"""
Ask yes/no, retry if invalid.
:param str s: prompt
:return: bool
"""
while True:
value = input('> {} [y/n]: '.format(s)).lower()
        if value:
            # note: these are substring tests, so 'y', 'ye', and 'yes' all match
            if value in 'yesrtui':
                return True
            elif value in 'novbm,':
                return False
|
32d7d8b8ccef3516a76492bde7038db84a17c67a
| 42,273 |
def is_point_in_rect2(point, rect_center, rect_w, rect_h):
"""Checks whether is coordinate point inside the rectangle or not.
Rectangle is defined by center and linear sizes.
:type point: list
:param point: testing coordinate point
:type rect_center: list
:param rect_center: point, center of rectangle
:type rect_w: float
:param rect_w: rectangle width
:type rect_h: float
:param rect_h: rectangle height
:rtype: boolean
:return: boolean check result
"""
cx, cy = rect_center
x, y = point
if abs(x - cx) <= rect_w / 2.0 and abs(y - cy) <= rect_h / 2.0:
return True
return False
|
8ced860e0bee60d287aba796414d3b3f46a436b5
| 42,276 |
from typing import Tuple
from typing import List
from typing import Set
def get_neighbours(p: Tuple[int, ...], offsets: List[tuple]) -> Set[tuple]:
"""
Get all of the neighbours of a point.
"""
ns = set()
for offset in offsets:
n = []
for dim in range(len(p)):
n.append((p[dim]+offset[dim]))
ns.add(tuple(n))
return ns
|
24bbcde11212d2cd86cd0ebe93a343de9c06e1a3
| 42,277 |
def mock_dataset_with_config(mocker, mock_dataset, dataset_config_response):
"""Returns an example dataset, mocked to return a configuration."""
mocker.patch.object(
mock_dataset, 'get', return_value=dataset_config_response)
mock_dataset.attributes
return mock_dataset
|
0b902a1447b460e0acf2ae5cac7d2223ae6e2b42
| 42,280 |
import re
def reg_exp(regex, str):
"""
Does a regexp match on the given string and returns the match
"""
return re.match(regex, str)
|
121e8e5d96a180e0d5f3919a12fb5d5e232d4e25
| 42,282 |
from datetime import datetime
def change_datetime_format(datetime_value: str, old: str, new: str):
"""Converts a date string's format to another format"""
return datetime.strptime(datetime_value, old).strftime(new)
|
cbc6ea7a11607b39d1bf222c6bb120c5772ea8c0
| 42,290 |
import grp
def parse_gid(gid):
"""Parse group id.
Arguments:
gid (str, int): Actual gid, or the name of a group.
Returns:
int: The actual gid of the group.
"""
try:
return int(gid)
except ValueError:
try:
return grp.getgrnam(gid).gr_gid
except (AttributeError, KeyError):
raise KeyError(f'Group does not exist: {gid}')
|
d80c9f7e41b084449f3d1604e513936fecb2335e
| 42,292 |
def _collapse_consecutive_gaps(a, b):
"""Collapse consecutive gaps in an alignment between two sequences.
For example, the alignment
ATC----GA
ATCATCGGA
would become
ATC-GA
ATCAGA
Args:
a, b: two aligned sequences
Returns:
tuple (a', b') where a' and b' represents an alignment of a
and b, with gaps collapsed
"""
assert len(a) == len(b)
a_ccg, b_ccg = a[0], b[0]
for i in range(1, len(a)):
if a[i-1] == '-':
if a[i] == '-':
# Skip this position; we are already in a gap
continue
else:
# We are out of the gap for a; be sure to include
# this position
a_ccg += a[i]
b_ccg += b[i]
elif b[i-1] == '-':
if b[i] == '-':
# Skip this position; we are already in a gap
continue
else:
# We are out of the gap for b; be sure to include
# this position
a_ccg += a[i]
b_ccg += b[i]
else:
a_ccg += a[i]
b_ccg += b[i]
return (a_ccg, b_ccg)
|
bd5f550edd95b63a70c7bd30d9cb2270919cc1b5
| 42,295 |
import json
def json_to_dict(col):
"""
Given a json object as bytes, convert it to a Python dictionary.
:param col:
:type col: bytes
:rtype: dict
"""
if isinstance(col, dict):
return col
elif isinstance(col, bytes):
col = col.decode("utf-8")
return json.loads(col)
|
920a28a6070998d12c176dd27be2ede263c265d0
| 42,298 |
def two_oldest_ages(ages):
"""
The two oldest ages function/method needs to be completed. It should take an array of numbers as its argument and
return the two highest numbers within the array. The returned value should be an array in the format
[second oldest age, oldest age]. The order of the numbers passed in could be any order. The array will
always include at least 2 items.
:param ages: an array of numbers.
:return: the highest two values within the array.
"""
return sorted(ages)[-2:]
|
d7362b9fdd3e266521453446abe30c6c48be41b6
| 42,301 |
import re
def get_numbers(address_string_no_postcode):
"""
Retrieves a list of all the numbers in an address that are not part of the postcode
"""
    num_list = re.findall(r"\d+", address_string_no_postcode)
return num_list
|
d64d05541c423ab391ca3fcdd58471039b5bc481
| 42,302 |
import fnmatch
def find_file(contents, pattern):
"""
Find the file matching the given filename pattern.
Searches the dictionary of Debian package archive entries reported by
:func:`deb_pkg_tools.package.inspect_package()`.
:param contents: The dictionary of package archive entries.
:param pattern: The filename pattern to match (:mod:`fnmatch` syntax).
:returns: The metadata of the matched file.
:raises: :exc:`exceptions.AssertionError` if zero or more than one
archive entry is found.
"""
matches = []
for filename, metadata in contents.items():
if fnmatch.fnmatch(filename, pattern):
matches.append(metadata)
assert len(matches) == 1, "Expected to match exactly one archive entry!"
return matches[0]
|
efd0a5e718e8a487f653247b1dba06b2f39e3292
| 42,304 |
import socket
from functools import lru_cache
@lru_cache(maxsize=None)  # the docstring promises an LRU cache; restore it here
def to_address(hostname, family=socket.AF_UNSPEC, socktype=socket.SOCK_STREAM):
"""
Resolve a hostname to an address, preferring IPv4 addresses.
Given a string containing a DNS hostname, this function resolves the
hostname to an address, using an LRU cache to speed up repeat queries. The
function prefers IPv4 addresses, but will return IPv6 addresses if no IPv4
addresses are present in the result from getaddrinfo. If the hostname does
not resolve, the function returns None rather than raise an exception (this
is preferable as it provides a negative lookup cache).
:param str hostname: The hostname to resolve to an address
:returns: The resolved address
"""
result = None
try:
for (family, _, _, _, sockaddr) in socket.getaddrinfo(
hostname, None, family, socktype):
if family == socket.AF_INET:
result = sockaddr[0]
break
elif family == socket.AF_INET6 and not result:
result = sockaddr[0]
# XXX Workaround LP #1154599
# This should be socket.gaierror instead of socket.error
except socket.error:
pass
return result
|
a164fc21e2efa143137b07c63acbb3ba8062ede7
| 42,311 |
def encode_incarnation(incarnation: int, *, signed: bool = False) -> bytes:
""" Encode the given incarnation integer as 8-byte BigEndian buffer.
"""
return int.to_bytes(incarnation, 8, 'big', signed=signed)
|
cfbac4fc7ee53047b8f6a1f6875dd35dc065eadb
| 42,316 |
def update_variables_momentum(alpha, beta1, var, grad, v):
"""
Updates a variable using the gradient descent with momentum optimization
alpha is the learning rate
beta1 is the momentum weight
var is a numpy.ndarray containing the variable to be updated
grad is a numpy.ndarray containing the gradient of var
v is the previous first moment of var
Returns: the updated variable and the new moment, respectively
"""
vdv = (beta1 * v) + ((1 - beta1) * grad)
vup = var - (alpha * vdv)
return vup, vdv
|
c52c936914829ee115b5f493c8a96fd3088d5a23
| 42,317 |
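A small numeric sketch of one momentum step with the function above (plain Python floats work, since the operations are elementwise):
var, grad, v = 10.0, 2.0, 0.0
var, v = update_variables_momentum(alpha=0.1, beta1=0.9, var=var, grad=grad, v=v)
print(v)    # 0.2 -- new moment: 0.9 * 0 + 0.1 * 2
print(var)  # 9.98 -- 10 - 0.1 * 0.2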
import random
import logging
def test_sub_port(testbed_params):
"""Select a test sub port."""
    test_sub_port = random.choice(list(testbed_params.keys()))  # list() needed: dict views aren't indexable
logging.info("Select test sub port %s", test_sub_port)
return test_sub_port
|
8a5fa38498ec4055d910034355b3b6feb6db3861
| 42,318 |
def round_if_int(val):
"""Rounds off the decimal of a value if it is an integer float."""
if isinstance(val, float) and val.is_integer():
val = int(val)
return val
|
b0d50f8aec6ab7781b390faaf8c68733992dc2c8
| 42,322 |
def omit_falsy(collection: list):
"""
Removes falsy entries from a list, returning None if no entries remaining
"""
new_list = list(filter(lambda entry: entry, collection))
return new_list or None
|
8f60e6de4671f2029e3c1870ee05849501eb988d
| 42,326 |
def unique(items):
    """
    Creates a list with unique entries, sorted by their appearance in the list (to make an ordered set)
    :param items:
    :return:
    """
    existing = set()
    return [x for x in items if not (x in existing or existing.add(x))]
|
e2bd8a917f32ec8c74276b9441480a36f3e8e3de
| 42,335 |
def chk_int(line):
"""Function: chk_int
Description: Checks to see if the string is an integer.
NOTE: Does not work for floats.
Arguments:
(input) line -> String containing an integer.
(output) True|False -> Whether the string is an integer.
"""
# Remove positive/negative sign if present.
if line[0] in ("-", "+"):
return line[1:].isdigit()
return line.isdigit()
|
f8a53a3565cebc6fe384fc7ee9883faf37f3f693
| 42,336 |
def split_artifact_filename(s):
"""
split_artifact_filename('artifact::filename') -> ('artifact', 'filename')
Raises ValueError if "::" is not found.
"""
index = s.index("::")
return (s[:index], s[index+2:])
|
c45f74095f534bdc7bd341c78d9a91c471f96593
| 42,341 |
def whiten(x):
"""Whiten the data
returns (x - mean(x)) / std(x)
Arguments:
x {np.array} -- Input data
Returns:
np.array -- Whitened data
"""
mu = x.mean()
std = x.std()
x = (x-mu)/std
return x
|
d494beb0a5292969e6263d55cbc5d9b0fe0e2b47
| 42,345 |
def get_decile_from_row_number(x, total_num_rows):
"""
Given (value, row number) in sorted RDD and total number of rows, return (decile, value).
Decile will be in integer interval [1, 10].
Example:
row_number = 219, total_num_rows = 1000 ->
219/1000 = 0.219 * 10 ->
int(2.19) + 1 = 3,
so 219 is in the 3rd decile.
"""
value, row_number = x
sorted_rank = row_number / float(total_num_rows)
decile = int(sorted_rank * 10) + 1
return (decile, value)
|
76b8a37b2b7ec744b716bf16f25f357a6fa18948
| 42,347 |
def NOT_IN(attribute, val):
"""
Check if the value does not contain the attribute
"""
return attribute not in val
|
924266ce149c3eca93ddb14a57170863452a8f35
| 42,349 |
def str_list(data_in, mode=0):
"""
mode 0: splits an ascii string and adds elements to a list.
mode 1: converts list elements to ascii characters and joins them.
"""
if mode == 0:
data_out = []
for i in range(len(data_in)):
data_out.append(ord(data_in[i])) # chr and ord functions for ascii conversions
return data_out
    data_out = ''
    for i in range(len(data_in)):  # was hard-coded to 16; use the actual length
        data_out += chr(data_in[i])
    return data_out
|
fc9fbe3e6ffda45e5cf8fe4273ccc42dd088237e
| 42,356 |
def filterDups(coordList):
"""
gets rid of adjacent duplicates in a list
"""
    if not coordList:
        return []
    ret = []
    for i in range(0, len(coordList) - 1):
        if coordList[i] == coordList[i + 1]:
            continue
        ret += [coordList[i]]
    ret += [coordList[-1]]
    return ret
|
9710755f5e4c051839c37a627e4d20392806c792
| 42,361 |
def bearing_to_cartesian(heading):
"""
Bearing (heading from North) to cartesian orientation CCW from east
:param heading: CW from North, in degrees
:type heading: float
:returns: Cartesian direction, CCW from East, in degrees
:rtype: float
"""
    return 90 - heading
|
d583be85d4c7e529e31206c12752310aadce3a3c
| 42,369 |
def calculate_monthly_interest(total_balance: float, monthly_interest: float) -> float:
"""
    Calculates the amount of interest added to the balance each month
:param total_balance: Total debt left
:type total_balance: float
:param monthly_interest: Monthly interest in %
:type monthly_interest: float
"""
return total_balance * monthly_interest / 100
|
3f67a43c3f08aff8614d19029666d37a5875c78c
| 42,373 |