content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
import csv
def get_ycoordinates(filename):
"""Transforms CSV file of Y coordinates (new line seperated) to list"""
with open(filename) as o:
y = [float(item) for sublist in csv.reader(o) for item in sublist]
return y
|
05e54b2cb4e7f9c7b58d3dfbe241090e1b0170ea
| 21,086 |
def node_weight(G, u):
"""
Computes the weighted degree of a node
:param G: networkx.Graph
Graph containing the node u
:param u: node
Node of which the degree will be computed
:return:
w: double
Degree of u
"""
w = 0
for v in G[u].keys():
w = w + G[u][v]['weight']
return w
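# Usage sketch (not part of the original snippet; assumes networkx is installed
# and uses an illustrative toy graph):
# >>> import networkx as nx
# >>> G = nx.Graph()
# >>> G.add_edge('a', 'b', weight=2.0)
# >>> G.add_edge('a', 'c', weight=0.5)
# >>> node_weight(G, 'a')
# 2.5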
|
a6872007b3e8b60ce6e4893631c613febd59453a
| 21,088 |
def compare_balance_with_zero(balance):
"""
    :param balance: a double with the value of the balance after a year.
:return: 0 if the balance is equal to zero or nearly equal, 1 if the balance is greater than zero and -1 if
the balance is lower than zero.
"""
if 0.05 >= balance >= -0.05:
return 0
elif balance > 0.05:
return 1
else:
return -1
|
17b70cc282aa42495fa205ab9ab802a913d78341
| 21,093 |
def get_subseq(df, perc_start, perc_end):
"""Get a subsequence from a dataframe
Args:
df (pd.DataFrame): Pandas DataFrame
perc_start (int): Starting percentage of the subsequence
perc_end (int): Ending percentage of the subsequence
Returns:
subseq (pd.DataFrame): The requested subsequence
"""
start = int(len(df) * perc_start/100)
end = int(len(df) * perc_end/100)
df = df.iloc[start:end]
return df
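# Usage sketch (illustrative frame; assumes pandas is available):
# >>> import pandas as pd
# >>> df = pd.DataFrame({'x': range(10)})
# >>> get_subseq(df, 20, 50)['x'].tolist()
# [2, 3, 4]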
|
986e00834e59dc8489e790b706882e9397e6dbaf
| 21,095 |
import math
def line_angle(p1, p2):
"""
Return the angle of the line that goes from p1 to p2
Clockwise in pygame window
Counter clockwise in xy-space
"""
angle = math.atan2((p1[1]-p2[1]), (p1[0]-p2[0])) * 180.0/math.pi
return (angle + 360) % 360
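# Usage sketch (illustrative points; note the vector is measured from p2 toward p1):
# >>> line_angle((1, 0), (0, 0))
# 0.0
# >>> line_angle((0, 1), (0, 0))
# 90.0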
|
9bafb4e3ac4de4a30a85c04074f8ce169c116538
| 21,110 |
def create_profile_name_from_role_arn(
role_arn, account_alias, profile_name_format
):
"""Create a profile name for a give role ARN and account alias."""
profile_name = role_arn.split("role/")[-1].replace("/", "-")
if profile_name_format == "RoleName-AccountAlias":
return f"{profile_name}-{account_alias}"
return profile_name
|
3df10e12ccae591f963e25bb9df3d8a7634c7463
| 21,112 |
def GetPDBAtomNames(mol):
"""Extracts PDB atoms names"""
names = {}
for i, atom in enumerate(mol.GetAtoms()):
name = atom.GetPDBResidueInfo().GetName()
names[name.strip()] = i
return names
|
e983861fb6e581d5a40eba9b5e4febe77206faae
| 21,115 |
def splitParents(parents):
"""Splits the input string into at most 3 parts:
- father's first name
- mother's first name
- mother's last name.
The input is in the format:
"{father_first_name},{mother_first_name} {mother_last_name}"
"""
split = parents.split(',', 1)
if len(split) == 1:
father = ''
mother = parents
else:
father = split[0].strip()
mother = split[1].strip()
motherSplit = mother.rsplit(' ', 1)
if not father:
return motherSplit
return [father] + motherSplit
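# Usage sketch (illustrative names):
# >>> splitParents('John,Jane Doe')
# ['John', 'Jane', 'Doe']
# >>> splitParents('Jane Doe')
# ['Jane', 'Doe']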
|
4f1621f3ce77df544b2cdcd13985c4b1000945fc
| 21,119 |
def _merge_tables(d1, d2):
"""
Merge dictionaries
Args:
d1 (dict): first dict to merge
d2 (dict): second dict to merge
"""
for key, l in d2.items():
if key in d1:
for item in l:
if item not in d1[key]:
d1[key].append(item)
else:
d1[key] = l
return d1
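# Usage sketch (illustrative dicts; note that d1 is modified in place):
# >>> _merge_tables({'a': [1, 2]}, {'a': [2, 3], 'b': [4]})
# {'a': [1, 2, 3], 'b': [4]}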
|
2f1fabcd9ef7ce2f8f53405e267561c88002f457
| 21,124 |
def unpad(data, length):
    """
    PKCS #7-style unpadding with the given block length.
    Expects `data` as bytes (Python 3); raises AssertionError on bad padding.
    """
    assert 0 < length < 256
    padlen = data[-1]  # indexing bytes yields an int in Python 3
    assert 0 < padlen <= length
    assert data[-padlen:] == bytes([padlen]) * padlen
    return data[:-padlen]
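# Usage sketch (assumes the bytes-based version above; block length 16 is illustrative):
# >>> unpad(b'hello\x03\x03\x03', 16)
# b'hello'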
|
826969b67e7d0a8b4aa65394cc06a3dbf0844dd8
| 21,127 |
import torch
def dis_primal(
input_view: torch.Tensor,
param: torch.Tensor,
n_sample: int,
) -> torch.Tensor:
"""Computes distortion penalty for the primal formulation.
Let n be the number of samples in the view of interest and p the number of
features. In the primal formulation, the 'param' matrix is the p*low_dim model
parameter and input_view corresponds to the input data, of shape n*p. The
distortion penalty can be written as
distortion = ||input_view*input_view.T
- input_view*param*param.T*input_view.T||_2.
The distortion is computed as is when n < p. However, if n > p, we
compute the following formulation:
distortion = torch.sqrt(Tr((I - param*param.T)*input_view.T*input_view
*(I - param*param.T)*input_view.T*input_view))
to avoid computing terms that are O(n**2) in memory or runtime.
Arguments:
input_view: torch.Tensor, one of the two views.
param: torch.Tensor, model parameters.
      n_sample: int, sample size of entire dataset (kept for API compatibility; unused here).
Returns:
distortion_value: torch.Tensor, scalar value.
"""
    # Unpack this view's shape under distinct local names so the `n_sample`
    # argument is not shadowed.
    n_view, p_feature = input_view.shape
    if n_view < p_feature:
inner_prod = torch.matmul(input_view, input_view.t())
tmp = torch.matmul(torch.matmul(
torch.matmul(input_view, param), param.t()), input_view.t())
tmp = (inner_prod - tmp)**2
distortion_value = torch.sqrt(torch.sum(tmp))
else:
gram = torch.matmul(input_view.t(), input_view)
tmp = torch.matmul(param, torch.matmul(param.t(), gram))
prod = gram - tmp
distortion_value = torch.sqrt(torch.trace(torch.matmul(prod, prod)))
return distortion_value
|
6826994ce40799e5b83059d158bd65f50c381622
| 21,130 |
def fill_dict (feed_dict, placeholders, data):
"""Feeds a dictionary of data into a dictionary of placeholders."""
for k in data:
feed_dict[placeholders[k]] = data[k]
return feed_dict
|
31b9554a531cc7880e92371c8b3f17364a9b59de
| 21,134 |
def compare_dates(converted_creation_date, rotation_date):
"""
Compares createTime date to x (default 90) days ago.
Args:
    converted_creation_date - The datetime formatted creation date of our API key.
rotation_date - datetime formatted "rotation_period" days ago (default 90).
Example: 2020-09-18 13:38:52.943663
"""
    # The key is due for rotation if its createTime is older than the
    # rotation cutoff (default 90 days ago); key_analysis uses this result.
    return converted_creation_date < rotation_date
|
7cbf3e63b1fd8ce4d5c0db386517920940dda427
| 21,137 |
def _deg_ord_idx(deg, order):
"""Get the index into S_in or S_out given a degree and order."""
# The -1 here is because we typically exclude the degree=0 term
return deg * deg + deg + order - 1
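# Usage sketch: with the degree-0 term excluded, (deg=1, order=-1) maps to index 0:
# >>> _deg_ord_idx(1, -1)
# 0
# >>> _deg_ord_idx(2, 0)
# 5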
|
9aa5159a5d92e00e6f391e2ca313eb8c342f0a8d
| 21,143 |
def linkify_phone(value):
"""
Render a telephone number as a hyperlink.
"""
if value is None:
return None
return f"tel:{value}"
|
3eacfe7dc80b873d0c9b7df2cd92daf66bceca84
| 21,145 |
def _convert_unit(size_string):
"""
Convert given string to size in megabytes
:param string size_string: Size with unit
:returns integer: Converted size from given unit
:rtype integer:
"""
size, unit = size_string.split(' ')
if 'M' in unit:
return int(float(size))
elif 'G' in unit:
return int(float(size)) * 1024
    elif 'T' in unit:
        return int(float(size)) * 1024 * 1024
    raise ValueError('Unrecognized unit: {}'.format(unit))
|
c0581514194f95d0e5fd871edd559e09f9fc2234
| 21,146 |
import pathlib
def find_component(path: pathlib.PurePath):
"""
Extracts the likely component name of a CSV file based on the path to it
:param path: path to a CSV file
:return: likely component to use
"""
# pylint: disable=no-else-return
if path.parent.name.isnumeric():
# Probably a version directory
return path.parents[1].name
else:
return path.parent.name
|
2cdf37ed07a1c535f59c6318f402c66fe4248fc2
| 21,149 |
from typing import List
def create_cave(depth: int, tx: int, ty: int) -> List[List[int]]:
"""
Creates the cave according to the cave generation rules.
Since the cave is essentially infinite a constant size padding is applied
around the target coordinates to make the pathfinding feasible. Note that
there needs to be a padding because the optimal path can overshoot the
target. The padding size for this input was found simply by starting with
a very large value and progressively decreasing it until a value small
enough was found which produces the correct pathfinding result but is
still relatively quick to compute.
"""
PADDING = 50
cave = [[0] * (tx + PADDING) for _ in range(ty + PADDING)]
for y in range(ty + PADDING):
for x in range(tx + PADDING):
index = None
if y == 0 and x == 0:
index = 0
elif y == 0:
index = x * 16807
elif x == 0:
index = y * 48271
elif y == ty and x == tx:
index = 0
if index is None:
cave[y][x] = (cave[y-1][x] * cave[y][x-1] + depth) % 20183
else:
cave[y][x] = (index + depth) % 20183
return cave
|
20537fab61614aece67b20f9d33bd8ade3259637
| 21,151 |
import logging
def logging_setup(logging_handler, logging_level) -> logging.Logger:
""" Init logger object for logging in rubrik-sdk
For more info - https://docs.python.org/3/library/logging.html
Args:
logging_level(int): Log level
logging_handler (Handler): Handler to log
Returns:
logging.Logger: logger object
"""
logger = logging.getLogger('rubrik-sdk')
logger.setLevel(logging_level)
logger.addHandler(logging_handler)
return logger
|
c1e301183baf7a121467738d56c5c68883b5fca5
| 21,152 |
import re
def extract_template(pattern):
"""Extracts a 'template' of a url pattern given a pattern
returns a string
Example:
input: '^home/city
(-(?P<city_name>bristol|bath|cardiff|swindon|oxford|reading))?$'
output: 'home/city(-{})?'
"""
pattern = pattern.strip('$^')
pattern = re.sub(r'\(\?P.+?\)', '{}', pattern)
return pattern
|
4d36c5f0b6d3ac4072b376119d78b083419143c4
| 21,156 |
def add_a_half(rectangle):
"""Adds 0.5 to a rectangle (2x2 coordinates)"""
return [(x + 0.5, y + 0.5) for x, y in rectangle]
|
6cd1e1a71419b486706a47e7a216b530a9bf6e73
| 21,162 |
from pydantic import BaseModel # noqa: E0611
def is_base_model_type(type_):
"""
Whether ``type_`` is a subclass of ``BaseModel``.
"""
if not isinstance(type_, type):
return False
return issubclass(type_, BaseModel)
|
d0a195460a70a978244e75503896b9bdbd147c9b
| 21,167 |
def extract_2d_info(img_meta, tensor):
"""Extract image augmentation information from img_meta.
Args:
img_meta(dict): Meta info regarding data transformation.
tensor(torch.Tensor): Input tensor used to create new ones.
Returns:
(int, int, int, int, torch.Tensor, bool, torch.Tensor):
The extracted information.
"""
img_shape = img_meta['img_shape']
ori_shape = img_meta['ori_shape']
img_h, img_w, _ = img_shape
ori_h, ori_w, _ = ori_shape
img_scale_factor = (
tensor.new_tensor(img_meta['scale_factor'][:2])
if 'scale_factor' in img_meta else tensor.new_tensor([1.0, 1.0]))
img_flip = img_meta['flip'] if 'flip' in img_meta else False
img_crop_offset = (
tensor.new_tensor(img_meta['img_crop_offset'])
if 'img_crop_offset' in img_meta else tensor.new_tensor([0.0, 0.0]))
return (img_h, img_w, ori_h, ori_w, img_scale_factor, img_flip,
img_crop_offset)
|
8807e24a849aedc1cc460859b4c2088318c13489
| 21,173 |
import math
import random
def train_test(data, val_ratio=0.2, test_ratio=0.2, shuffle=True, seed=42):
"""Split a list into training and test sets, with specified ratio.
By default, the data is shuffled with a fixed random seed.
The data is not mutated.
:param data: list of data objects
:param val_ratio: ratio of data to take for validation set
:param test_ratio: ratio of data to take for test set
:param shuffle: if true, the data is shuffled before being split
:param seed: random seed for the shuffle
:returns: triple of lists (training set, validation set, test set)
"""
n = len(data)
k_val = math.floor((1 - val_ratio - test_ratio) * n)
k_test = math.floor((1 - test_ratio) * n)
if shuffle:
        random.seed(seed)
data_shuffled = random.sample(data, k=n)
else:
data_shuffled = data
return data_shuffled[:k_val], data_shuffled[k_val:k_test], data_shuffled[k_test:]
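# Usage sketch (illustrative data; default ratios give a 60/20/20 split):
# >>> train, val, test = train_test(list(range(10)))
# >>> len(train), len(val), len(test)
# (6, 2, 2)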
|
42c999ce21d1f60c8bba88f10ed833ce9576057c
| 21,176 |
def set_difference(set_a, set_b):
"""
compare two sets and return the items
which are in set_b but not in set_a
"""
diff = set_b - set_a
return diff
|
583a83bac0a95c46050a2626c7d4092a71d62a4e
| 21,177 |
import re
def split_dump_pattern(pattern):
"""Split a comma separated string of patterns, into a list of patterns.
:param pattern: A comma separated string of patterns.
"""
    regex = re.compile(r'\s*,\s*')
return regex.split(pattern)
|
ba32147a07cf31dc4a59d18fe10872a19bf7d209
| 21,179 |
def get_branch_list(nodes, exit_index):
"""Computes the branch list for the control flow graph.
Args:
nodes: A list of control_flow.ControlFlowNodes.
exit_index: The index of the exit node.
Returns:
A Python list representing the branch options available from each node. Each
entry in the list corresponds to a node in the control flow graph, with the
final entry corresponding to the exit node (not present in the cfg). Each
entry is a 2-tuple indicating the next node reached by the True and False
branch respectively (these may be the same.) The exit node leads to itself
along both branches.
"""
indexes_by_id = {
id(node): index for index, node in enumerate(nodes)
}
indexes_by_id[id(None)] = exit_index
branches = []
for node in nodes:
node_branches = node.branches
if node_branches:
branches.append([indexes_by_id[id(node_branches[True])],
indexes_by_id[id(node_branches[False])]])
else:
try:
next_node = next(iter(node.next))
next_index = indexes_by_id[id(next_node)]
except StopIteration:
next_index = exit_index
branches.append([next_index, next_index])
# Finally we add branches from the exit node to itself.
# Omit this if running on BasicBlocks rather than ControlFlowNodes, because
# ControlFlowGraphs have an exit BasicBlock, but no exit ControlFlowNodes.
branches.append([exit_index, exit_index])
return branches
|
8c92087289649ad457340d3a1af781b791b4666c
| 21,180 |
def convert_names_to_highlevel(names, low_level_names,
high_level_names):
"""
Converts group names from a low level to high level API
This is useful for example when you want to return ``db.groups()`` for
the :py:mod:`bob.bio.base`. Your instance of the database should
already have ``low_level_names`` and ``high_level_names`` initialized.
"""
if names is None:
return None
mapping = dict(zip(low_level_names, high_level_names))
if isinstance(names, str):
return mapping.get(names)
return [mapping[g] for g in names]
|
db3a568e5f5465736b3134903f725ce19dfe56d4
| 21,182 |
def get_load_config_timestamp(pefile_object):
"""
Retrieves the timestamp from the Load Configuration directory.
:param pefile.PE pefile_object: pefile object.
:return: Recovered timestamps from PE load config (if any).
None if there aren't.
:rtype: int
"""
timestamp = 0
if hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
loadconfigdata = pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG
timestamp = getattr(loadconfigdata.struct, 'TimeDateStamp', 0)
return timestamp
|
4b04afa7d844ce05761fa5b8f484540a1ae243a4
| 21,184 |
def soft_timing(Nframes, time, fpsmin=10, fpsmax=20):
"""determines time & fps; aims for target time, but forces fpsmin < fps < fpsmax.
example usage: target 3 seconds, but force 10 < fps < 25:
import QOL.animations as aqol
for i in range(50):
code_that_makes_plot_number_i()
aqol.saveframe('moviename')
plt.close()
        aqol.movie('moviename', **soft_timing(50, 3, fpsmin=10, fpsmax=25))
returns dict(time=time, fps=fps)
"""
    if time > Nframes/fpsmin:   # makes sure the movie doesn't go too slow.
        (time, fps) = (None, fpsmin)
    elif time < Nframes/fpsmax: # makes sure the movie doesn't go too fast.
        (time, fps) = (None, fpsmax)
    else:  # makes the movie <time> duration if it will have fpsmin < fps < fpsmax.
        (time, fps) = (time, 1)  # fps is ignored in aqol.movie since time is not None.
return dict(time=time, fps=fps)
|
e6698e8665398c9d2996f2532d3455caaf77d253
| 21,187 |
import ast
def has_docstring(node):
"""Retuns true if given function or method has a docstring.
"""
docstring = ast.get_docstring(node)
if docstring is not None:
return not docstring.startswith('mys-embedded-c++')
else:
return False
|
7d6b1be6d48ba39fb871cf517080f352c56ac14c
| 21,193 |
import json
def encode_pretty_printed_json(json_object):
"""Encodes the JSON object dict as human readable ascii bytes."""
return json.dumps(
json_object,
ensure_ascii=True,
indent=4,
sort_keys=True,
).encode("ascii")
|
e91ccef7379de9e062b8b456b02d99b04f8871e1
| 21,195 |
def format_field(relation_name, field):
"""Util for formatting relation name and field into sql syntax."""
return "%s.%s" % (relation_name, field)
|
5e2fd795779f198a64a176da218f879046ab49f5
| 21,197 |
def validate_initial_digits(credit_card_number: str) -> bool:
"""
Function to validate initial digits of a given credit card number.
>>> valid = "4111111111111111 41111111111111 34 35 37 412345 523456 634567"
>>> all(validate_initial_digits(cc) for cc in valid.split())
True
>>> invalid = "14 25 76 32323 36111111111111"
>>> all(validate_initial_digits(cc) is False for cc in invalid.split())
True
"""
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
|
3a40c24d1200ea70c84035b66a7cd756a8abf32e
| 21,199 |
def j2k(j, E, nu, plane_stress=True):
"""
    Convert the fracture energy release rate J (J-integral) to the stress intensity factor K.
Parameters
----------
j: float (in N/mm)
E: float
Young's modulus in GPa.
nu: float
Poisson's ratio
plane_stress: bool
True for plane stress (default) or False for plane strain condition.
Returns
-------
K : float
Units are MPa m^0.5.
"""
if plane_stress:
E = E / (1 - nu ** 2)
return (j * E) ** 0.5
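# Usage sketch (illustrative steel-like values: J = 100 N/mm, E = 200 GPa, nu = 0.3):
# >>> round(j2k(100, 200, 0.3))  # plane stress, in MPa m^0.5
# 148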
|
9ae849e78ba1136209accb56afa411803c0940a3
| 21,203 |
import gzip
def openGzipOrText(fPath,encoding=None) :
"""Opens a file for reading as text, uncompressing it on read if the path ends in .gz"""
if str(fPath).lower().endswith('.gz') :
return gzip.open(fPath,'rt',encoding=encoding)
else :
return open(fPath,'rt',encoding=encoding)
|
c9497814182ba1c884acb5de488e10dcd5cafc1d
| 21,209 |
def get_parent_technique_id(sub_tid):
"""Given a sub-technique id, return parent"""
return sub_tid.split(".")[0]
|
4dcfc1e3558e20a58c754a17f5730865804f4b9d
| 21,210 |
def lineStartingWith(string, lines):
""" Searches through the specified list of strings and returns the
first line starting with the specified search string, or None if not found
"""
    for line in lines:
        if line.startswith(string):
            return line
    return None
|
7d6b8fa259a8514721443a37195d678c7d8ac21b
| 21,211 |
from pathlib import Path
def _get_file_from_folder(folder: Path, suffix: str) -> Path:
"""Gets this first file in a folder with the specified suffix
Args:
folder (Path): folder to search for files
suffix (str): suffix for file to search for
Returns:
Path: path to file
"""
return list(Path(folder).glob("*" + suffix))[0]
|
3e941b5dfaa394baa0baa9c1675a62020c85d8ae
| 21,212 |
import copy
def _normalize_barcodes(items):
"""Normalize barcode specification methods into individual items.
"""
split_items = []
for item in items:
if item.has_key("multiplex"):
for multi in item["multiplex"]:
base = copy.deepcopy(item)
base["description"] += ": {0}".format(multi["name"])
del multi["name"]
del base["multiplex"]
base.update(multi)
split_items.append(base)
elif item.has_key("barcode"):
item.update(item["barcode"])
del item["barcode"]
split_items.append(item)
else:
item["barcode_id"] = None
split_items.append(item)
return split_items
|
6f576d7789cc045b81abe8535942cf0c0abd912a
| 21,213 |
def __to_float(num):
"""
    Try to convert 'num' to float; return the converted value, or 'num'
    unchanged if conversion is not possible.
    """
    try:
        return float(num)
    except ValueError:
        return num
|
642d2a247066028c95623641a61f8eb523025d15
| 21,222 |
def calc_minutes(hhmm):
"""Convert 'HH:MM' to minutes"""
return int(hhmm[:2]) * 60 + int(hhmm[3:])
|
09de4c4f01860f67aa8628a50db8eb89f0815000
| 21,223 |
def is_stop_word(word, nlp):
""" Check if a word is a stop word.
:param word: word
:param nlp: spacy model
:return: boolean
"""
return nlp(word)[0].is_stop
|
5eaae33e299b0cd51f8e9b72517e14a128fb46fa
| 21,224 |
def format_80(s):
"""
Split string that is longer than 80 characters to several lines
Args:
s (str)
Returns:
ss (str): formatted string
"""
i = 0
ss = ''
for x in s:
ss += x
i += 1
if i == 80:
i = 0
            ss += ' \\\n'  # backslash must be the last character before the newline for a valid continuation
return ss
|
6de52ef72f7bfaa237c43390cecea22a85fc88b3
| 21,225 |
def check_panagram(
input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
"""
    A pangram contains every letter of the alphabet at least once.
>>> check_panagram("The quick brown fox jumps over the lazy dog")
True
>>> check_panagram("My name is Unknown")
False
>>> check_panagram("The quick brown fox jumps over the la_y dog")
False
"""
frequency = set()
input_str = input_str.replace(
" ", ""
) # Replacing all the Whitespaces in our sentence
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
    return len(frequency) == 26
|
b340c61820aaf674eaf79f712ea2dd64b4aa5690
| 21,232 |
def df_query_with_ratio(df_in, query, ratio_name='ratio'):
"""
This function calls the .query() method on a DataFrame
and additionally computes the ratio of resulting rows
over the original number of rows.
The result is a tuple with the filtered dataframe as first
element and the filter ratio as second element.
"""
df_out = df_in.query(query)
ratio = df_out.shape[0] / df_in.shape[0]
print('{} = {:.2f} %'.format(ratio_name, 100 * ratio))
return df_out, ratio
|
46f0cad6494ff142bc9cd1a139e6cfe16cde8ac5
| 21,233 |
import torch
def cthw2tlbr(boxes):
"""
Convert center/size format `boxes` to top/left bottom/right corners.
:param boxes: bounding boxes
:return: bounding boxes
"""
top_left = boxes[..., :2] - boxes[..., 2:]/2
bot_right = boxes[..., :2] + boxes[..., 2:]/2
return torch.cat([top_left, bot_right], dim=-1)
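# Usage sketch (illustrative box centered at (2, 2) with width and height 2):
# >>> cthw2tlbr(torch.tensor([[2., 2., 2., 2.]]))
# tensor([[1., 1., 3., 3.]])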
|
c425f97a244e8433b07c5ec6839e0ca090d6e6bc
| 21,234 |
def get_location_by_offset(filename, offset):
"""
This function returns the line and column number in the given file which
is located at the given offset (i.e. number of characters including new
line characters).
"""
with open(filename, encoding='utf-8', errors='ignore') as f:
for row, line in enumerate(f, 1):
length = len(line)
if length < offset:
offset -= length
else:
return row, offset + 1
|
434b60a80fffd8068ea6d90ead92d914127f3b3e
| 21,236 |
def scale_to_percent(val, min, max):
"""
Utility function to scale a given value to a percentage within a range
"""
current = val
# first, ensure that current is within our defined min/max
if val < min:
current = min
elif current > max:
current = max
# now, we scale it to b/t 0 and 1
scaled = (current-min)/(max - min)
return scaled * 100
|
7397f16e8c9ee014ceec62065ad830d3451484f5
| 21,239 |
def get_node_exec_options(profile_string, exec_node_id):
""" Return a list with all of the ExecOption strings for the given exec node id. """
results = []
matched_node = False
id_string = "(id={0})".format(exec_node_id)
for line in profile_string.splitlines():
if matched_node and line.strip().startswith("ExecOption:"):
results.append(line.strip())
matched_node = False
if id_string in line:
# Check for the ExecOption string on the next line.
matched_node = True
return results
|
c7b6329b9caca6feb3bf00c1e9559887d03e4139
| 21,242 |
def leaf_2(key):
"""Returns the key value of the leaf"""
return key
|
d67bc6527028bbc7ffabb5d88cbff149e1ddea4c
| 21,246 |
def denormalize_images(imgs_norm):
""" De normalize images for plotting
"""
imgs = (imgs_norm + 0.5) * 255.0
return imgs
|
ed050b241ab63385119324ac6927c30be7a9d237
| 21,248 |
def drop_zombies(feed):
"""
In the given Feed, drop stops with no stop times,
trips with no stop times, shapes with no trips,
routes with no trips, and services with no trips, in that order.
Return the resulting Feed.
"""
feed = feed.copy()
# Drop stops of location type 0 that lack stop times
ids = feed.stop_times['stop_id'].unique()
f = feed.stops
cond = f['stop_id'].isin(ids)
if 'location_type' in f.columns:
cond |= f['location_type'] != 0
feed.stops = f[cond].copy()
# Drop trips with no stop times
ids = feed.stop_times['trip_id'].unique()
f = feed.trips
feed.trips = f[f['trip_id'].isin(ids)]
# Drop shapes with no trips
ids = feed.trips['shape_id'].unique()
f = feed.shapes
if f is not None:
feed.shapes = f[f['shape_id'].isin(ids)]
# Drop routes with no trips
ids = feed.trips['route_id'].unique()
f = feed.routes
feed.routes = f[f['route_id'].isin(ids)]
# Drop services with no trips
ids = feed.trips['service_id'].unique()
if feed.calendar is not None:
f = feed.calendar
feed.calendar = f[f['service_id'].isin(ids)]
if feed.calendar_dates is not None:
f = feed.calendar_dates
feed.calendar_dates = f[f['service_id'].isin(ids)]
return feed
|
1d51dd42c6530f9f5dead54c16d9e8567463d3b1
| 21,249 |
def mock_get(pipeline, allowDiskUse): # pylint: disable=W0613,C0103
"""
Return mocked mongodb docs.
"""
return [
{'_id': 'dummy_id_A', 'value': 'dummy_value_A'},
{'_id': 'dummy_id_B', 'value': 'dummy_value_B'},
]
|
ee7c0062758c1bcb36a4cad88eb7b3575b37df11
| 21,256 |
def roce(net_income, preferred_dividends, average_common_equity):
"""Computes return on common equity.
Parameters
----------
net_income : int or float
Net income
preferred_dividends : int or float
Preferred dividends
average_common_equity : int or float
Average common equity
Returns
-------
out : int or float
Return on common equity
"""
return (net_income - preferred_dividends) / average_common_equity
|
dd800458e2379a72bbe8377b979cc3572ec0c525
| 21,264 |
def _is_int_in_range(value, start, end):
"""Try to convert value to int and check if it lies within
range 'start' to 'end'.
:param value: value to verify
:param start: start number of range
:param end: end number of range
:returns: bool
"""
try:
val = int(value)
except (ValueError, TypeError):
return False
return (start <= val <= end)
|
54ed477b4d6f603a48a1104d60c00433b1cc47db
| 21,267 |
def isstdiofilename(pat):
"""True if the given pat looks like a filename denoting stdin/stdout"""
return not pat or pat == b'-'
|
feff8e9c76be62c32cc46a7b02c3bf76da30179e
| 21,272 |
import bisect
def _RevisionState(test_results_log, revision):
"""Check the state of tests at a given SVN revision.
  Considers tests as having passed at a revision if they passed at revisions both
before and after.
Args:
test_results_log: A test results log dictionary from GetTestResultsLog().
revision: The revision to check at.
Returns:
'passed', 'failed', or 'unknown'
"""
assert isinstance(revision, int), 'The revision must be an integer'
keys = sorted(test_results_log.keys())
# Return passed if the exact revision passed on Android.
if revision in test_results_log:
return 'passed' if test_results_log[revision] else 'failed'
# Tests were not run on this exact revision on Android.
index = bisect.bisect_right(keys, revision)
# Tests have not yet run on Android at or above this revision.
if index == len(test_results_log):
return 'unknown'
# No log exists for any prior revision, assume it failed.
if index == 0:
return 'failed'
# Return passed if the revisions on both sides passed.
if test_results_log[keys[index]] and test_results_log[keys[index - 1]]:
return 'passed'
return 'failed'
|
e6f49854e92c228dc620acb569d7232ecf27507c
| 21,276 |
def dmlab_level_label(level) -> str:
"""Returns the label for a DMLab level."""
return level.replace('_', ' ').title()
|
332de2e4b0a834083ec6ed6b3921c43c99b0678a
| 21,280 |
from typing import Dict
from typing import List
from typing import Tuple
def get_raw_dependency_information_from_dep_file(dep_file: str) -> Dict[str, List[Tuple[str, str]]]:
"""return RAW dependency information contained in dep_file in the form of a dictionary.
    Format: {source_line: [(sink_line, var_name)]}
:param dep_file: path to dependency file
:return: RAW dictionary
"""
raw_dependencies: Dict[str, List[Tuple[str, str]]] = dict()
with open(dep_file) as f:
for line in f.readlines():
line = line.replace("\n", "")
# format of dependency entries in _dep.txt-file:
# sourceLine NOM RAW sinkLine|variable
if " NOM " not in line:
continue
split_line = line.split(" NOM ")
source_line = split_line[0]
# split entries
entries = []
current_entry = ""
for word in split_line[1].split(" "):
word = word.replace(" ", "")
if word == "RAW" or word == "WAR" or word == "WAW" or word == "INIT":
if len(current_entry) > 0:
entries.append(current_entry)
current_entry = ""
if len(current_entry) > 0:
current_entry += " " + word
else:
current_entry += word
if len(current_entry) > 0:
entries.append(current_entry)
if source_line not in raw_dependencies:
raw_dependencies[source_line] = []
for entry in entries:
# filter for RAW dependencies
split_entry = entry.split(" ")
if split_entry[0] != "RAW":
continue
split_sink_line_var = split_entry[1].split("|")
sink_line = split_sink_line_var[0]
var_name = split_sink_line_var[1].replace(".addr", "")
raw_dependencies[source_line].append((sink_line, var_name))
return raw_dependencies
|
84a137b0620215f27fc04b92d23d1004e6241b7d
| 21,284 |
def flag(s):
"""Turn 'flag_name' into `--flag-name`."""
return '--' + str(s).replace('_', '-')
|
3f76ba5a765d9f050576e1535d0d72bbd260bc43
| 21,288 |
def pmi(financedamount, pmirate):
"""Return annual private mortgage insurance cost.
:param financedamount: Amount of money borrowed.
:type financedamount: double
:param pmirate: Rate charged when loan-to-value > 80%.
:type pmirate: double
:return: double
"""
return financedamount * pmirate
|
5290fd7bd6e90d8d6b5447e4d367b6e91f94ced3
| 21,289 |
def get_relevant_files(session_data: dict):
"""
Generates the pipeline's "starting node"
Parameters
----------
session_data : dict
A dictionary with the locations of all necessary session's data
Returns
-------
str,str
The "starting" node for processing
"""
return session_data.get("dwi"), session_data.get("fmap")
|
ceac08ed24fa081f63fb931cb97e8c433646114e
| 21,292 |
def _last_index(x, default_dim):
"""Returns the last dimension's index or default_dim if x has no shape."""
if x.get_shape().ndims is not None:
return len(x.get_shape()) - 1
else:
return default_dim
|
e6fe7892329b623c59c7e1c41f51bf7d9b1e81b6
| 21,296 |
def qsize(queue):
"""Return the (approximate) queue size where available; -1 where not (OS X)."""
try:
return queue.qsize()
except NotImplementedError:
# OS X doesn't support qsize
return -1
|
155dc900a4d31be6b3e1cdad2f3b9caf39c096b4
| 21,297 |
def layer_point_to_map(map_layer, point):
"""Convert a pair of coordinates from layer projection to map projection."""
return [point[0] / map_layer.data.tilewidth,
point[1] / map_layer.data.tileheight]
|
178472c93947405f855d1860b84105bd0290edcd
| 21,301 |
def prodigal_gene_start(rec_description: str) -> int:
"""Get a gene start index from a Prodigal FASTA header
Examples
--------
Given the following Prodigal FASTA output header, parse the gene start index (i.e. 197)
>>> prodigal_gene_start("k141_2229_1 # 197 # 379 # 1 # ID=4_1;partial=00;start_type=ATG;rbs_motif=AGGAGG;rbs_spacer=5-10bp;gc_cont=0.437")
197
Parameters
----------
rec_description : str
SeqRecord description of Prodigal FASTA header
Returns
-------
int
Gene start index
"""
return int(rec_description.split('#')[1].strip())
|
d0aaa9d09d67dea75537f2f48c550a9df31bcf45
| 21,302 |
def ini_conf_to_bool(value):
"""
    Depending on the INI file interpreter, false values may be parsed as plain
    strings, so use this function to interpret them as booleans.
    :param value: value of ini parameter
    :return: boolean value
"""
if value in ('False', 'false', '0', 'off', 'no'):
return False
return bool(value)
|
f9e5e14066bf4d2e17bbdb5cb97f3b2f1ba867c7
| 21,308 |
def has_cloned_parent(c, p):
"""Return True if p has a cloned parent within the @rst tree."""
root = c.rstCommands.root
p = p.parent()
while p and p != root:
if p.isCloned():
return True
p.moveToParent()
return False
|
1e0964520e188dad082b0f3e7c70f17bb945c063
| 21,312 |
import re
def get_depth_of_exec_function(backtrace):
"""
>>> get_depth_of_exec_function(["#1 0x00007f29e6eb7df5 in standard_ExecutorRun (queryDesc=0x562aad346d38,"])
1
>>> get_depth_of_exec_function(["#27 0x00007f29e6eb7df5 in pgss_ExecutorRun (queryDesc=0x562aad346d38,"])
27
>>> get_depth_of_exec_function(["#13 0x00007f29e6eb7df5 in explain_ExecutorRun (queryDesc=0x562aad346d38,"])
13
>>> get_depth_of_exec_function(["#4 0x00007f29e6eb7df5 in ExecEvalNot (notclause=<optimized out>,"])
4
>>> get_depth_of_exec_function(["#5 0x00007f29e6eb7df5 in ExecProcNode (node=node@entry=0x562aad157358,)"])
5
>>> get_depth_of_exec_function(["#12 0x00007f29e6eb7df5 in ExecutePlan (dest=0x562aad15e290,"])
12
>>> get_depth_of_exec_function(["#21 standard_ExecutorRun (queryDesc=0x562aad0b46f8, direction=<optimized out>,"])
21
>>> bt = ["#0 palloc0 (size=size@entry=328)", \
"#1 0x0000562aac6c9970 in InstrAlloc (n=n@entry=1, instrument_options=4)", \
"#2 0x0000562aac6bdddb in ExecInitNode (node=node@entry=0x562aad49e818,"]
>>> get_depth_of_exec_function(bt)
2
"""
exec_regexp = re.compile(r"#([0-9]+) .*Exec[a-zA-Z]+ \(")
for frame in backtrace:
m = exec_regexp.search(frame)
if m:
return int(m.group(1))
return None
|
8e5af7d4cda5db53f3be87916a60d2f6f146ed6c
| 21,314 |
def read_weights_file(weights_file):
"""
Given a tab separated file with leaf names for a phylogenetic tree in column one and multipliers for that leaf's
branch length in column two, will create a dictionary with leaf names as keys and multipliers as values
:param weights_file: Path to a tab-separated text file described above.
:return: dictionary with leaf names as keys and multipliers as values
"""
weights = dict()
with open(weights_file) as f:
for line in f:
stripped_line = line.rstrip()
x = stripped_line.split('\t')
if len(x) != 2 and stripped_line != '':
raise RuntimeError('One of the lines in your weights file ({}) is not formatted correctly. '
'Correct format is leafname\tweight, tab-separated. '
'Offending line was: {}'.format(weights_file, stripped_line))
elif len(x) == 2:
try:
weight = float(x[1])
except ValueError:
raise ValueError('The second column in your weights file ({}) must be a number. Please fix the '
'following line: {}'.format(weights_file, stripped_line))
weights[x[0]] = weight
return weights
|
68b1af3238f1f2b564c18139e55ee644f0bd4da0
| 21,317 |
def modelSpin(model, nodes):
"""
Determines and reports spin state of nodes.
Args:
model: an instance of a Model object.
nodes: a dictionary of node objects.
Returns:
        state: a list of spin markers for the model, '+' for positive spin and '-' for negative.
"""
state = []
for e in nodes:
state.append(nodes[e].getSpin())
state = ['+' if x > 0 else '-' for x in state]
return state
|
bf64721f47ab061cb0dc0c4a5f606a1987debc3e
| 21,318 |
def engineer_data(data):
"""
Returns modified version of data with left and right aggregate features while dropping weight and distance features
:param data: data to work with
:return: modified dataframe
"""
data['left'] = data['left_weight'] * data['left_distance']
data['right'] = data['right_weight'] * data['right_distance']
data = data.drop(['left_weight', 'left_distance', 'right_weight', 'right_distance'], axis=1)
return data
|
38774fe9213c95eb743679b5162daadeefe8f2ac
| 21,320 |
def convert_8_int_to_tuple(int_date):
""" Converts an 8-digit integer date (e.g. 20161231) to a date tuple (Y,M,D).
"""
return (int(str(int_date)[0:4]), int(str(int_date)[4:6]), int(str(int_date)[6:8]))
|
733982c2de2c74c5116c15a1a91adf03c3bd6871
| 21,325 |
def clean(iterator) -> list:
"""
    Takes an iterator of strings and removes those that str.strip
    considers to consist entirely of whitespace.
"""
iterator = map(str.strip, iterator)
return list(filter(bool, iterator))
|
9c24da4a8cdfe59bf92fee3cea4b98b4b479147d
| 21,328 |
import json
def load_js(fname):
"""
Parameters
----------
fname: str
Returns
-------
obj
content of the json file, generally dict
"""
with open(fname,"r") as f:
jsdict = json.load(f)
return jsdict
|
6742fd1744eb30d51e937c7aa4069c161bb459c5
| 21,331 |
from pathlib import Path
def default_path_factory(refname: str, ispkg: bool) -> Path:
"""Default path factory for markdown."""
path = Path(*refname.split("."))
if ispkg:
filepath = path / "index.md"
else:
filepath = path.with_suffix(".md")
return filepath
|
96fb86391567269695b3638fec80d7f32f88407a
| 21,337 |
def loadtxt(filename):
"""Read list fo strings from file"""
txt = []
with open(filename, 'r') as f:
for l in f:
txt.append(l.strip())
return txt
|
ce4d3c411d571a4fc629d1664ee06d15a11b8614
| 21,338 |
import torch
def deltaE(lab1, lab2):
"""Delta E (CIE 1976).
lab1: Bx3xHxW
lab2: Bx3xHxW
return: Bx1xHxW
>>> lab1 = torch.tensor([100., 75., 50.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([50., 50., 100.]).view(1, 3, 1, 1)
>>> deltaE(lab1, lab2).item()
75.0
"""
return torch.norm(lab1 - lab2, 2, 1, keepdim=True)
|
cbf123e42c74c15e4a4851e17ab5f475280387b1
| 21,340 |
def derivative_relu(relu_output):
""" Compute derivative of ReLu function """
relu_output[relu_output <= 0] = 0
relu_output[relu_output > 0] = 1
return relu_output
|
eb5d3d2f3fe912c4426cbd60a33bc96b81bfe5a0
| 21,344 |
import torch
def pdist2(x, y):
"""
Compute distance between each pair of row vectors in x and y
Args:
x: tensor of shape n*p
y: tensor of shape m*p
Returns:
dist: tensor of shape n*m
"""
p = x.shape[1]
n = x.shape[0]
m = y.shape[0]
xtile = torch.cat([x] * m, dim=1).view(-1, p)
ytile = torch.cat([y] * n, dim=0)
dist = torch.pairwise_distance(xtile, ytile)
return dist.view(n, m)
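# Usage sketch (illustrative 2-D points):
# >>> x = torch.tensor([[0., 0.], [1., 0.]])
# >>> y = torch.tensor([[0., 3.]])
# >>> pdist2(x, y)
# tensor([[3.0000],
#         [3.1623]])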
|
2e3694f58c3b7b7b743c57c64a0aeacbb78288b6
| 21,345 |
def s2ca(s):
    """Takes a string of concatenated cipher texts (314 characters each) and returns them as a list"""
    cypher_array = []
    for i in range(len(s) // 314):
        # Slice consecutive non-overlapping 314-character blocks.
        cypher_array.append(s[i*314:(i+1)*314])
    return cypher_array
|
c92bb8d2ec4bf48b2ca1c1b2b8143450ae6a3ac3
| 21,355 |
def is_dict(value):
""" is value a dict"""
return isinstance(value, dict)
|
73244012a40dd73cc8bdfea34a9453b03af5941f
| 21,356 |
def _kernel_seq(inputs, estimator):
"""
Wrapper around a function that computes anything on two sequences and returns a dict
While it is written as a general purpose kernel for anything, here it is used for
causal discovery and estimation from CCM based methods.
The function unpacks inputs into an index element and a sequence pair and runs the
estimator function on the sequence pair, returning various estimates in a dict
Parameters
----------
inputs : tuple
        Tuple of two elements - (a, b) where a is a pair index and b is a 4-tuple. a can
        be produced manually or more typically using enumerate; b holds the two sequence
        indices and the two sequences, usually built by zip-ping larger iterables or
        itertools' product/combinations. a is passed to keep track of order in case of
        asynchronous execution.
        Should look like this: (index, (index_x, index_y, sequence_x, sequence_y))
estimator : function
A function that can compute something on two arrays and return a dict. Preferably
one that can compute something meaningful, like causal discovery
Returns
-------
out : dict
Estimates obtained by running estimator on inputs.
"""
# Unpack inputs
idx, seqs = inputs
# Unpack sequences
idx_x, idx_y, seq_x, seq_y = seqs
# Initialize dictionary of output estimates with index
out = {"index_pair": idx, "index_x": idx_x, "index_y": idx_y}
# Execute the estimator function on the sequence pair
out.update(estimator(seq_x, seq_y))
# Some feedback to console
# print(".", end="")
return out
|
bd1b02ae84e959f9dabf712d7193408f5ddc6ff0
| 21,362 |
def _lookup_response_str(status_code):
"""
Simple function to return a response string for a Ping StatusCode
:param status_code: int:
:return: str: Response string
"""
status_msg = {0: 'Success',
11001: 'Buffer Too Small',
11002: 'Dest Net Unreachable',
11003: 'Dest Host Unreachable',
11004: 'Dest Protocol Unreachable',
11005: 'Dest Port Unreachable',
11006: 'No Resources',
11007: 'Bad Option',
11008: 'Hardware Error',
11009: 'Packet Too Big',
11010: 'Timed Out',
11011: 'Bad Request',
11012: 'Bad Route',
11013: 'TTL Expired Transit',
11014: 'TTL Expired Reassembly',
11015: 'Parameter Problem',
11016: 'Source Quench',
11017: 'Option Too Big',
11018: 'Bad Destination',
11032: 'Negotiating IPSEC',
11050: 'General Failure'}
return status_msg.get(status_code, 'Unknown StatusCode')
|
984b857b4fbc0bd407d5da592ead5e19160cadcd
| 21,363 |
def __create_python_code_block(message):
"""Create a python code block"""
return f"```python\n{message}```"
|
8397187487af0780542e8a227118994e1fc8ced8
| 21,364 |
def label_map(value):
""" Function that determines the diagnosis according to the Glucose level of an entry.
    The four possible diagnoses are: hypoglycemia, hyperglycemia, severe hyperglycemia and in-range.
:param value: Glucose level
:return: Diagnosis (String)
"""
hypoglycemia_threshold = 70
hyperglycemia_threshold = 180
severe_hyperglycemia_threshold = 240
if value < hypoglycemia_threshold:
return 'Hypoglycemia'
elif value > hyperglycemia_threshold:
if value > severe_hyperglycemia_threshold:
return 'Severe_Hyperglycemia'
else:
return 'Hyperglycemia'
else:
return 'In_Range'
|
7c9798dbce01c3de3ec4a09b20523ef2b3ba5888
| 21,365 |
def getSizeOfVST(vst):
"""
    Description: Return the size of the vector space of a vst variable.
Look for the first existing vector of the vst and get its size.
NB: Used only to not have to pass the size of the vst as a parameter.
"""
size = 0
for key in vst:
if vst[key] is not None:
#size = vst[key].size
size = len(vst[key])
break
return size
|
c8704d51fac22ad3ada5cf14bf2df268f7f0558e
| 21,367 |
def within_bounds(
x: float, y: float, min_x: float, min_y: float, max_x: float, max_y: float
):
"""
Are x and y within the bounds.
>>> within_bounds(1, 1, 0, 0, 2, 2)
True
"""
return (min_x <= x <= max_x) and (min_y <= y <= max_y)
|
d57c8f63b8548dd62efd126ccb68a92c3d4ca5af
| 21,372 |
def validate_sp(sp):
"""Validate seasonal periodicity.
Parameters
----------
sp : int
Seasonal periodicity
Returns
-------
sp : int
Validated seasonal periodicity
"""
if sp is None:
return sp
else:
        if not isinstance(sp, int) or sp < 0:
            raise ValueError(f"Seasonal periodicity (sp) has to be a non-negative integer, but found: "
                             f"{sp} of type: {type(sp)}")
return sp
|
265bed38cad2f6eae96c3611a90e8e4b3ef4d620
| 21,375 |
def integrate(func, interval=None, rects=100000):
"""
Returns the result of the integral from the inclusive
interval (a, b) using a Riemann sum approximation
"""
if interval is None or not isinstance(interval, tuple):
interval = eval(input('Interval (a, b): '))
a, b = interval
if a > b:
print('note: the calculated area will be negative')
    if b - a > rects:
        rects = int(b - a)  # range() below requires an integer count
area = 0
x = a
dx = (b - a) / rects
for n in range(rects):
try:
area += func(x) * dx
except Exception as e:
print('Error:', e)
x += dx
return area
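# Usage sketch (the Riemann sum is approximate; x**2 on [0, 1] integrates to 1/3):
# >>> round(integrate(lambda x: x**2, (0, 1)), 3)
# 0.333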
|
1068e78718c411c151952de27bca5c9b6bb3dcf5
| 21,377 |
def log_hide(message):
"""Hide security sensitive information from log messages"""
    if not isinstance(message, dict):
        return message
if "token" in message:
message["token"] = "xxxxxxxx-xxxx-xx"
if "AccessToken" in message:
message["AccessToken"] = "xxxxxxxx-xxxx-xx"
return message
|
cb19624b7e6153c4eba70024c5b584edf42e5b9a
| 21,379 |
def extractLabels(data, label_column):
"""
Extracts the labels from the data
Arguments:
data {Dict/List of times series} -- Time series
label_column {str} -- Name of the column to extract
"""
if isinstance(data, dict):
labels = {d: data[d][label_column] for d in data}
data = {d: data[d][[c for c in data[d].columns if c != label_column]] for d in data}
else:
labels = data[label_column]
data = data[[c for c in data.columns if c != label_column]]
return data, labels
|
19990c64bfa2055b739928797f875656724995fa
| 21,380 |
from functools import reduce
def product(nums):
"""
Like sum, but for product.
"""
    return reduce(lambda x, y: x * y, nums)
|
fdc492d6aa94ccbeb83de21d4c5164c62f1b2ca8
| 21,381 |
def get_published_templateid(context):
"""
Return the template id, as of the PUBLISHED variable, or None
"""
request = context.REQUEST
    if 'PUBLISHED' in request:  # has_key was removed in Python 3
return request['PUBLISHED'].__name__
return None
|
5e036a957e2085690437a8e6d927a5bafc75d53f
| 21,382 |
def CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model):
"""Return the time step size in the numerical approximation.
Call Signature:
CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model)
Parameters
----------
CFL: float
In this program, CFL is treated as the
diffusion number for diffusion equations, and
Courant number for the convection equations.
Caution: This is not a true numerical definition of CFL though.
diff: float
Physics specific coefficient in the diffusion model.
For example, kinematic viscosity or thermal diffusivity.
conv: float
Physics specific coefficient in the convection model.
For example, speed of sound in the first-order linear wave eq.
dX: float
Grid step size along X-axis.
dY: float
Grid step size along Y-axis. Value required for 2D applications.
Dimension: str
Dimension of the domain. Allowed inputs are "1D" or "2D".
Model: str
Model of the governing equation. To see available options for this
parameter, type the following command on your terminal
python fetchoption.py "model"
Returns
-------
TimeStep: float
Time step in the model equation.
"""
print("Calculating time step size for the simulation: Completed.")
# ************** DIFFUSION EQN. ******************
if Model.upper() == "DIFFUSION":
dX2 = dX*dX
if Dimension.upper() == "1D":
TimeStep = CFL*dX2/diff
return TimeStep
elif Dimension.upper() == "2D":
dY2 = dY*dY
TimeStep = CFL*(1.0/((1/dX2) + (1/dY2)))/diff
return TimeStep
# ************** FIRST-ORDER WAVE EQN. *****************
elif Model.upper() == "FO_WAVE":
if Dimension.upper() == "1D":
TimeStep = CFL*dX/conv
return TimeStep
# ************** BURGERS EQN. *****************
elif Model.upper() == "INV_BURGERS":
if Dimension.upper() == "1D":
TimeStep = CFL*dX
return TimeStep
elif Model.upper() == "VISC_BURGERS":
if Dimension.upper() == "1D":
dX2 = dX*dX
TimeStep = CFL*dX2
return TimeStep
|
0ecbfb3c9179140b920947240e25f6e91296e387
| 21,390 |
import math
def smooth(x):
""" smooth value x by using a cosinus wave (0.0 <= x <= 1.0)
"""
return (-math.cos(math.pi * x) + 1) / 2
|
5b8c041835e49cd858f439a4a902935bb78b87a7
| 21,391 |
import ast
def matches(value, pattern):
"""Check whether `value` matches `pattern`.
Parameters
----------
value : ast.AST
pattern : ast.AST
Returns
-------
matched : bool
"""
# types must match exactly
if type(value) != type(pattern):
return False
# primitive value, such as None, True, False etc
if not isinstance(value, ast.AST) and not isinstance(pattern, ast.AST):
return value == pattern
fields = [
(field, getattr(pattern, field))
for field in pattern._fields
if hasattr(pattern, field)
]
for field_name, field_value in fields:
if not matches(getattr(value, field_name), field_value):
return False
return True
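# Usage sketch (structural comparison of two identical parsed expressions):
# >>> a = ast.parse('x + 1', mode='eval')
# >>> b = ast.parse('x + 1', mode='eval')
# >>> matches(a, b)
# True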
|
a7e05fc31e6387794f28d04e5aedfdbe7eb5f9bc
| 21,393 |
def int_to_base36(integer: int) -> str:
"""Convert an integer to a base36 string."""
char_set = "0123456789abcdefghijklmnopqrstuvwxyz"
if integer < 0:
raise ValueError("Negative base36 conversion input.")
if integer < 36:
return char_set[integer]
b36 = ""
while integer != 0:
integer, index = divmod(integer, 36)
b36 = char_set[index] + b36
return b36
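# Usage sketch:
# >>> int_to_base36(35)
# 'z'
# >>> int_to_base36(36)
# '10'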
|
115091a7b8766fe4488127d9b5fcfc904f67bae0
| 21,394 |
def _fit_estimator(clf, X, y):
"""Helper to fit estimator"""
return clf.fit(X, y)
|
1f99369f29260336f5a5f2a6e10e9c5721421831
| 21,395 |