content | sha1 | id |
---|---|---|
def steps_per_quarter_to_steps_per_second(steps_per_quarter, qpm):
"""Calculates steps per second given steps_per_quarter and a qpm."""
return steps_per_quarter * qpm / 60.0
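# Illustrative check (not part of the original snippet): at 120 quarter
# notes per minute, 4 steps per quarter note gives 8 steps per second.
assert steps_per_quarter_to_steps_per_second(4, 120) == 8.0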
|
f1ca6976700b1290c67a6f6670e5c3eee14664bb
| 42,660 |
def split_lhs_rhs(expr):
"""Split the equation into left and right hand side.
>>> split_lhs_rhs(" 12 + a ")
(None, '12 + a')
>>> split_lhs_rhs(" c = 12 + a ")
('c', '12 + a')
"""
expr = [x.strip() for x in expr.split("=")]
if len(expr) == 1:
rhs = expr[0]
output = None
else:
output, rhs = expr
return output, rhs
|
ac7fd4861ad3289365030d6eac656e021ee39e6f
| 42,664 |
def make_predictions(model, test_x):
"""
Make predictions
"""
predictions = model.predict(test_x)
return predictions
|
f6e783a5a69c50d129453edf29c1d7f65419f675
| 42,665 |
def whisper(text):
"""Creates a response only the sender can see."""
    return {
        "response_type": "ephemeral",
        "text": text
    }
|
4efce220d7a65f4dad54f0d7586a9677e46321e2
| 42,669 |
import json
def json_write_data(json_data, filename):
"""Write json data into a file
"""
with open(filename, 'w') as fp:
json.dump(json_data, fp, indent=4, sort_keys=True, ensure_ascii=False)
return True
|
4fb8a9d654460d34a05b6bc88bf5512ff505a7a4
| 42,675 |
def decodeDegreesStr(valStr):
"""
Return a signed latitude/longitude value from a string. Only copes with the integer
values used in grid cell names.
"""
val = int(valStr[:-1])
if valStr[-1] in ("S", "W"):
val = -val
return val
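# Illustrative checks (not part of the original snippet): southern and
# western hemispheres come out negative, everything else positive.
assert decodeDegreesStr("35S") == -35
assert decodeDegreesStr("120W") == -120
assert decodeDegreesStr("40N") == 40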
|
e0dd53f4a26a5bf1bb35ea121beeaa09566cf35b
| 42,678 |
def get_results(combinations, config_dict):
"""
This function finds for each mission its most resilient configuration and probability.
:param combinations: dictionary which contains for each mission its configurations
:param config_dict: dictionary which contains for each configuration and goal host
the inferred probabilities (C, I, A)
:return: dictionary which contains for each mission only its most resilient configuration
and probability
"""
result_dict = {}
for mission in combinations:
result_dict[mission] = {}
for index in config_dict[mission]:
partial_result = {}
for host in config_dict[mission][index]:
if "probability" not in partial_result or \
sum(config_dict[mission][index][host]['result']) > \
sum(partial_result["probability"]):
partial_result["configuration"] = combinations[mission][index]
partial_result["probability"] = config_dict[mission][index][host]['result']
if "probability" not in result_dict[mission] or \
sum(partial_result["probability"]) < sum(result_dict[mission]["probability"]):
result_dict[mission]['configuration'] = partial_result["configuration"]
result_dict[mission]["probability"] = partial_result["probability"]
return result_dict
|
5ba0f5a68f03e8fc252d3eb4aee96493e57a97eb
| 42,680 |
def ones_like(x):
"""Return an array of the same shape as `x` containing only ones."""
return x * 0. + 1.
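# Illustrative check (not part of the original snippet): works on plain
# floats, and on numpy arrays via broadcasting of the same expression.
assert ones_like(5.0) == 1.0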
|
4178d00551fe7e499eaba77eebec1aca00212890
| 42,684 |
def estimate_phones(x):
"""
Allocate consumption category given a specific luminosity.
"""
if x['mean_luminosity_km2'] > 5:
return 10
elif x['mean_luminosity_km2'] > 1:
return 5
else:
return 1
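# Illustrative checks (not part of the original snippet), assuming x is any
# mapping with a 'mean_luminosity_km2' key:
assert estimate_phones({'mean_luminosity_km2': 2.5}) == 5
assert estimate_phones({'mean_luminosity_km2': 0.3}) == 1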
|
c592c1d5b5b44ed360c03aaf15461803e39704e3
| 42,689 |
def predict_cluster_by_precomputed_distances(precomputed_distances):
"""
Predict a cluster for each object with precomputed distances.
Parameters
----------
precomputed_distances : np.array
array of shape (n_topics, n_objects) - distances from clusters to objects
Returns
-------
np.array
        array of length n_objects; the ith element is the cluster of the ith object
"""
return precomputed_distances.T.argmin(axis=1).ravel()
|
7c42073d7e3dd1369d4ab4986db8b9474bea5516
| 42,695 |
from typing import Any
from typing import List
def fill_array(variable: Any, default: Any, final_size: int) -> List[Any]:
"""Fills the default value for parameter that don't set values for all plots (keys)
Parameters
----------
variable
The parameter to fill values for
default
The default value for the argument on a single plot
final_size
The number of keys in the figure
Returns
-------
    A list of length 'final_size', padded at the end with the default values for the parameter
"""
# ensure the parameter is a list
if not isinstance(variable, list):
variable = [variable]
# get the current length of the parameter (i.e. the number of keys with user-specified values)
num_vars = len(variable)
if num_vars < final_size:
for _ in range(final_size - num_vars):
if num_vars == 1:
# if only one value is provided for the parameter, use that by default for all keys
variable.append(variable[0])
else:
# otherwise, add the default value for the parameter to fill the list
variable.append(default)
return variable
|
9f8db3a2252bc08236a2fa4837f1bb0941886b34
| 42,696 |
def import_component(name):
"""
    As a workaround for __import__ behavior, extracts and
    returns the desired component.
>>> import_component('os').__name__
'os'
>>> import types
>>> type(import_component('os.path.join')) == types.FunctionType
True
"""
    # As a workaround for __import__ behavior:
    # 1. import the module
    # 2. use getattr() to extract the desired component
    names = name.split('.')
    assert names
if len(names) == 1:
return __import__(names[0])
try:
modname = '.'.join(names[:-1])
module = __import__(modname)
for name in names[1:-1]:
module = getattr(module, name)
except ImportError:
raise
else:
# 2. use getattr() to extract the desired component
return getattr(module, names[-1])
|
7862be8ee1d92b4951a7e1a648541de5e636cf85
| 42,704 |
def hamming_distance(pattern1, pattern2):
"""Return the hamming distance between 2 patterns."""
if len(pattern1) == len(pattern2):
return sum([
pattern1[index] != pattern2[index]
for index in range(len(pattern1))
])
raise Exception('Length of both reads do not match')
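# Illustrative check (not part of the original snippet): the classic
# example "karolin" vs "kathrin" differs at three positions.
assert hamming_distance("karolin", "kathrin") == 3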
|
aceb59a7136ed6aef9a32ff39ebf32567bef780c
| 42,707 |
def to_pecha_id_link(pecha_id):
"""Return pecha_id_link for `pecha_id`."""
return f"[{pecha_id}](https://github.com/OpenPecha/{pecha_id})"
|
9115992e22aa8705af488df3ed7ca6b738641b69
| 42,710 |
def assignment_display_name(assignment):
"""Get name for an assignment"""
if assignment.session.type.slug == 'regular' and assignment.session.historic_group:
return assignment.session.historic_group.name
return assignment.session.name or assignment.timeslot.name
|
25cb7ed877dfd8f47e108d497ff040d9ba17e52e
| 42,719 |
def is_df(df):
    """is_df
    Test if ``df`` is a valid ``pandas.DataFrame``
    :param df: ``pandas.DataFrame``
    """
    return hasattr(df, 'to_json')
|
fe5c111e8883ff64e3b63602e57aaa793ef710fa
| 42,721 |
import re
def parse_variant(variant):
"""
Parse specified genomic variant.
Generally speaking, the input string should consist of chromosome,
position, reference allele, and alternative allele separated by any one
or combination of the following delimiters: ``-``, ``:``, ``>`` (e.g.
'22-42127941-G-A'). The method will return parsed variant as a tuple with
a shape of ``(chrom, pos, ref, alt)`` which has data types of ``(str,
int, str, str)``.
Note that it's possible to omit reference allele and alternative allele
from the input string to indicate position-only data (e.g.
'22-42127941'). In this case, the method will return empty string for
the alleles -- i.e. ``(str, int, '', '')`` if both are omitted and
``(str, int, str, '')`` if only alternative allele is omitted.
Parameters
----------
variant : str
Genomic variant.
Returns
-------
tuple
Parsed variant.
Examples
--------
>>> from fuc import common
>>> common.parse_variant('22-42127941-G-A')
('22', 42127941, 'G', 'A')
>>> common.parse_variant('22:42127941-G>A')
('22', 42127941, 'G', 'A')
>>> common.parse_variant('22-42127941')
('22', 42127941, '', '')
>>> common.parse_variant('22-42127941-G')
('22', 42127941, 'G', '')
"""
fields = re.split('-|:|>', variant)
chrom = fields[0]
pos = int(fields[1])
try:
ref = fields[2]
except IndexError:
ref = ''
try:
alt = fields[3]
except IndexError:
alt = ''
return (chrom, pos, ref, alt)
|
3e58345da18a0ddc72eb1a1a73c4b1afc826f88e
| 42,722 |
def get_point(msg):
"""(str) -> tuple
prints a message specified by <msg> and allows the user to enter the
(x, y, z) coordinates of a point.
Returns the point as a tuple
"""
print(msg)
x = float(input('Enter x coordinate: '))
y = float(input('Enter y coordinate: '))
z = float(input('Enter z coordinate: '))
return x, y, z
|
136491e061519ec1aebef1db4a7577bc60f28316
| 42,724 |
def get_measurement_from_rule(rule):
"""
Return the name of the measurement from
    the Alert_Rule check_field
"""
if len(rule.check_field.split('#')) == 2:
model, measurement = rule.check_field.strip().split('#')
return measurement
elif len(rule.check_field.split('#')) == 1:
return rule.check_field
|
183b3efa9d75c05a71223fee11b78ce243ae2129
| 42,728 |
from typing import SupportsRound
def iround(x: SupportsRound) -> int:
"""Rounds x and converts to int.
Because round(np.float32) returns np.float32 instead of int.
"""
return int(round(x))
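# Illustrative checks (not part of the original snippet): note that Python's
# round() uses banker's rounding, so exact halves go to the even integer.
assert iround(3.7) == 4
assert iround(2.5) == 2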
|
6a8a2d089e4b0986052be308f6179002ab414f1d
| 42,733 |
import re
def is_module(content):
"""Checks if the contents are from a Lua module.
It looks for a returned value at the end of the file. If it finds one, it's
safe to assume that it's a module.
content : str
The Lua source code to check.
"""
# We match any number of whitespace after the `return` in case of accidental
# spacing on the user's part.
#
# Then we match any characters to catch variables (`return module`) and
# functions (`return setmetatable(t1, t2)`)
#
# We're optionally matching any number of spaces at the end of the file
    # in case of a final newline, or accidentally added spaces after the value.
return re.search(r"return\s+.*(\s+)?$", content)
|
8f9ac0dcad6623d73bb47e87ec56ae8519b75f2f
| 42,734 |
def prepare_input(input_str, from_item):
"""
A function for preparing input for validation against a graph.
Parameters:
input_str: A string containing node or group identifiers.
from_item: Start processing only after this item.
Returns:
A list of node identifiers.
"""
# Get the list of nodes provided by the user
input_list = input_str.lower().split()[from_item:]
# Strip commas
input_list = [u.strip(',') for u in input_list]
# Strip extra whitespace
input_list = [u.strip() for u in input_list]
# Create a placeholder for the final list of identifiers
final_list = []
# Check if the input contains group aliases
for i in input_list:
# If the input contains a range of identifiers, unpack
if ':' in i:
# Get the prefix of the alias (I, B, T, or G)
prefix = i[0]
# Get numbers and cast: ignore the first character of the first id
try:
start, end = int(i.split(':')[0][1:]), int(i.split(':')[1])
except ValueError:
# Print error message
print("[ERROR] Check syntax for identifier range: do not add "
"identifier prefix to the second part, i.e. g1:g5.")
# Append erroneous identifier to the list to catch the error
final_list.append(i)
continue
# Create a list of unpacked identifiers in the range
unpacked = [prefix + str(x) for x in range(start, end + 1)]
# Extend the list of identifiers
final_list.extend(unpacked)
# Otherwise, append identifier to the final list
if ':' not in i:
final_list.append(i)
return final_list
|
b0545f0d58ad9788fa8a1538e163d0959965dc8a
| 42,747 |
import pickle
def _pickle(pickle_file):
"""
    Loads a pickle file written by either Python 2 or 3
Parameters
------
    pickle_file : pathlib.Path
        path to the pickle file to load
"""
try:
with open(pickle_file.as_posix(), "rb") as f:
return pickle.load(f)
except UnicodeDecodeError:
with open(pickle_file.as_posix(), "rb") as f:
return pickle.load(f, encoding="latin1")
|
f34f7649d0c0b0480e86fc1cb76eae74b1099113
| 42,753 |
def _aihub_coord_to_coord(coords):
"""Covert aihub-style coords to standard format.
>>> _aihub_coord_to_coord({
... "X좌표1": 602.004,
... "X좌표2": 571.004,
... "X좌표3": 545.004,
... "X좌표4": 531.004,
... "Y좌표1": 520.004,
... "Y좌표2": 505.004,
... "Y좌표3": 465.004,
... "Y좌표4": 428.004,
... })
[(602.004, 520.004), (571.004, 505.004), (545.004, 465.004), (531.004, 428.004)]
"""
max_num = max(int(item[3:]) for item in coords)
return [(coords[f"X좌표{n}"], coords[f"Y좌표{n}"]) for n in range(1, max_num + 1) if f"X좌표{n}" in coords]
|
fed881fe532442ccb2c13d44262a5ef12f5c5143
| 42,754 |
def f1_from_roc(fpr, tpr, pos, neg):
"""Calculate f1 score from roc values.
Parameters
----------
fpr : float
The false positive rate.
tpr : float
The true positive rate.
pos : int
The number of positive labels.
neg : int
The number of negative labels.
Returns
-------
float
The f1 score.
"""
fp = fpr * neg
fn = (1 - tpr) * pos
tp = pos - fn
f1 = tp / (tp + ((fn + fp) / 2))
return f1
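# Illustrative check (not part of the original snippet): with tpr=0.8,
# fpr=0.1 and 100 labels of each class, tp=80, fn=20, fp=10, so
# f1 = 80 / (80 + 15) ~ 0.842.
assert abs(f1_from_roc(0.1, 0.8, 100, 100) - 80 / 95) < 1e-9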
|
d682ef92c8f3a43f1e88ab98125cb3f3d33cf189
| 42,756 |
def datetime_to_pretty_str(dt):
"""
Convert datetime object to string similar to ISO 8601 but more compact.
Arguments:
----------
    dt: datetime object
... for which the string will be generated.
Returns:
--------
dt_str: string
        The pretty string representation of the datetime object.
"""
dt_str = dt.strftime('%Y-%m-%d %H:%M:%S')
return dt_str
|
24508950d6a2995247a0dd305ddb82086285bd18
| 42,757 |
def convert_string_to_bool(string_value):
"""
    simple method used to convert a string to a bool
:param string_value: True or False string value
:type string_value: string - required
:return: bool True or False
    :rtype: bool
"""
if string_value == 'True':
return True
else:
return False
|
3e4113721df399408719ae7737136691f904ae78
| 42,762 |
def has_finite_length(obj):
"""
Return ``True`` if ``obj`` is known to have finite length.
This is mainly meant for pure Python types, so we do not call any
Sage-specific methods.
EXAMPLES::
sage: from sage.sets.set import has_finite_length
sage: has_finite_length(tuple(range(10)))
True
sage: has_finite_length(list(range(10)))
True
sage: has_finite_length(set(range(10)))
True
sage: has_finite_length(iter(range(10)))
False
sage: has_finite_length(GF(17^127))
True
sage: has_finite_length(ZZ)
False
"""
try:
len(obj)
except OverflowError:
return True
except Exception:
return False
else:
return True
|
483a5cbb69f197622373c224de41f9e0ddd149ec
| 42,763 |
import csv
def cluster_keywords(input_file_path):
""" Cluster keywords based on the shorted version of the keywords read from the input file.
Args:
input_file_path: the path to the tsv file containing keywords and their shortened version.
Returns:
        shortened_keywords_list: a list of dictionaries (one per model), each mapping a shortened keyword to the list of keywords in that cluster
total_keyword_counts: total number of keywords being clustered
model_name_list: a list of names of the LaserTagger models used for shortening keywords, as indicated in tsv file
"""
shortened_keywords_list = []
total_keyword_counts = 0
with open(input_file_path) as f:
read_tsv = csv.reader(f, delimiter="\t")
model_name_list = next(read_tsv)[1:]
for i in range(len(model_name_list)):
shortened_keywords_list.append({})
for line in read_tsv:
total_keyword_counts += 1
for index, shortened_keyword in enumerate(line[1:]):
shortened_keyword = shortened_keyword.lower()
if shortened_keyword == "":
continue
if shortened_keyword not in shortened_keywords_list[index]:
shortened_keywords_list[index][shortened_keyword] = [line[0]]
else:
shortened_keywords_list[index][shortened_keyword].append(line[0])
return shortened_keywords_list, total_keyword_counts, model_name_list
|
fb000bc9d36f901e09f3a958a42e555b90c9ae56
| 42,765 |
def float_div(num1, num2):
"""Function: float_div
Description: Takes two numbers and does floating division. Returns zero
if the divisor is zero.
Arguments:
(input) num1 number -> First number.
(input) num2 number -> Second number.
(output) Return results of division or 0.
"""
try:
return float(num1) / num2
except ZeroDivisionError:
return 0
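# Illustrative checks (not part of the original snippet):
assert float_div(7, 2) == 3.5
assert float_div(5, 0) == 0  # a zero divisor returns 0 instead of raising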
|
372c1eb0fda84d066d7ed5c6a7990869380fffb8
| 42,767 |
from functools import reduce
from operator import truediv
def analyze(sample_paragraph, typed_string, start_time, end_time):
"""Returns a list containing two values:
words per minute and accuracy percentage.
This function takes in a string sample_paragraph,
a string provided by user input typed_string,
a number start_time and
a number end_time.
Both start_time and end_time are measured in seconds.
"""
num_of_wrds = len(typed_string) / 5
try:
wrds_per_min = num_of_wrds / (end_time - start_time) * 60
except ZeroDivisionError:
wrds_per_min = float('Inf')
words_pair = zip(sample_paragraph.split(), typed_string.split())
alg = lambda cnt_length, sp_ts: (cnt_length[0] + (sp_ts[0] == sp_ts[1]), cnt_length[1] + 1) # the algebra for catamorphism
try:
accuracy = truediv(*reduce(alg, words_pair, (0, 0))) * 100
except ZeroDivisionError:
accuracy = 0.0
return [wrds_per_min, accuracy]
|
ac797542fc90cc800deec731209fa336a7181739
| 42,769 |
def filter_by_hardware_interface(ctrl_list,
hardware_interface,
match_substring=False):
"""
Filter controller state list by controller hardware interface.
@param ctrl_list: Controller state list
@type ctrl_list: [controller_manager_msgs/ControllerState]
@param hardware_interface: Controller hardware interface
@type hardware_interface: str
@param match_substring: Set to True to allow substring matching
@type match_substring: bool
@return: Controllers matching the specified hardware interface
@rtype: [controller_manager_msgs/ControllerState]
"""
list_out = []
for ctrl in ctrl_list:
for resource_set in ctrl.claimed_resources:
if match_substring:
if hardware_interface in resource_set.hardware_interface:
list_out.append(ctrl)
break
else:
if resource_set.hardware_interface == hardware_interface:
list_out.append(ctrl)
break
return list_out
|
ce1dc94543b0fde61944f8a730fd4717b3c83da7
| 42,771 |
def _columnspace(M, simplify=False):
"""Returns a list of vectors (Matrix objects) that span columnspace of ``M``
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> M
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> M.columnspace()
[Matrix([
[ 1],
[-2],
[ 3]]), Matrix([
[0],
[0],
[6]])]
See Also
========
nullspace
rowspace
"""
reduced, pivots = M.echelon_form(simplify=simplify, with_pivots=True)
return [M.col(i) for i in pivots]
|
4bc7b18b6781426ff4e0cb9b587836b46aef23b7
| 42,774 |
import hashlib
def get_md5_hash(path):
"""
Calculates the md5 hash for a specific file.
"""
    md5_hash = hashlib.md5()
    with open(path, 'rb') as f:
        md5_hash.update(f.read())
    return md5_hash.hexdigest()
|
30120003948d334a11a0ca45fb6d22125e4b85ce
| 42,776 |
def get_indexes(cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
# "We were in the nick of time; you were in great peril!"
sql = """
WITH primarycols AS (
SELECT user_cons_columns.table_name, user_cons_columns.column_name, 1 AS PRIMARYCOL
FROM user_cons_columns, user_constraints
WHERE user_cons_columns.constraint_name = user_constraints.constraint_name AND
user_constraints.constraint_type = 'P' AND
user_cons_columns.table_name = %s),
uniquecols AS (
SELECT user_ind_columns.table_name, user_ind_columns.column_name, 1 AS UNIQUECOL
FROM user_indexes, user_ind_columns
WHERE uniqueness = 'UNIQUE' AND
user_indexes.index_name = user_ind_columns.index_name AND
user_ind_columns.table_name = %s)
SELECT allcols.column_name, primarycols.primarycol, uniquecols.UNIQUECOL
FROM (SELECT column_name FROM primarycols UNION SELECT column_name FROM
uniquecols) allcols,
primarycols, uniquecols
WHERE allcols.column_name = primarycols.column_name (+) AND
allcols.column_name = uniquecols.column_name (+)
"""
cursor.execute(sql, [table_name, table_name])
indexes = {}
for row in cursor.fetchall():
        # row[1] is 1 if the column is part of the primary key and row[2] is 1
        # if it has a unique index; either can be NULL because of the outer
        # joins in the query above.
indexes[row[0]] = {'primary_key': row[1], 'unique': row[2]}
return indexes
|
7f71b0f2c493deb4d3ef6e0b73021575ba1e2ce1
| 42,782 |
def make_chunks(l, n):
"""
Chunks a list into ``n`` parts. The order of ``l`` is not kept. Useful for
parallel processing when a single call is too fast, so the overhead from
managing the processes is heavier than the calculation itself.
Parameters
----------
l : list
Input list.
n : int
Number of parts.
Examples
--------
.. code-block:: python
make_chunks(range(13), 3)
# -> [[0, 3, 6, 9, 12], [1, 4, 7, 10], [2, 5, 8, 11]]
"""
return [l[i::n] for i in range(n)]
|
df0c3ddf67ed892ce47cd073f67003ed9e85b6d6
| 42,785 |
def pr(vp,vs):
"""
Computes the Poisson ratio
Parameters
----------
vp : array
P-velocity.
vs : array
S-velocity.
Returns
-------
pr : array
Poisson ratio.
"""
    vpvs = vp / vs
    pr = 0.5 * ((vpvs**2 - 2) / (vpvs**2 - 1))
    return pr
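# Illustrative check with scalar inputs (not part of the original snippet;
# numpy arrays work elementwise): vp/vs = sqrt(3) gives the classic 0.25.
assert abs(pr(3 ** 0.5, 1.0) - 0.25) < 1e-12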
|
bec82f868b847b85e39c90016f6787e20faa91ae
| 42,788 |
import time
def timestamp_to_gmtime(ts):
"""Return a string formatted for GMT
>>> print(timestamp_to_gmtime(1196705700))
2007-12-03 18:15:00 UTC (1196705700)
>>> print(timestamp_to_gmtime(None))
******* N/A ******* ( N/A )
"""
if ts:
return "%s (%d)" % (time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime(ts)), ts)
else:
return "******* N/A ******* ( N/A )"
|
7e0dd51d2811c361c301ee92e62eb5b271539bf1
| 42,790 |
def to_positive_int(int_str):
"""
Tries to convert `int_str` string to a positive integer number.
Args:
int_str (string): String representing a positive integer number.
Returns:
int: Positive integer number.
Raises:
ValueError: If `int_str` could not be converted to a positive integer.
"""
try:
int_int = int(int_str)
except ValueError:
raise ValueError("argument must represent an integer number")
    if int_int < 1:
raise ValueError("argument must be a positive integer number")
return int_int
|
322942f257ca390e5d7ae5d9f74c33d6b8623baf
| 42,791 |
def get_module_url(module_items_url):
"""
Extracts the module direct url from the items_url.
Example:
items_url https://canvas.instance.com/api/v1/courses/course_id/modules/module_id/items'
becomes https://canvas.instance.com/courses/course_id/modules/module_id
"""
return module_items_url.replace('api/v1/','').replace('/items', '')
|
bf03e0139c07e1d43be8123e1966fac5fd68239a
| 42,792 |
def horizontal_unfold(A):
"""
For a 3D tensor A(a,i,b), we unfold like: A(a,ib)
"""
S = A.shape
return A.reshape(S[0], S[1] * S[2])
|
59caaa3db71c868d08264c64a88401e85ce6136c
| 42,793 |
import re
def _verify_ip(ip):
"""Return True if ip matches a valid IP pattern, False otherwise."""
if not ip:
return False
    ip_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
return ip_pattern.match(ip) is not None
|
e828c03726d09480dcc3a7caa509a465b9a7a97e
| 42,796 |
def available_moves(hex_board):
"""
Get all empty positions of the HexBoard = all available moves.
:param hex_board: HexBoard class object
:return: list of all empty positions on the current HexBoard.
"""
return [(i, j) for i in range(hex_board.size) for j in range(hex_board.size) if hex_board.is_empty((i, j))]
|
4813cdc69c64c4260390bb8ee47f541eb274ae4d
| 42,799 |
from typing import Dict
def parse_group_id(group_id: str) -> Dict:
"""
    Parse a plugin group ID
:param group_id: sub_1234_host_1
:return: {
"subscription_id": 1234,
"object_type": host,
"id": 1,
}
"""
source_type, subscription_id, object_type, _id = group_id.split("_")
return {
"subscription_id": subscription_id,
"object_type": object_type,
"id": _id,
}
|
8e5e773993b7bea728d85133794b246901bc8c66
| 42,801 |
def textContent(node):
"""Return the text in `node`, including that in the child-nodes. This is the
equivalence of the text_content() method for HTML nodes, but works for both
HTML and XML nodes.
"""
return ''.join(node.itertext())
|
a6760b4855b4c674f38a4ad360faf9e1dd924f71
| 42,803 |
def extract_impression_id(line, assert_first_line=False):
"""
Extracts the impression_id from a line
"""
if type(line) == bytes:
line = line.decode()
return line[:line.index("|")].strip()
|
09f67f24e4e517c1ac66df5cc1fb8d7d359ad3c9
| 42,806 |
def check_number_threads(numThreads):
"""Checks whether or not the requested number of threads has a valid value.
Parameters
----------
numThreads : int or str
The requested number of threads, should either be a strictly positive integer or "max" or None
Returns
-------
numThreads : int
Corrected number of threads
"""
if (numThreads is None) or (isinstance(numThreads, str) and numThreads.lower() == 'max'):
return -1
if (not isinstance(numThreads, int)) or numThreads < 1:
raise ValueError('numThreads should either be "max" or a strictly positive integer')
return numThreads
|
a8d683d5c265f43567031e8c10314efad2411ec9
| 42,809 |
def read_maze(maze_file):
""" (file open for reading) -> list of list of str
Return the contents of maze_file in a list of list of str,
where each character is a separate entry in the list.
"""
res = []
for line in maze_file:
        maze_row = list(line.strip())
res.append(maze_row)
return res
|
2084ac891012932774d46d507f550e8070e3cc47
| 42,814 |
def can(obs, action_id):
"""Returns True if the specified action is available."""
return action_id in obs.observation.available_actions
|
509e7baa411529114881d95c38684d232d71db5a
| 42,816 |
def removeengineeringpids(pids):
"""Removing propcodes that are associated with engineering and calibration proposals"""
new_pids=[]
for pid in pids:
        if 'ENG_' not in pid and 'CAL_' not in pid:
new_pids.append(pid)
return new_pids
|
18a3f14f6645a2d27727192b045cfb7b64f959f3
| 42,818 |
def make_pin_name(port, index):
"""
Formats a pin name of a multi-bit port
"""
return "{}_b{}".format(port, index)
|
e3e7c3476583bd80a68b53e077399b278f501573
| 42,821 |
def getTime(t):
"""
Returns a string after converting time in seconds to hours/mins/secs
    Parameters:
t (float): time in seconds
Returns:
s (str): number of hours, if more than 1 hour
number of minutes and seconds, if more than 1 minute
number of seconds, otherwise
"""
if t >= 3600:
        s = str(round(t / 3600, 2)) + " hours.\n"
elif t >= 60:
s = str(t // 60) + " mins, " + str(t % 60) + " secs.\n"
else:
s = str(t) + " secs.\n"
return s
|
8448c6f3d5216ab6585d2367e9bac07170ecd08b
| 42,825 |
def make_tag_decorator(known_tags):
"""
Create a decorator allowing tests to be tagged with the *known_tags*.
"""
def tag(*tags):
"""
Tag a test method with the given tags.
Can be used in conjunction with the --tags command-line argument
for runtests.py.
"""
for t in tags:
if t not in known_tags:
raise ValueError("unknown tag: %r" % (t,))
def decorate(func):
if (not callable(func) or isinstance(func, type)
or not func.__name__.startswith('test_')):
raise TypeError("@tag(...) should be used on test methods")
try:
s = func.tags
except AttributeError:
s = func.tags = set()
s.update(tags)
return func
return decorate
return tag
|
80a97f0db5198629aa1f48163d14b2eae463e933
| 42,827 |
def variance(rv, *args, **kwargs):
"""
    Returns the variance of `rv`.
In general computed using `mean` but may be overridden.
:param rv: RandomVariable
"""
return rv.variance(*args, **kwargs)
|
479175f7c101612ea14cef7caf3c5876c2714987
| 42,828 |
def param_nully(value) -> bool:
"""Determine null-like values."""
if isinstance(value, str):
value = value.lower()
return value in [None, '', 'undefined', 'none', 'null', 'false']
|
243ab7fdbd08f236a3382cc5e545f035557d8c53
| 42,829 |
def assemble_cla_status(author_name, signed=False):
"""
Helper function to return the text that will display on a change request status.
For GitLab there isn't much space here - we rely on the user hovering their mouse over the icon.
For GitHub there is a 140 character limit.
:param author_name: The name of the author of this commit.
:type author_name: string
    :param signed: Whether or not the author has signed a signature.
:type signed: boolean
"""
if author_name is None:
author_name = 'Unknown'
if signed:
return author_name, 'EasyCLA check passed. You are authorized to contribute.'
return author_name, 'Missing CLA Authorization.'
|
9ea59337f1d3d04531c6fe3457a6c769327ab767
| 42,831 |
def has_optional(al):
""" return true if any argument is optional """
    return any(a.init for a in al)
|
6c2bf7836afc34fa47408cbdb94c94335fa21b26
| 42,832 |
import pathlib
def stringify_path(filepath):
"""Attempt to convert a path-like object to a string.
Parameters
----------
filepath: object to be converted
Returns
-------
filepath_str: maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (Python 3.6+) are coerced
according to its __fspath__ method.
For backwards compatibility with older Python version, pathlib.Path
objects are specially coerced.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
if hasattr(filepath, "__fspath__"):
return filepath.__fspath__()
elif isinstance(filepath, pathlib.Path):
return str(filepath)
return filepath
|
83fca05a40e3b0f518d6bed454848a4ba6ed14f9
| 42,835 |
def renderFKPs(landmarks) -> dict:
"""Extract facial keypoints
Args:
        landmarks (iterable): landmark objects with ``x`` and ``y`` attributes
Returns:
dict: {fkp: [x, y]}
"""
keypoints = {}
    for index, fkp in enumerate(landmarks):
        keypoints[index] = [fkp.x, fkp.y]
return keypoints
|
c9c8a9efaa3f78fdba0bdad4bb22cd6d503ca2ad
| 42,838 |
def validate_coverage(
route,
metric="node_coverage",
max_gap_sec=3 * 60 * 60,
min_node_coverage_percent=0.75,
):
"""
Make sure there is sufficient coverage of the planned route with live data
"""
if metric == "time":
last_update, live = None, []
for r in route:
live += [l for l in r.get("updates", []) if l.get("eventTime")]
for l in sorted(live, key=lambda l: l.get("eventTime")):
if last_update is None:
last_update = l.get("eventTime")
if (l.get("eventTime") - last_update).total_seconds() > max_gap_sec:
return False
last_update = l.get("eventTime")
if last_update is None:
# Did not see any updates
return False
elif metric == "node_coverage":
# Do not allow less than 75% node coverage with live data
live_stations, planned_stations = [], []
for r in route:
live_stations += [
l.get("stationId") for l in r.get("updates", []) if l.get("stationId")
]
planned_stations += [
p.get("stationId") for p in r.get("stations", []) if p.get("stationId")
]
intersection = set(planned_stations).intersection(set(live_stations))
if (
float(len(intersection)) / float(len(set(planned_stations)))
< min_node_coverage_percent
):
return False
return True
|
0257b7a746141425194c69ed50ac4e554cccb980
| 42,839 |
def mean_center_utilmat(U, axis=1, fillna=True, fill_val=None):
"""Gets the mean-centered utility matrix
Parameters:
U (DataFrame) : utilily matrix (rows are users, columns are items)
axis (int) : The axis along mean is evaluated,
{0/'index', 1/'columns'}, default 1
fillna (bool) : Indicates whether missing/null values are to be filled
fill_val (None/float) : Value to be used to fill null values when
fillna==True, default None
Returns:
U (DataFrame): mean-centered utility matrix
"""
mean_centered = U.sub(U.mean(axis=axis), axis=1-axis)
if fillna:
if fill_val is not None:
return mean_centered.fillna(fill_val)
else:
return mean_centered.fillna(0)
else:
return mean_centered
|
dad6239843aa47e8894a04b49f87ef34e4bc2e7a
| 42,842 |
def as_list(val):
"""return a list with val if val is not already a list, val otherwise"""
if isinstance(val, list):
return val
else:
return [val]
|
484c4163ea8e3dd17c9c4372554b54f16434b995
| 42,843 |
import tempfile
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f:
print("Testing for flag %s" % (flagname))
f.write("int main (int argc, char **argv) { return 0; }")
try:
compiler.compile([f.name], extra_postargs=[flagname])
except Exception as e:
print(e)
return False
return True
|
68467b6424ac8eb7f3049f21812d877f0fbe7cd3
| 42,848 |
def metadata(data):
"""Convert a dictionary of strings into an RST metadata block."""
template = ":%s: %s\n"
    return ''.join(template % (key, value) for key, value in data.items())
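# Illustrative check (not part of the original snippet):
assert metadata({"author": "Jane"}) == ":author: Jane\n"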
|
8d47746df2a232ff043b5a60527917f6f75329ee
| 42,850 |
def cli(ctx, group_id):
"""Get information about a group
Output:
a dictionary containing group information
"""
return ctx.gi.groups.show_group(group_id)
|
5bb99f5d76ab7a4dd1e471ca39339f7082105849
| 42,852 |
from typing import Union
from typing import Tuple
import secrets
import hashlib
def wep_encypher_pw(password: str, salt: Union[None, str] = None) -> Tuple[str, str]:
"""
Hash and salt a password string and return the SHA-512 digest of the
hashed and salted string.
Args:
password: A password inputted by the user
salt: The salt to apply to the password before hashing
Returns:
The SHA-512 message digest, in hexadecimal form, of the password
string with a salt applied, along with the salt itself.
"""
if not salt:
salt = secrets.token_hex(16)
return salt, hashlib.sha512((password + salt).encode("utf-8")).hexdigest()
|
ffbb5ec08b2e9f8c8c9567f254bcc90180f9d7f5
| 42,855 |
def patient_form(first_name, last_name, patient_id, gender, birthdate):
"""OpenMRS Short patient form for creating a new patient.
    Parameters    OpenMRS form field              Note
    first_name    personName.givenName            N/A
    last_name     personName.familyName           N/A
    patient_id    identifiers[0].identifier       N/A
    gender        patient.gender                  M or F
    birthdate     patient.birthdate               single digits must be padded
    N/A           identifiers[0].identifierType   use "2"
    N/A           identifiers[0].location         use "1"
"""
data = {"personName.givenName": first_name,
"personName.familyName": last_name,
"identifiers[0].identifier": patient_id,
"identifiers[0].identifierType": 2,
"identifiers[0].location": 1,
"patient.gender": gender,
"patient.birthdate": birthdate,}
return data
|
98e41b828b1de828bf6925b24d9c9e321c4c4cfa
| 42,859 |
def middle(lst):
"""
Takes a list and returns a new list that contains all but the first and
last elements.
Input: lst -- a list
Output: new -- new list with first and last elements removed
"""
new = lst[1:] # Stores all but the first element
del new[-1] # Deletes the last element
return new
|
ee065663b7ace7a8f582a6967096862585b9f599
| 42,868 |
from datetime import datetime
def count_time(time_logon,time_logoff):
"""count the logoning time
Arg:
        time_logon: for example, 07:20:00
time_logoff: for example, 15:20:00
return:
last_time: float, the number of hours of online.
"""
    time_logon = datetime.strptime(time_logon, '%H:%M:%S')
    time_logoff = datetime.strptime(time_logoff, '%H:%M:%S')
    last_time = (time_logoff - time_logon).total_seconds() / 3600
    last_time = round(last_time, 2)
return last_time
|
cd22d78525c54306328689ffe752f1e5a5d24464
| 42,871 |
from contextlib import suppress
def _purge_headers_cb(headers):
"""
Remove headers from the response.
Args:
headers (list): headers to remove from the response
Returns:
        callable: to be used as ``before_record_response`` in the VCR constructor.
"""
header_list = []
for item in headers:
if not isinstance(item, tuple):
item = (item, None)
header_list.append(item[0:2]) # ensure the tuple is a pair
def before_record_response_cb(response):
"""
Purge headers from response.
Args:
response (dict): a VCR response
Returns:
dict: a VCR response
"""
for (header, value) in header_list:
with suppress(KeyError):
if value:
response['headers'][header] = value
else:
del response['headers'][header]
return response
return before_record_response_cb
|
9d0c0cc04ee407d6f4f60c2c65c39869d58333c0
| 42,872 |
import re
def get_label_and_caption(lines, i):
"""Capture any label and caption immediately after a code environment's end
Use regex on the two lines after a code environment's end (e.g. !ec) to
extract any label or caption. NB! This method might modify the two lines
after the code environment by removing any label{} and caption{} commands
:param list lines: lines of code
:param int i: current index
:return: label and caption
:rtype: (str, str)
"""
# capture any caption and label in the next two
# lines after the end of the code environment
label = None
label_regex = re.compile(r"[\\]*label\{(.*?)\}")
label_match = re.search(label_regex, "".join(lines[i + 1:i + 3]))
if label_match:
label = label_match.group(1)
caption = None
caption_regex = re.compile(r"[\\]*caption\{(.*?)\}")
caption_match = re.search(caption_regex, "".join(lines[i + 1:i + 3]))
if caption_match:
caption = caption_match.group(1)
# Remove label{} and caption{}
if len(lines) > i + 1:
lines[i + 1] = re.sub(label_regex, "", lines[i + 1])
lines[i + 1] = re.sub(caption_regex, "", lines[i + 1])
if len(lines) > i + 2:
lines[i + 2] = re.sub(label_regex, "", lines[i + 2])
lines[i + 2] = re.sub(caption_regex, "", lines[i + 2])
return label, caption
|
4de2d4dc103a01fdd3505be3078f860079e8a30c
| 42,874 |
def compute_yield(x):
"""
Compute yield as measured with UMIs for a droplet x.
"""
return x["az_total"]*x["nb_hp"]*10.0/x["hp_total"]
|
e2fc9f96b828cd9cd8e9a93bbb874d67d0ce5671
| 42,876 |
def compute_multiples(origin_shape, broadcast_shape):
"""Compute multiples between origin shape with broadcast shape."""
len_gap = len(broadcast_shape) - len(origin_shape)
return broadcast_shape[0:len_gap] + tuple(map(lambda x, y: x // y, broadcast_shape[len_gap:], origin_shape))
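# Illustrative check (not part of the original snippet): broadcasting
# (3, 1, 2) up to (4, 3, 5, 2) prepends the gap and divides the rest.
assert compute_multiples((3, 1, 2), (4, 3, 5, 2)) == (4, 1, 5, 1)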
|
e3fa23db26988ea1c096491598c81ba587043bee
| 42,878 |
import pkgutil
import encodings
def encoding_exists(encoding):
"""Check if an encoding is available in Python"""
false_positives = set(["aliases"])
found = set(name for imp, name, ispkg in pkgutil.iter_modules(encodings.__path__) if not ispkg)
found.difference_update(false_positives)
if encoding:
if encoding in found:
return True
elif encoding.replace('-', '_') in found:
return True
return False
|
2e5d1bb114a15010523a9ed29636375fe2c6e87e
| 42,880 |
import pickle
def deserialize(src):
"""
    The default deserialization method. The pickle protocol version is
    read from the serialized data automatically, so it does not need to
    be specified.
    Parameters
    ----------
    src : bytes
        serialized data to load
    Returns
    -------
    The deserialized object.
"""
return pickle.loads(src)
|
ea75e6ed28296020fffed913c0514cdbf09ecf26
| 42,883 |
def terminate(library, session, degree, job_id):
"""Request a VISA session to terminate normal execution of an operation.
Corresponds to viTerminate function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
degree : None
Not used in this version of the VISA specification.
job_id : VISAJobId
Specifies an operation identifier. If a user passes None as the
job_id value to viTerminate(), a VISA implementation should abort
any calls in the current process executing on the specified vi.
Any call that is terminated this way should return VI_ERROR_ABORT.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viTerminate(session, degree, job_id)
|
ae976515c84cdbae36623483f7d0915bee2cfd56
| 42,887 |
def calculate_dynamic_pressure(rho, TAS):
"""Calculates the dynamic pressure.
Parameters
----------
rho : float
Air density (kg/m³).
TAS : float
True Air Speed (m/s).
Returns
-------
q_inf : float
Dynamic pressure. (Pa)
Notes
-----
$$ q_{inf} = 1/2 · rho · TAS² $$
"""
return 0.5 * rho * TAS ** 2
|
f4f1a8f389f59a9fae129e616ba8b1de9f44472c
| 42,891 |
def parse_hostnames_from_ingresses(ingress_list):
"""
This function parses a list of Ingress objects into a map of hostname=>address
"""
hostnames = {}
for ingress in ingress_list:
rules = ingress.spec.rules
if ingress.status.load_balancer.ingress is None:
continue
address = ingress.status.load_balancer.ingress[0].ip
for rule in rules:
host = rule.host
hostnames[host] = address
return hostnames
|
16bdd826f5a41af19f5d1ada61b7738c3f16c91a
| 42,896 |
from pathlib import Path
import re
def get_min_ver(dependency: str) -> str:
"""Retrieve version of `dependency` from setup.py, raise if not found."""
setup_py = Path(__file__).parent.joinpath("../../../setup.py")
with open(setup_py, "r") as setup_file:
for line in setup_file.readlines():
min_ver = re.findall(fr'"{dependency}~=([0-9]+(\.[0-9]+){{,2}})"', line)
if min_ver:
return min_ver[0][0]
else:
raise RuntimeError(f"Cannot find {dependency} dependency in setup.py")
|
5be3214783391962da4b71d75ae7352d518a1232
| 42,899 |
def init(module, weight_init, bias_init, gain=1):
"""
Parameters
----------
module : nn.Module
nn.Module to initialize.
weight_init : func
Function to initialize module weights.
bias_init : func
        Function to initialize module biases.
    gain : float
        Scaling factor passed to ``weight_init``, default 1.
Returns
-------
module : nn.Module
Initialized module
"""
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
|
94c66b98ff26591a33ed17980108706658e6c091
| 42,901 |
def convert_ftp_url(url):
"""Convert FTP to HTTPS URLs."""
return url.replace('ftp://', 'https://', 1)
|
7088d0f9f802cbfdeaa85c77d80f3ac41f33c1d1
| 42,903 |
import warnings
def _fix_auth(auth, username=None, password=None, verify=None, cert=None):
"""Updates auth from deprecated parameters username, password, verify and cert."""
if any(p is not None for p in (username, password, verify, cert)):
message = 'The use of "username", "password", "verify", and "cert" is deprecated. ' + \
'Please use the "auth" keyword during class instantiation. ' + \
'These keywords will be removed in a future release.'
warnings.warn(message, DeprecationWarning)
if username is not None:
auth.username = username
if password is not None:
auth.password = password
if verify is not None:
auth.verify = verify
if cert is not None:
auth.cert = cert
return auth
|
ecd28033279973482ec981ad23041929325ba2f6
| 42,905 |
def postproc(maps):
"""Generate PD, R1, R2* (and MTsat) volumes from log-parameters
Parameters
----------
maps : ParameterMaps
Returns
-------
pd : ParameterMap
r1 : ParameterMap
r2s : ParameterMap
mt : ParameterMap, optional
"""
maps.r1.volume = maps.r1.fdata().exp_()
maps.r1.name = 'R1'
maps.r1.unit = '1/s'
maps.r2s.volume = maps.r2s.fdata().exp_()
maps.r2s.name = 'R2*'
maps.r2s.unit = '1/s'
maps.pd.volume = maps.pd.fdata().exp_()
    maps.pd.name = 'PD'
    maps.pd.unit = 'a.u.'
if hasattr(maps, 'mt'):
maps.mt.volume = maps.mt.fdata().neg_().exp_()
maps.mt.volume += 1
maps.mt.volume = maps.mt.fdata().reciprocal_()
maps.mt.volume *= 100
maps.mt.name = 'MTsat'
maps.mt.unit = 'p.u.'
return maps.pd, maps.r1, maps.r2s, maps.mt
return maps.pd, maps.r1, maps.r2s
|
db16ec87e2400e7a627f23cc3f89a982c6a3ba66
| 42,910 |
def join_dict(keys, values):
"""
Create a dictionary from a list of
keys and values having equal lengths
"""
if len(keys) == len(values):
adict = dict(zip(keys, values))
return adict
else:
print('Error: Attempting to create a dictionary from '
'a key and value list of unequal length')
return -1
|
8b0297b85cdd3bf07544f954ac21d1e0e6328a0f
| 42,911 |
from typing import List
def get_whitespace_operations(from_sequence: str, to_sequence: str) -> List[int]:
"""
Get the repair sequence that turns from_sequence into to_sequence (after applying the repair_whitespace function)
:param from_sequence: sequence that the returned repair tokens should be applied to to get the to_sequence
:param to_sequence: sequence that should result from applying the whitespace operations to the from_sequence
:return: list of repair tokens
"""
assert from_sequence.replace(" ", "") == to_sequence.replace(" ", ""), \
f"make sure from_sequence and to_sequence only differ in whitespaces:\n{from_sequence}\n{to_sequence}"
from_sequence_ptr = 0
to_sequence_ptr = 0
repair_tokens = []
while from_sequence_ptr < len(from_sequence): # and to_sequence_ptr < len(to_sequence):
from_char = from_sequence[from_sequence_ptr]
to_char = to_sequence[to_sequence_ptr] if to_sequence_ptr < len(to_sequence) else ""
if from_char == to_char:
repair_tokens.append(0)
from_sequence_ptr += 1
to_sequence_ptr += 1
elif to_char == " ":
repair_tokens.append(1)
from_sequence_ptr += 1
to_sequence_ptr += 2
elif from_char == " ":
repair_tokens.append(2)
from_sequence_ptr += 1
else:
raise ValueError("should not happen")
assert len(repair_tokens) == len(from_sequence), \
f"{''.join(str(r) for r in repair_tokens)}\n'{from_sequence}'\n'{to_sequence}'"
return repair_tokens
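# Illustrative check (not part of the original snippet): turning "ab c" into
# "a bc" keeps 'a', inserts a space after 'b', deletes the old one, keeps 'c'.
assert get_whitespace_operations("ab c", "a bc") == [0, 1, 2, 0]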
|
abd51f6d21ca3daded25d9bc0fb389c7c6133ae3
| 42,912 |
def batch_norm(inputs,
activation_fn=None,
normalizer_fn=None,
normalizer_params=None):
"""Batch normalization layer compatible with the classic conv. API.
Simpler to use with arg. scopes.
"""
outputs = inputs
# BN...
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
|
857c8a75c998c60478008b9db877bd4a8f974cb2
| 42,918 |
def sanitize(address):
"""
Sanitize the location field if needed.
:param str address: address to sanitize
"""
# Remove 'block of ' from the address.
addr = address.lower()
addr = addr.replace('block of ', '')
addr = addr.replace('block ', '')
return addr
|
e740a2cf9e12c4c8befed6a5efff1a1a672b4a24
| 42,921 |
def paint(width, height, performance):
"""Calculates how many paint does one need
for given area
@param width: area's width
@param height: area's height
@param performance: paint performance/m^2"""
area = width * height
return area / performance
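# Illustrative check (not part of the original snippet): a 5 m x 2 m wall
# with coverage of 10 m^2 per unit of paint needs exactly 1 unit.
assert paint(5, 2, 10) == 1.0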
|
02243f92ab5b3f714bb94f489b2b8e6e49f6c4f0
| 42,925 |
import torch
def decode(box_p, priors):
"""
Decode predicted bbox coordinates using the same scheme
employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf
b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x
b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y
b_w = prior_w * exp(loc_w)
b_h = prior_h * exp(loc_h)
Note that loc is inputed as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]
while priors are inputed as [x, y, w, h] where each coordinate
is relative to size of the image (even sigmoid(x)). We do this
in the network by dividing by the 'cell size', which is just
the size of the convouts.
Also note that prior_x and prior_y are center coordinates which
is why we have to subtract .5 from sigmoid(pred_x and pred_y).
"""
variances = [0.1, 0.2]
boxes = torch.cat((priors[:, :2] + box_p[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(box_p[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
|
28c4909e7207cb813e7622d3313574c42da36fe9
| 42,926 |
def cross(environment, book, row, sheet_source, column_source, column_key):
"""
Returns a single value from a column from a different dataset, matching by the key.
"""
a = book.sheets[sheet_source]
return environment.copy(a.get(**{column_key: row[column_key]})[column_source])
|
384fe03dec39de6e2f7bec51ed2b72c6a9555e78
| 42,930 |
from typing import Dict
from pathlib import Path
def _certificate_check(path) -> Dict[str, Path]:
"""
Check if the right certificates are at the given path.
"""
certi_path = {
"ca": "ca.crt",
"crt": "client.crt",
"key": "client.key",
}
r_paths: Dict[str, Path] = {}
for k, f in certi_path.items():
r_paths[k] = path / f
if not r_paths[k].exists():
raise FileNotFoundError(f"'{f}' was not found in at: {path}")
return r_paths
|
b38dc126715feca3ac982a6ea34314ef5fc10a25
| 42,934 |
def map_unconstrained_range(
x: float, in_min: float, in_max: float, out_min: float, out_max: float
) -> float:
"""
Maps a number from one range to another. Somewhat similar to the Arduino
:attr:`map()` function, but returns a floating point result, and
does not constrain the output value to be between :attr:`out_min` and
:attr:`out_max`. If :attr:`in_min` is greater than :attr:`in_max` or
:attr:`out_min` is greater than :attr:`out_max`, the corresponding range
is reversed, allowing, for example, mapping a range of 0-10 to 50-0.
See also :py:func:`map_range`
.. code-block::
from adafruit_simplemath import map_unconstrained_range
celsius = -20
fahrenheit = map_unconstrained_range(celsius, 0, 100, 32, 212)
print(celsius, "degress Celsius =", fahrenheit, "degrees Fahrenheit")
:param float x: Value to convert
:param float in_min: Start value of input range.
:param float in_max: End value of input range.
:param float out_min: Start value of output range.
:param float out_max: End value of output range.
:return: Returns value mapped to new range.
:rtype: float
"""
in_range = in_max - in_min
in_delta = x - in_min
if in_range != 0:
mapped = in_delta / in_range
elif in_delta != 0:
mapped = in_delta
else:
mapped = 0.5
mapped *= out_max - out_min
mapped += out_min
return mapped
|
cb07a0c71bd89f574faccd5bcd197d82558f4e6a
| 42,936 |
def find_unique_value_error(exc_detail):
"""Find unique value error in exception details."""
for field, errors in exc_detail.items(): # noqa: B007
for error in errors:
if error.code == 'unique':
return error
return None
|
aa25202b311e03e19c842174bc641ad1236db920
| 42,940 |
def split_and_strip(string, delimiter):
"""
Return a list of stripped strings after splitting `string` by `delimiter`.
Parameters
----------
string : str
The string to split and strip.
delimiter : str
The string to split by.
Returns
-------
list[str]
The list of strings that are stripped after splitting by `delimiter`.
"""
# begin solution
return [s.strip() for s in string.split(delimiter)]
# end solution
|
83e08c2a243aa01b5e0b670bf7ed81937b16de8f
| 42,942 |
import requests
def get_word_count(title, base_url="https://klexikon.zum.de/api.php") -> int:
"""
Return the word count of an Klexikon article.
:param title:
:param base_url
:return:
"""
params = {
"action": "query",
"format": "json",
"list": "search",
"srlimit": 5,
"srsearch": title
}
try:
res = requests.get(url=base_url, params=params)
article = res.json()['query']['search'][0]
return article['wordcount']
except IndexError: # Likely due to incorrect search results.
return 0
|
d70dccc868cb5bf8837b43f05ac55f648ece6882
| 42,944 |
import math
def f0(system):
"""Return the natural frequency of the system."""
c1,c2,r1,r2 = system
fn = 1 / (2 * math.pi * math.sqrt(r1 * c1 * r2 * c2))
return fn
|
018393c0eea1da35a22c45130ee61743e2535070
| 42,945 |
def get_last_data_idx(productions):
"""
Find index of the last production
:param productions: list of 24 production dict objects
:return: (int) index of the newest data or -1 if no data (empty day)
"""
for i in range(len(productions)):
if productions[i]['total'] < 1000:
return i - 1
return len(productions) - 1
|
d06f2b8e6ff4c94931f57c66e81a9198bdd2baa9
| 42,950 |
import re
def split_keyword(keyword):
"""Split a keyword in multiple ones on any non-alphanumeric character
:param string keyword: keyword
:return: keywords
:rtype: set
"""
split = set(re.findall(r'\w+', keyword))
return split
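# Illustrative check (not part of the original snippet):
assert split_keyword("state-of-the-art NLP") == {"state", "of", "the", "art", "NLP"}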
|
015eb669f8ca309c3abe139d6dbb20d0b9022ae8
| 42,951 |
def valid_history(history, expected_count, expected_messages=None):
"""Checks if history is valid"""
expected_messages = expected_messages or []
if len(history) != expected_count:
return False
for i, value in enumerate(expected_messages):
if history[i]["type"] != value:
return False
return True
|
3eef39ab8877236a22697b907703e4ee30a2685c
| 42,953 |
def _get_main_opset_version(model):
"""
Returns the main opset version.
"""
for op in model.opset_import:
if op.domain == '' or op.domain == 'ai.onnx':
return op.version
return None
|
ba4717b473d08ae40840c681c55ed4b28e28dea5
| 42,955 |