content | sha1 | id
---|---|---
def get_words_from_tuples(examples):
"""
You may find this useful for testing on your development data.
params:
examples - a list of tuples in the format [[(token, label), (token, label)...], ....]
return:
a list of lists of tokens
"""
return [[t[0] for t in example] for example in examples]
|
fceca9e34d59338a4dd612a6b61826c328616643
| 39,955 |
def predict_test_data(rf, test_features):
""" Predicts outcomes from test data
Parameters
----------
rf : model
Random forest model built from train data, returned from create_random_forest()
test_features : numpy array
List of fraction of "x variables" used to test the model, returned from split_train_test()
Returns
-------
predictions : numpy array
List of predicted "y values" when using the test data (test_features) and the random forest model (rf)
"""
predictions = rf.predict(test_features)
return predictions
|
86314bf6dcba87d1fb8fe47b2bbca2cc8091fb96
| 39,958 |
def next_batch_for_narx(dataset, window_size, n_steps, batch_size):
"""Returns a batch in a narx format: x0, y, inputs.
Args:
dataset (TSDataset): time series dataset.
window_size (int): number of steps to include in x0.
n_steps (int): number of steps to include in y and u.
batch_size (int): number of samples in the batch.
Returns:
(x0, y, u): batch.
"""
def format_batch(batch, window_size, n_steps):
_y = [batch['y'][i, :, :]
for i in range(batch['y'].shape[0])]
x0 = _y[:window_size]
y = _y[window_size:]
# dy = [_y[i] - _y[i - 1] for i in range(window_size, len(_y))]
u = [batch['inputs'][i, :, :]
for i in range(window_size - 1, batch['inputs'].shape[0] - 1)]
assert (len(x0) == window_size and
len(y) == n_steps and
len(u) == n_steps), \
'len of x0, y or u do not coincide with provided '\
'window_size and n_steps'
return x0, y, u
batch = dataset.next_batch(window_size=window_size + n_steps,
batch_size=batch_size)
x0, y, u = format_batch(batch, window_size, n_steps)
return x0, y, u
|
724b3381b57a37d0f74a23f0dd624278968809a9
| 39,961 |
def _sourcenames(short=False):
"""Return a list with the source names.
:param short: True for shorter names, defaults to False
:type short: bool, optional
:return: Source names.
    :rtype: list [str]
"""
if short is False:
sources = ["psicov", "ccmpred", "deepmetapsicov"]
else:
sources = ["psicov", "ccmpred", "dmp"]
return sources
|
94144f783e3e6de83e2522a0f4f070b53e69b913
| 39,963 |
def replace_short_forms(note: str, long_forms: list, span: list) -> str:
"""
    Given a list of long forms and the spans of the short forms, replace
    the short forms by long forms in note using the string indices.
"""
note_replaced: str = note
# Iterates in reverse order, otherwise we would have to change the span indeces
for long_form, index_span in zip(long_forms[::-1], span[::-1]):
note_replaced = note_replaced[: index_span[0]] + long_form + note_replaced[index_span[1]:]
return note_replaced
|
ae2f1855ecab940ea6ef265648d806a7b71acfe6
| 39,970 |
def decode_image(layer_list):
"""
    Takes a list of image layer strings as input.
    Produces an output by looking through the layers at each pixel position:
    If a digit is 0, that pixel is black.
    If a digit is 1, that pixel is white.
    If a digit is 2, that pixel is transparent.
    The first layer with a 0 or 1 at a position dictates the color of that pixel.
"""
decoded = []
for i in range(len(layer_list[0])):
layer = 0
while True:
if layer_list[layer][i] == '0':
decoded.append('0')
break
elif layer_list[layer][i] == '1':
decoded.append('1')
break
layer += 1
return decoded
|
c0c0ef4a4831f2399174957fa504496945008c91
| 39,973 |
import string
import random
def random_str(length: int = 20, has_num: bool = False) -> str:
"""
    generate a random str with len == length
    :param length: the length of the random str, default=20
    :param has_num: whether to include digits in the result
    :return: str
    """
    all_char = string.ascii_lowercase + string.ascii_uppercase
    if has_num:
        all_char += string.digits
    return ''.join(random.choices(all_char, k=length))
|
6e7aff8f6a256652fd067124b6566c4f649008af
| 39,974 |
def batch_list(inputlist, batch_size):
"""
Returns the inputlist split into batches of maximal length batch_size.
Each element in the returned list (i.e. each batch) is itself a list.
"""
list_of_batches = [inputlist[ii: ii+batch_size]
for ii in range(0, len(inputlist), batch_size)]
return list_of_batches
|
b1de33c663e8d17645eeabd190a6f17442795a8d
| 39,975 |
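A quick usage check (illustrative, not part of the original snippet): the final batch is simply shorter when batch_size does not divide the input length.
assert batch_list(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]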
def clear_all_none(args):
""" Strip all None from a key/value structure, including keys which nested them
Args:
args (dict): key/value pairs which may contain None values
Returns:
dict: structure with all None removed and no empty values
Example:
>>> clear_all_none({'a': 1, 'b': {'c': None}})
{'a': 1}
"""
return dict((x, y) for x, y in [
(k, (v if not isinstance(v, dict) else clear_all_none(v)))
for k, v in args.items() if v is not None ] if y != {})
|
8416266e99a9e03158ab024140f93a1ec3597e9c
| 39,979 |
def dYdx(Ys, h):
"""
Return the first derivative of Ys w.r.t x
"""
first = 0.0 * Ys
# skip the end points
for i in range(1, len(Ys) - 1):
first[i] = (Ys[i + 1] - Ys[i - 1]) / (2.0 * h)
return first
|
2fa4e0929ad5c254cd3a391aaebb8657ea5d4b78
| 39,984 |
def read_experiment_lines(readme_lines, start_marker_a="TGA",
start_marker_b="###", end_marker="###"):
"""
This function iterates over a list of strings and searches for information
about a desired experiment. The information is found by looking for
sub-strings (markers) that encapsulate the desired information. A
shortened list is returned containing the experiment information.
:param readme_lines: List of string containing text file content
:param start_marker_a: Marker to find the desired experiment
:param start_marker_b: Additional marker to make sure the correct
line is chosen
:param end_marker: Marker to indicate when to stop collecting lines.
:return: list, containing lines of string related to a desired experiment
"""
# Initialise collection of experiment description.
experiment_lines = list()
# Flag to control what lines to collect.
collect_entry = False
# Iterate over all the lines of the file content.
for line in readme_lines:
# Skip empty lines.
if line == '':
continue
if end_marker in line:
if "####" not in line:
# Stop collecting lines after the TGA experiment
# description concluded and a new section starts.
collect_entry = False
if start_marker_a in line and start_marker_b in line:
# Allow collection of lines.
collect_entry = True
if collect_entry is True:
# Collect lines.
experiment_lines.append(line)
return experiment_lines
|
e80cd6213ba703f970db8f9b6b42704c179c4fdd
| 39,985 |
def create_square(lat, lon, radius=0.0001):
"""
    Create a GeoJSON square polygon
    Args:
        lat: the center latitude for the polygon
        lon: the center longitude for the polygon
        radius (float): half of the length of the edge of the square (default 0.0001)
Returns:
list: a list of lat/lon points defining a square polygon
"""
return [
[round(lon + radius, 7), round(lat + radius, 7)],
[round(lon + radius, 7), round(lat - radius, 7)],
[round(lon - radius, 7), round(lat - radius, 7)],
[round(lon - radius, 7), round(lat + radius, 7)]
]
|
5dac7311997ea812d59b7fce9c3ab21c92178f98
| 39,986 |
def filter_stream(streams, excludes):
"""
Uses a list of keywords to remove sensors or streams from the list returned by OOI Net.
:param streams: list of sensor or streams returned from OOI Net
:param excludes: list of keywords to use in pruning the list
:return: a cleaned, pruned list
"""
clean = []
for stream in streams:
if not any(sub in stream for sub in excludes):
clean.append(stream)
return clean
|
d7c7278714cb80541a2aa29ab1c111256ef6b618
| 39,987 |
import math
def angle_to_comp(n, deg=False):
"""Returns the complex number with a magnitude of 1 that forms an angle of n with the real axis
n: angle as float \\
deg: bool (if ```True```, n is taken to be in degrees, if ```False```, n is taken to be in radians)"""
if deg:
n = math.radians(n)
return complex(math.cos(n), math.sin(n))
|
a6e873b7d3bf382d3ea7077bca0a89d436742336
| 39,988 |
def error_j(Dj,Pap,Pdc,PolError,exp_loss_jt):
"""
Calculates the conditional probability for a pulse of intensity mu_j
to cause an error, after sifting, in the time slot t.
Defined as e_k in Sec. IV of [1].
Parameters
----------
Dj : float, array
Expected detection rate.
Pap : float
Probability of an afterpulse event.
Pdc : float
Dark count probability.
PolError : float
Errors in polarisation basis.
exp_loss_jt : float, array
Loss, per intensity per time slot, decay function.
Returns
-------
float, array
Error rate per intensity per time slot.
"""
return Pdc + (0.5*Pap*Dj) + PolError*(1 - exp_loss_jt)
|
b3a8059154ef6be339c833f4955b1f78d3a89f77
| 39,992 |
import random
def shuffled(seq):
"""Returns a list with the elements in seq, shuffled.
"""
lst = list(seq)
random.shuffle(lst)
return lst
|
9e80483084b290aace080269d697accffb649755
| 39,993 |
import hashlib
def md5_hash(string):
""" Calculates the MD5 sum of a string
"""
m = hashlib.md5()
m.update(string.encode('utf-8'))
return m.hexdigest()
|
cf89d1c83e3fa1382c2f883627a774bfc51475e1
| 39,996 |
import functools
def tile_flatten_sources(tile):
"""
Extract sources from tile as a flat list of Dataset objects,
this removes any grouping that might have been applied to tile sources
"""
return functools.reduce(list.__add__, [list(a.item()) for a in tile.sources])
|
d598c6ffc4c6ed061f2093dd2c743cb7edd18da0
| 40,002 |
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
Resolving a check name involves expanding tag references (e.g., '@tag') with
all the checks that contain the given tag.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
resolved = set()
for name in names:
if name.startswith("@"):
for check in all_checks:
if name[1:] in check.tags:
resolved.add(check.name)
else:
resolved.add(name)
return resolved
|
f1e1e7a880c2626e8086a90fffce5aac88e38a74
| 40,006 |
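A minimal usage sketch, assuming a hypothetical check type with name and tags attributes (the snippet itself does not define one):
from collections import namedtuple

Check = namedtuple("Check", ["name", "tags"])  # hypothetical stand-in for a check class
checks = [Check("c1", {"fast"}), Check("c2", {"slow"}), Check("c3", {"fast"})]
assert resolve_checks(["@fast", "c2"], checks) == {"c1", "c2", "c3"}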
def parse_device_env(data):
"""
parse something like this:
{meo}=>env list
_SW_FLAG=E1
_ETHERNET=SWITCH
_COMPANY_NAME=THOMSON
_COMPANY_URL=http://www.thomson.net
_PROD_NAME=Thomson TG
_BRAND_NAME=Thomson
_PROD_URL=http://www.thomson-broadband.com
_PROD_DESCRIPTION=DSL Internet Gateway Device
_PROD_NUMBER=784n
_SSID_SERIAL_PREFIX=Thomson
_BOARD_SERIAL_NBR=134TAP7
_PROD_SERIAL_NBR=CP1430NTAP7
_FII=9.5.16.16.0
...
...
...
    We split each line at the first '=' character:
    - the left part becomes the key in our dict.
    - the right part becomes the value.
    E.G.
    line = "_PROD_SERIAL_NBR=CP1430NTAP7"
    split_line = ["_PROD_SERIAL_NBR", "CP1430NTAP7"]
    key = "_PROD_SERIAL_NBR"
    val = "CP1430NTAP7"
:param data: <list> data for parsing
:return:<dictionary> Dynamic Structured Data
"""
output = {}
data = data[1:-1]
    for line in data:
        # EXAMPLE: line: '\nSTS_TelnetSshFtp_Fix=Enabled'
        # split at the first '=' only, so values containing '=' are kept intact
        key, val = line.split("\n")[1].split("=", 1)
        output[key] = val
return output
|
9c5e3b89adf50944352273f12bfb0561f3ec6d43
| 40,009 |
def getColData(data, colnums = None, colnames = None, copy=False):
"""
Get data from a DataFrame column(s)
Inputs:
> data: The DataFrame
> colnums: The number(s) of the column(s)
> colnames: The name(s) of the column(s)
> copy (False by default): should we copy the data?
Output:
> DataFrame of the specified column data
"""
if colnums is not None:
if copy is True:
subdata = data.iloc[:,colnums].copy()
else:
subdata = data.iloc[:,colnums]
elif colnames is not None:
if copy is True:
subdata = data[colnames].copy()
else:
subdata = data[colnames]
else:
print("Not doing anything with the data in getColData")
return data
return subdata
|
a215142f19327c26bebc96be5063cbcd54dae0f6
| 40,015 |
def as_sender_tuple(sender):
"""build a sender tuple
>>> as_sender_tuple('joe@testco.com')
('joe@testco.com', 'joe@testco.com')
>>> as_sender_tuple(('joe@testco.com', 'joe@testco.com'))
('joe@testco.com', 'joe@testco.com')
>>> as_sender_tuple(['joe@testco.com', 'joe@testco.com'])
('joe@testco.com', 'joe@testco.com')
"""
if isinstance(sender, str):
return sender, sender
return tuple(sender)
|
59e48d4a0e4fab84a80a7891b184745db810e21c
| 40,016 |
def listify_multiline_string(string):
"""
Return a list constructed by splitting the given multiline string,
stripping whitespace, and filtering out empty values.
:param string: The multiline string to convert into a list.
:return: The resulting list.
"""
result = [i.strip() for i in string.splitlines()]
    return list(filter(None, result))
|
3234b4e07cd8b47c9ca30dc2cd5e866ddee76969
| 40,017 |
def tennis_2d_ip(env_obs, subtask_obs):
"""Return True if the ball bounces off the floor for the 2nd time."""
return env_obs["achieved_goal"][2] == 1.
|
84f82a0c977286a9de218f1b96a17212b79b53a2
| 40,019 |
def resolve_slice(slice_, n):
"""Return a bounded slice given length `n`."""
return slice(*slice_.indices(n))
|
0f96737d05a9cf3845f9bb267e5cf75696fc42c1
| 40,021 |
def nid_to_slurm_nid_name(nid):
"""
Return string with slurm nid name for given nid number
"""
return "nid%06d" % nid
|
7db01a6a8e27565b58d697195229a1cfe9626c83
| 40,024 |
def validate_subnet_mask(subnet_mask):
"""Checks that the argument is a valid subnet mask.
:param str subnet_mask: The subnet mask to check.
    :return: True if the subnet mask is valid (a ValueError is raised otherwise).
:rtype: bool
:raises ValueError: if the subnet mask is invalid.
.. seealso::
https://codereview.stackexchange.com/questions/209243/verify-a-subnet-mask-for-validity-in-python
"""
if subnet_mask is not None and subnet_mask.strip():
subnet_mask = subnet_mask.strip()
a, b, c, d = (int(octet) for octet in subnet_mask.split("."))
mask = a << 24 | b << 16 | c << 8 | d
if mask < 1:
raise ValueError("Invalid subnet mask: {0}".format(subnet_mask))
else:
# Count the number of consecutive 0 bits at the right.
# https://wiki.python.org/moin/BitManipulation#lowestSet.28.29
m = mask & -mask
right0bits = -1
while m:
m >>= 1
right0bits += 1
            # Verify that all the bits to the left are 1s
if mask | ((1 << right0bits) - 1) != 0xffffffff:
raise ValueError("Invalid subnet mask: {0}".format(subnet_mask))
return True
else:
raise ValueError("Invalid subnet mask: {0}.".format(subnet_mask))
|
64c4af917125183700df8849c6aaea058d89977d
| 40,025 |
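A short usage sketch (illustrative): a contiguous mask validates, a non-contiguous one raises.
assert validate_subnet_mask("255.255.255.0") is True
try:
    validate_subnet_mask("255.0.255.0")  # non-contiguous bits
except ValueError as err:
    print(err)  # Invalid subnet mask: 255.0.255.0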
import math
def divisors(n: int) -> list[int]:
"""Find all positive integer divisors of a given positive integer.
Parameters
----------
n : int
Number whose divisors are to be found.
Returns
-------
list[int]
List of divisors of :math:`n`.
"""
if n == 1:
return [1]
d = [1, n]
sqrt = math.ceil(math.sqrt(n))
for k in range(2, sqrt):
if n % k == 0:
d.extend([k, n // k])
if n == sqrt ** 2 and sqrt not in d:
d.append(sqrt)
return sorted(d)
|
fda406fd51d81119b9e42c04abf51eb2f3f534b1
| 40,026 |
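Two quick checks (illustrative), including a perfect square to exercise the sqrt branch:
assert divisors(28) == [1, 2, 4, 7, 14, 28]
assert divisors(36) == [1, 2, 3, 4, 6, 9, 12, 18, 36]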
import torch
import typing
def unvectorize(vector: torch.Tensor, reference_state_dict: typing.Dict[str, torch.Tensor]):
"""Convert a vector back into a state dict with the same shapes as reference state_dict."""
if len(vector.shape) > 1: raise ValueError('vector has more than one dimension.')
state_dict = {}
for k in sorted(reference_state_dict.keys()):
if vector.nelement() == 0: raise ValueError('Ran out of values.')
size, shape = reference_state_dict[k].nelement(), reference_state_dict[k].shape
this, vector = vector[:size], vector[size:]
state_dict[k] = this.reshape(shape)
if vector.nelement() > 0: raise ValueError('Excess values.')
return state_dict
|
8ad072c18dd4af9dbb41034d010f0f3ce1d78602
| 40,030 |
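A minimal usage sketch (illustrative): keys are consumed in sorted order, so 'b' takes the first values of the vector.
import torch

reference = {"w": torch.zeros(2, 3), "b": torch.zeros(3)}
vec = torch.arange(9.0)
state_dict = unvectorize(vec, reference)
assert state_dict["b"].tolist() == [0.0, 1.0, 2.0]
assert state_dict["w"].shape == (2, 3)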
from typing import Awaitable
import asyncio
async def delay_task(delay: float, task: Awaitable):
"""Wait a given amount of time before executing an awaitable."""
await asyncio.sleep(delay)
return await task
|
90fb999293528043faf5a3841a8740436f337cd8
| 40,031 |
def build_user_agent(octavia_version: str, workspace_id: str) -> str:
"""Build user-agent for the API client according to octavia version and workspace id.
Args:
octavia_version (str): Current octavia version.
workspace_id (str): Current workspace id.
Returns:
str: the user-agent string.
"""
return f"octavia-cli/{octavia_version}/{workspace_id}"
|
4226a93a4a06a744680ac6836785d01e81d1dfbc
| 40,032 |
def extended_gcd(a, b):
"""
We know:
ax + by = gcd(a, b)
This function returns gcd(a,b), x , y
"""
if a == 0:
return b, 0, 1
gcd, x_, y_ = extended_gcd(b % a, a)
x = y_ - (b // a) * x_
y = x_
return gcd, x, y
|
f3e2b0a7c9001cac23586397349859ab02e4507d
| 40,035 |
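A quick check of the Bezout identity, plus the usual modular-inverse application (illustrative):
g, x, y = extended_gcd(240, 46)
assert g == 2 and 240 * x + 46 * y == g
# When gcd(a, n) == 1, x is the inverse of a modulo n.
g, x, _ = extended_gcd(3, 7)
assert g == 1 and (3 * x) % 7 == 1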
def label_from_id(id_string):
"""
    Returns a label string constructed from the supplied Id string.
    Underscore characters in the Id are replaced by spaces.
    The first character may be capitalized.
>>> label_from_id("entity_id") == "Entity id"
True
"""
temp = id_string.replace('_', ' ').strip()
label = temp[0].upper() + temp[1:]
return label
|
9b52abdce169d26412a69585281ebadcff7cb0c2
| 40,036 |
import pytz
def get_obj_type(obj):
"""Determines the string representation of object's type."""
# Get default type value.
obj_type = type(obj).__name__
# Special handling for certain types.
if obj_type == 'NoneType':
obj_type = 'null'
elif isinstance(obj, pytz.BaseTzInfo):
obj_type = 'pytz_timezone'
return obj_type
|
33537083991b4c8968b4dbec359295b9841ce29f
| 40,037 |
def read_taxdump(nodes_fp, names_fp=None):
"""Read NCBI taxdump.
Parameters
----------
nodes_fp : str
file path to NCBI nodes.dmp
names_fp : str, optional
file path to NCBI names.dmp
Returns
-------
dict of dict
taxid : {
'parent' : str
parent taxid
'rank' : str
taxonomic rank
'name' : str
taxon name, empty if names_fp is None
'children' : set of str
child taxids
}
"""
taxdump = {}
# format of nodes.dmp: taxid | parent taxid | rank | more info...
with open(nodes_fp, 'r') as f:
for line in f:
x = line.rstrip('\r\n').replace('\t|', '').split('\t')
taxdump[x[0]] = {'parent': x[1], 'rank': x[2], 'name': '',
'children': set()}
# format of names.dmp: taxid | name | unique name | name class |
if names_fp is not None:
with open(names_fp, 'r') as f:
for line in f:
x = line.rstrip('\r\n').replace('\t|', '').split('\t')
if x[3] == 'scientific name':
taxdump[x[0]]['name'] = x[1]
# identify children taxids
for tid in taxdump:
pid = taxdump[tid]['parent']
if tid != pid: # skip root whose taxid equals its parent
taxdump[pid]['children'].add(tid)
return taxdump
|
e0d7e7f8150bb2f13fbf0f586117bf69f33cb9ef
| 40,040 |
def _create_group_to_col_position(column_groups):
"""Get mapping from column groups to column positions.
Args:
        column_groups (list): The column groups to display in the estimation table.
Returns:
group_to_col_index(dict): The mapping from column group titles to column
positions.
"""
if column_groups is not None:
group_to_col_index = {group: [] for group in list(set(column_groups))}
for i, group in enumerate(column_groups):
group_to_col_index[group].append(i)
else:
group_to_col_index = None
return group_to_col_index
|
4ff10a8b3076f940dffd4ea81ea25509b61565be
| 40,045 |
import math
def law_of_sines(a, b, c):
"""
    Return the angle, in degrees, of the corner opposite to side c in a triangle given by its 3 sides a, b and c (law of cosines)
"""
return math.degrees(math.acos((c**2 - b**2 - a**2)/(-2.0 * a * b)))
|
fa35c3e5e3df025701644049ba8c6eec8a5042ef
| 40,055 |
def _sanitize_filename(filename):
"""
Get a filename that lacks the / character (so it doesn't express a path by
accident) and also lacks spaces (just for tab-completion convenience).
"""
return filename.replace('/', '_').replace(' ', '_')
|
ce19efdb439762d305987447651b8ffd9c6aaff8
| 40,056 |
def ascii_chars_to_image(ascii_chars, width=250):
"""Function to take a string of ASCII chars, and append a new line character after X (width) of pixels.
This essentially translates the ASCII string to an image.
"""
# join a newline character after every X amount of pixels (ie if width is 100, adds a newline char every 100 chars)
return "\n".join(
ascii_chars[i : i + width] for i in range(0, len(ascii_chars), width)
)
|
f94403a7e03ded22be107dc0446ffe5aa671bf0a
| 40,060 |
def nice_price(price):
""" Returns the price in nice numbers with k/m/b on the end as a string """
if price < 1000:
return f'{price:,.0f} gp'
elif price < 1000000:
return f'{price / 1000:,.1f} K gp'
elif price < 1000000000:
return f'{price / 1000000:,.1f} M gp'
else:
return f'{price / 1000000000:,.2f} B gp'
|
fe1ae999321808b7cc819db2f71433509143802c
| 40,067 |
def title(text):
"""Convert to title case."""
return text.title()
|
4e66499cb607f5656f2463b28ed84ba40cb6039a
| 40,069 |
def byLength(word1, word2):
"""
    Compares two strings by their length
    Returns:
        Negative if word2 > word1
        Positive if word1 > word2
        Zero if word1 == word2
"""
return len(word1) - len(word2)
|
98221c3dd8d308eb9bd2055eef8f345208d0b166
| 40,074 |
def invert(array):
"""return a dictionary mapping array values to arrays of indices
containing those values
"""
inverted_array = {}
for i, val in enumerate(array):
inverted_array.setdefault(val, []).append(i)
return inverted_array
|
ed04b2cf90d0ec07d96f4153f6a793c780397eb9
| 40,075 |
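A one-line usage check (illustrative):
assert invert(['a', 'b', 'a']) == {'a': [0, 2], 'b': [1]}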
def hash_distance(left_hash, right_hash):
"""Compute the hamming distance between two hashes"""
if len(left_hash) != len(right_hash):
raise ValueError('Hamming distance requires two strings of equal length')
return sum(map(lambda x: 0 if x[0] == x[1] else 1, zip(left_hash, right_hash)))
|
3061fd1f22e2c56240656256508a009b0f1d4fe5
| 40,076 |
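Two quick checks (illustrative):
assert hash_distance("cafe", "cafd") == 1
assert hash_distance("0000", "1111") == 4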
def reduce_dict(dictionary, keys):
"""Returns a dictionary containing only the input keys"""
return {key: (dictionary[key] if key in dictionary else []) for key in keys}
|
be075ac04376d1922c70ae8de37fa2843a06ba12
| 40,078 |
def _node_size(node):
"""Computes `node`'s size."""
if node:
return 1 + _node_size(node.left) + _node_size(node.right)
else:
return 0
|
95c25a3380cfe880f27e67be4bc0bfe5e7e0682c
| 40,079 |
import re
def parse_ksql_query(query):
"""Read the parameters of the provided KSQL query.
Args:
query (str): the query given as string.
Returns:
(str, str, str, str): a tuple that contains: the name of the table, the name of
the column that holds the metrics value, the name of the column that is used
for comparison with the metric name, and the name to compare to.
"""
query_pattern = "SELECT (\\w+) FROM (\\w+) WHERE (\\w+) *= *'(.+)';"
match_group = re.match(query_pattern, query, re.IGNORECASE)
assert match_group is not None, f"The query {query!r} has an invalid format."
value_column, table, comparison_column, metric_name = match_group.groups()
message = (
"The column for the metric names and the one for the metrics values"
" cannot be the same."
)
assert value_column != comparison_column, message
return table, value_column, comparison_column, metric_name
|
2207428b49650b7f5a843673a4ee242886bab781
| 40,080 |
import math
def _on_base(base_run_id) -> str:
"""
    Whether a runner is on base.
    :param base_run_id: retrosheet base_run_id
    :return: '1' or '0' (1: True, 0: False)
"""
if type(base_run_id) == float and math.isnan(base_run_id):
return '0'
elif type(base_run_id) == str and len(base_run_id) > 0:
return '1'
return '0'
|
13116740f95a1b26c72e4861a3dd6f611e8e3cb6
| 40,081 |
def clean_line(string, stop_char):
"""
# clean_line :: String char -> String
Receives a String and a 'stop_char'.
Scans the string backwards and cuts at the first 'stop_char', returning the new String
ex:
clean_line("this is a # string", '#') --> "this is a "
clean_line("[ X ] > [ V ] # V eh a palavra vazia.", '#') --> "[ X ] > [ V ] "
clean_line("[ X ] > [ V ] # V eh a # palavra vazia.", '#') --> "[ X ] > [ V ] # V eh a "
"""
pos = len(string) - 1
cut_pos = 0
stop_char_found = False
while stop_char_found is False and pos >= 0:
if string[pos] == stop_char:
cut_pos = pos + 1
stop_char_found = True
pos -= 1
return string[:cut_pos]
|
47fb50ca276794b605e5178493afa51ea8155722
| 40,082 |
import pkg_resources
def parse_version(version_string):
"""
Parse string as a verison object for comparison
Example: parse_version('1.9.2') > parse_version('1.9.alpha')
See docs for pkg_resource.parse_version as this is just a wrapper
"""
return pkg_resources.parse_version(version_string)
|
678554ac2095bd2939f634c7c45bddbac86ec3d4
| 40,086 |
def parse_db_arguments(string):
"""Return a list of db arguments parsed from string.
Split string into arguments, strip whitespace from them, and return a list of
the resulting arguments.
"""
arguments = string.split(',')
arguments = [argument.strip() for argument in arguments]
return arguments
|
47dd28393794ddc5ee3edff487f2b80efd84496f
| 40,089 |
def get_grid_coordinates(img_num, grid_size, w, h):
""" given an image number in our sprite, map the coordinates to it in X,Y,W,H format"""
y = int(img_num / grid_size)
x = int(img_num - (y * grid_size))
img_x = x * w
img_y = y * h
return "%s,%s,%s,%s" % (img_x, img_y, w, h)
|
851a0a08cd833cfd498c3d062ada371f07f5e83c
| 40,091 |
def divide_integer_evenly(n, m):
"""Returns a list of `m` integers summing to `n`, with elements as even as
possible. For example:
```
divide_integer_evenly(10, 4) -> [3, 3, 2, 2]
divide_integer_evenly(20, 3) -> [7, 6, 6]
```
"""
lengths = [n // m] * m
for i in range(n - sum(lengths)):
lengths[i] += 1
return lengths
|
90f9ac9533d859834048abcddc4d8acab44c3189
| 40,095 |
import re
import pytest
@pytest.fixture
def temp_dir(request, tmpdir_factory):
    """
    Similar to the pytest built-in tmpdir fixture, but returns a string, and with a less horrible name.
    """
    name = re.sub(r'[\W]', '_', request.node.name)[:30]
    return str(tmpdir_factory.mktemp(name, numbered=True))
|
fd4638667b4957c9af3bfae7efd02d5e16afc159
| 40,097 |
def map_to_range(
old_min: float,
old_max: float,
new_min: float,
new_max: float,
value: float,
) -> float:
"""Maps a value from within one range of inputs to within a range of outputs."""
return ((value - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min
|
c46897d0a8cc1bad79d0fd5c06589075d433c0d8
| 40,098 |
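Quick checks (illustrative); note the output range may be inverted:
assert map_to_range(0.0, 10.0, 0.0, 100.0, 2.5) == 25.0
assert map_to_range(0.0, 10.0, 100.0, 0.0, 2.5) == 75.0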
def leaf_edges(network):
"""
returns leaf edges of the "compas" network as a list
"""
leaf_ver_lis=network.leaves()
leaf_edg_lis=[]
for key in leaf_ver_lis:
edg=network.connected_edges(key)
leaf_edg_lis.append(edg[0])
return leaf_edg_lis
|
62e28f2d9a4ccb8c948e8eb77282ca8a04c008c2
| 40,102 |
from typing import Union
from typing import List
def is_valid_label(label: Union[str, List[str]]) -> bool:
"""Test whether label has a valid value.
:param label: a phrase label (either a string or a list of strings)
:type label: Union[str, List[str]]
:return: whether the label is valid
:rtype: bool
"""
if isinstance(label, list):
for item in label:
if not isinstance(item, str):
return False
return True
return isinstance(label, str)
|
5042b45c3ae984a5a623f48887bfa7d6084cbcc8
| 40,104 |
import json
def weather(horizon):
"""
    Reads json and outputs based on selected parameter
Horizon can be either "all" or an integer between 1 and 90 representing desired timestamp
Eg: http://127.0.0.1:5000/latest/all or http://127.0.0.1:5000/latest/54
Parameters
----------
horizon : string
Horizon can be either "all" or an integer between 1 and 90 representing desired timestamp
Returns
-------
output
Json to output to page
"""
with open(r'.\RESULTS\saved_forecasts_PRD.json', 'r') as jsonfile:
file_data = json.loads(jsonfile.read())
if horizon == "all":
output = json.dumps(file_data)
else:
output = json.dumps(file_data[horizon])
return output
|
9d54cde52f3685475dfe5691dff3ebc6e6584428
| 40,105 |
def get_name(first_name,last_name):
"""Return a full name neatly formatted"""
full_name = first_name + ' '+ last_name
return full_name.title()
|
7b5e1c185e4e22abb91d63a1f92c3b86e75da3ca
| 40,106 |
def is_palindrome(num):
"""
Returns true if num is a palindrome.
"""
return str(num) == str(num)[::-1]
|
490ca1326254e525bcefb21e7822c82ad730962a
| 40,119 |
def _dist(i_a, i_b):
"""
    Just a random distance metric used to decide whether to compute mutual
information for nearby epochs
:param i_a: information for epoch a
:param i_b: information for epoch b
:return: some notion of distance
"""
d = max(
max(abs(i_a[0] - i_b[0])),
max(abs(i_a[1] - i_b[1])),
)
return d
|
32f158d8f3c79c70e90b8ecc66e0a604afa9f153
| 40,121 |
from typing import List
import random
def gen_ints(a: int, b: int, n: int) -> List[int]:
"""Returns an iterable (currently list) of non-repeating, randomized ints."""
assert a < b, "a must be smaller than b"
return random.sample(range(a, b), n)
|
566da812bacaf4f420401ae72a8471e1ac5e1097
| 40,125 |
def default_path(path):
"""
Converts path to default form (with no slash at the end)
:param path: string - path to convert
:return: string - result path
"""
    while path and (path[-1] == '/' or path[-1] == '\\'):
        path = path[0:-1]
return path
|
32d050c46b1830b13d7ca6a9a94405d6c053610c
| 40,129 |
from typing import Any
def main(backend, user_messenger, **kwargs) -> Any:
"""Main entry point of the program.
Args:
backend (qiskit.providers.Backend): Backend to submit the circuits to.
user_messenger (qiskit.providers.ibmq.runtime.UserMessenger): Used to communicate with the
program consumer.
kwargs: User inputs.
Returns:
Final result of the program.
"""
return "Done"
|
138ab2cf556fb0a1da08581cc732e201741d877d
| 40,131 |
def may_view_cronjobs_not_logged_in(app, identity, model, permission):
""" Cronjobs are run anonymously from a thread and need to be excluded
from the permission rules as a result.
"""
return True
|
21d6e7f999e94c5aa8b88af6378906e632144e49
| 40,132 |
def unpack_singleton(x):
"""
Return original except when it is a sequence of length 1 in which case return the only element
:param x: a list
:return: the original list or its only element
"""
if len(x) == 1:
return x[0]
else:
return x
|
0bffdcc339c593aafb1f657134da5d67fc538cbf
| 40,136 |
def paren_join(items, sep):
"""Join items by sep with parens around individual items but not the whole."""
return items[0] if len(items) == 1 else "(" + (") " + sep + " (").join(items) + ")"
|
b513413cdd1c47628068bbe6942db11ac713ad47
| 40,137 |
import re
def clean(word):
"""Removes any non A-Z characters and any vowels from a word"""
cleanWord = re.sub('[^A-Za-z]', '', word) #remove any special characters (non A-Z chars)
cleanWord = re.sub('[AaEeIiOoUu]', '', cleanWord) #remove any vowels
return cleanWord
|
2d37f8f474c52009dc7278eacf9cc81535fc9786
| 40,139 |
def filter_hmm_hit_list(hmm_hit_list, e_value_cutoff="1e-25", hmm_coverage=0.3, max_align_overlap=0.5):
"""
    Filters HMM hits by E-value, coverage, and overlap between hits.
:param hmm_hit_list: List of HMM hit objects.
:param e_value_cutoff: The E-Value cutoff for hits.
:param hmm_coverage: The HMM coverage cutoff for hits.
:param max_align_overlap: The maximum overlap percentage between overlapping HMM hits.
:return: List of filtered HMM hit objects.
"""
hmm_hit_list = [hit for hit in hmm_hit_list if hit.e_value < float(e_value_cutoff)] # Filters hits by E-value.
i = 0
while i < (len(hmm_hit_list) - 1):
hit_one = hmm_hit_list[i] # Current Row in hit table.
hit_two = hmm_hit_list[i + 1] # Row below.
if hit_one.target_protein == hit_two.target_protein:
overlap_between_hits = hit_one.ali_to - hit_two.ali_from
if overlap_between_hits > 0:
# If the overlap is greater than 50% of either alignment.
if ((float(overlap_between_hits) / float(hit_one.ali_length)) > max_align_overlap) or (
(float(overlap_between_hits) / float(hit_two.ali_length)) > max_align_overlap):
if hit_one.e_value < hit_two.e_value:
hmm_hit_list.remove(hit_two)
else:
hmm_hit_list.remove(hit_one)
i -= 1 # Resets list index.
i += 1
hmm_hit_list = [hit for hit in hmm_hit_list if hit.hmm_coverage > hmm_coverage] # Filters by Query Coverage.
return hmm_hit_list
|
de8f6932a895021bbc56d7d7a17d4d8c6c2f2746
| 40,140 |
import torch
def divide_img(img, patch_size):
"""Divides image into tensor of image patches
Args:
img: batch of images, torch.tensor, e.g. [batch_size, channels, 32, 32]
patch_size: patch size, tuple e.g. (4,4)
Returns:
        A torch.tensor of stacked flattened image patches, e.g. [batch_size, 64, channels, 4, 4]
"""
height = img.shape[2]
width = img.shape[3]
patch_size_h = patch_size[0]
patch_size_w = patch_size[1]
A = []
for i in range(int(height/patch_size_h)):
for j in range(int(width/patch_size_w)):
A.append(img[:,:,i*patch_size_h:i*patch_size_h+patch_size_h,j*patch_size_w:j*patch_size_w+patch_size_w])
return torch.stack(A).permute(1,0,2,3,4)
|
83afe56f1dbf985a2edc8c299cc046d7f71690e6
| 40,141 |
def dict_hex_finder(single_hex_dict: dict):
"""Pulls the 'hex' key, 'num_files' key, and the file list out of the dict.
- Args:
- single_hex_dict (dict): dict with one hex, plus various other keys
- Returns:
- 0 [str]: hex
- 1 [list]: files
- 2 [int]: number of files
"""
    hex_val, files, num_files = '', [], 0
for k, v in single_hex_dict.items():
if k == 'num_files':
num_files = v
elif k == 'action':
pass
elif k == 'sub-action':
pass
else:
hex_val = k
files = v
return hex_val, files, num_files
|
a9446c57881a3c29d44a53b8fffbe269c5cb17f4
| 40,142 |
import mimetypes
def to_mime(file_path):
"""
Return the mime type from a given path
:param file_path: Path to analyse
:type file_path: str
:return: Mime type
:rtype: str
"""
return mimetypes.guess_type(file_path)[0]
|
29493825b494fce5268e40c220c81fc8ca85457a
| 40,144 |
import math
def calc_angle(x: float, y: float) -> float:
"""
Calculation of angle based on axial projections
:param x: x axis value [-1 1]
:param y: y axis value [-1 1]
:return: angle [0 2Pi]
"""
if x == 0:
if y == 0:
angle = -1
elif y > 0:
angle = math.pi / 2
else:
angle = 3 * math.pi / 2
else:
angle = math.atan(y / x)
if x < 0:
angle = angle + math.pi
angle = angle % (2 * math.pi)
return angle
|
721befad6395cc7aabe4a05a256c83fa9fe10466
| 40,145 |
def make_date(v):
"""
Convert a date string in DD.MM.YYYY format to YYYY-MM-DD.
>>> make_date("01.02.2003")
'2003-02-01'
"""
return "-".join(reversed(v.split(".")))
|
6f8658aa80c7f118a138d648bad578eea57d12e4
| 40,147 |
import torch
def combine_variance(avg_a, count_a, var_a, avg_b, count_b, var_b):
"""
Compute variance of X given mean and variances of A and B, where X = A union B.
Reference: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#cite_note-:0-10
"""
if count_a + count_b <= 1:
return torch.zeros(var_a.size()).cuda()
delta = avg_b - avg_a
M2_a = var_a * (count_a - 1)
M2_b = var_b * (count_b - 1)
M2 = M2_a + M2_b + delta ** 2 * count_a * count_b / (count_a + count_b)
return M2 / (count_a + count_b - 1)
|
af2f1be4de2ce4ce82413a7a673f7e2f724c2523
| 40,149 |
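A CPU sanity check against computing the variance directly (illustrative; the .cuda() branch is only hit when the combined count is at most 1):
import torch

a, b = torch.randn(100), torch.randn(50)
combined = combine_variance(a.mean(), 100, a.var(), b.mean(), 50, b.var())
assert torch.allclose(combined, torch.cat([a, b]).var(), atol=1e-4)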
def get_abs_axis_pos(rel_pos, ax):
"""
Find coordinates in terms of axis quantities, given relative coordinates from 0 to 1,
so that e.g. (1,1) refers to (xmax,ymax)
:param rel_pos: relative coordinates
:param ax: axis
:return: absolute coordinates
"""
xlims = ax.get_xlim()
ylims = ax.get_ylim()
return (xlims[0] + rel_pos[0] * (xlims[1] - xlims[0]), ylims[0] + rel_pos[1] * (ylims[1] - ylims[0]))
|
c2b2bdbcfff6fc71af9d41e21957bf999af5a982
| 40,152 |
def not_found(e):
"""Page not found."""
return "page not founderino"
|
08da45a9a17bd51a3c3531210e5cef5b8bfbd79d
| 40,154 |
def get_fren_word(category_data):
"""
Takes the category_data list and appends the values corresponding to the
"french_word" key, to a new list called fren_words.
Params:
1) category_data - list containing all the information that corresponds to the user's selected level and category
Examples:
fren_words = get_fren_word(category_data)
> fren_words = ['chien','chat'...]
"""
fren_words = []
for data in category_data:
fren_word = data["french_word"]
fren_words.append(fren_word)
return fren_words
|
1c7d5763dfa2165904df265b4ee0b950da9c48d7
| 40,156 |
def remove_pos(tt):
"""Given a POS tagged token (<token>, <pos>), return only the token.
"""
return tt[0]
|
098e88b31276d45f1e66738ffbe5a9ac517e6e94
| 40,157 |
def write_line(filename, line=""):
"""Helper method to open a file a write a line to it.
Args:
filename (str): Path to the file
line (str, optional): Line to write to the file. Defaults to "".
Returns:
bool: True if successfully written else False.
"""
try:
with open(filename, "w") as text_file:
print(line, file=text_file)
return True
except Exception as e:
print(f"Error while writing: {e}")
return False
|
8b7e73d44c93af9067ec919b0bfff158c30442da
| 40,159 |
import math
def ceil_pow10(value):
"""Similar to math.ceil() but to a power of 10.
    >>> ceil_pow10(42)
100
"""
return 10 ** math.ceil(math.log10(value))
|
2fa2cf60645cdf2858e4182976a486cedbe54cad
| 40,166 |
def _adjust_bounds(model, rxn, bounds):
"""
Applied new bounds to specified reactions in a cobra model.
"""
skip = False
    if bounds[0] < bounds[1]:  # to fix the issue with negative values above
try:
model.reactions.get_by_id(rxn).lower_bound = round(bounds[0], 1)
model.reactions.get_by_id(rxn).upper_bound = round(bounds[1], 1)
except KeyError:
print(f'Did not work for {rxn}')
skip = True
else:
try:
model.reactions.get_by_id(rxn).upper_bound = round(bounds[0], 1)
model.reactions.get_by_id(rxn).lower_bound = round(bounds[1], 1)
except KeyError:
print(f'Did not work for {rxn}')
skip = True
return model, skip
|
a2feb75a39995d662920bd218ebfe4c1126cc25d
| 40,168 |
def s3_bucket_exists(session, name):
"""Test for existence of an S3 bucket.
Note that this method can only test for the existence of buckets owned by
the user.
Args:
session (Session): Boto3 session used to lookup information in AWS.
name (string): Name of S3 bucket.
Returns:
(bool): True if bucket exists.
"""
client = session.client('s3')
resp = client.list_buckets()
for bucket in resp['Buckets']:
if bucket['Name'] == name:
return True
return False
|
8005ea3eb81a8ce34f2def9ea5430fda60cad65c
| 40,176 |
def get_input(text):
"""Prompt text and return text write by the user."""
return input(text)
|
0460eb23c349179c5607b86a1a647c3e1e95e856
| 40,177 |
def get_positive_values(x):
"""Return a list of values v from x where v > 0."""
result = []
for _x in x:
if _x > 0:
result.append(_x)
else:
return result
return result
|
5245dc17e361bbc93596a537553c7878ebbba947
| 40,183 |
def JD_to_MJD(JD):
"""Convert Julian Day (JD) to Modified Julian Day (MJD).
"""
return JD - 2400000.5
|
ee4a12effe0b2cb1eaeeaf092a7ad509f0230711
| 40,186 |
import torch
def log_sum_exp(inputs, keepdim=False, mask=None):
"""Numerically stable logsumexp on the last dim of `inputs`.
reference: https://github.com/pytorch/pytorch/issues/2591
Args:
inputs: A Variable with any shape.
keepdim: A boolean.
mask: A mask variable of type float. It has the same shape as `inputs`.
Returns:
Equivalent of log(sum(exp(inputs), keepdim=keepdim)).
"""
if mask is not None:
mask = 1. - mask
max_offset = -1e7 * mask
else:
max_offset = 0.
s, _ = torch.max(inputs + max_offset, dim=-1, keepdim=True)
inputs_offset = inputs - s
if mask is not None:
        inputs_offset.masked_fill_(mask.bool(), -float('inf'))
outputs = s + inputs_offset.exp().sum(dim=-1, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(-1)
return outputs
|
f4e417e6f1f408abf243b21ad644c736aed61343
| 40,189 |
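A sanity check against the built-in (illustrative):
import torch

x = torch.randn(4, 8)
assert torch.allclose(log_sum_exp(x), torch.logsumexp(x, dim=-1), atol=1e-6)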
def handle_extension(extensions, f):
"""
Returns a decoder handler function for the list of extensions.
Extensions can be a space separated list of extensions.
Extensions can contain dots, in which case the corresponding number
of extension components must be present in the key given to f.
Comparisons are case insensitive.
Examples:
handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg
handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg
"""
extensions = extensions.lower().split()
def g(key, data):
extension = key.lower().split(".")
for target in extensions:
target = target.split(".")
if len(target) > len(extension):
continue
if extension[-len(target):] == target:
return f(data)
return None
return g
|
76bcf48d30e0fd894eddafb53b580f8d6a1b06bf
| 40,192 |
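A usage sketch with a hypothetical decoder function (decode_jpg is illustrative only):
decode_jpg = lambda data: ("jpg", len(data))
handler = handle_extension("jpg jpeg", decode_jpg)
assert handler("photo.JPG", b"\xff\xd8") == ("jpg", 2)  # case-insensitive match
assert handler("photo.png", b"") is None                # no matching extension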
def IsProcessAlive(pid, ppid=None):
"""Returns true if the named process is alive and not a zombie.
A PPID (parent PID) can be provided to be more specific to which process you
are watching. If there is a process with the same PID running but the PPID is
not the same, then this is unlikely to be the same process, but a newly
started one. The function will return False in this case.
Args:
pid: process PID for checking
ppid: specified the PID of the parent of given process. If the PPID does
not match, we assume that the named process is done, and we are looking at
another process, the function returns False in this case.
"""
try:
with open('/proc/%d/stat' % pid) as f:
stat = f.readline().split()
if ppid is not None and int(stat[3]) != ppid:
return False
return stat[2] != 'Z'
except IOError:
return False
|
0c1bff8c3c4109ee6ed3f95f64c9184739a98157
| 40,193 |
def selectivity_formula(counts):
"""Return selectivity
counts: dict of counts, containing at least TP, FP, and FP_COMP
"""
tp = counts['TP']
fp = counts['FP']
fp_comp = counts['FP_COMP']
if not tp and fp==fp_comp:
return 0.0
selectivity = tp/(tp + (fp - fp_comp))
return selectivity
|
fe8124708f271b4312708da0e9f4892bf01d595f
| 40,194 |
def forecast_lstm(model, batch_size, X):
"""
make a one-step forecast
:param model: the model
:param batch_size: the batch size
:param X: the X to generate predictions from
:return: the predicted Y
"""
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0, 0]
|
485ae331802d8d894920ba8f3db2f853a0b86067
| 40,201 |
def parser(word, objconf, skip=False, **kwargs):
""" Parses the pipe content
Args:
word (str): The string to parse
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: substr)
stream (dict): The original item
Returns:
dict: The item
Examples:
>>> from meza.fntools import Objectify
>>>
>>> item = {'content': 'hello world'}
>>> conf = {'start': 3, 'length': 4}
>>> args = item['content'], Objectify(conf)
>>> kwargs = {'stream': item, 'conf': conf}
>>> parser(*args, **kwargs) == 'lo w'
True
"""
end = objconf.start + objconf.length if objconf.length else None
return kwargs['stream'] if skip else word[objconf.start:end]
|
2b43796af7776be5bdca1aec2854f0c5b1c5b799
| 40,207 |
def recursive_check(task, attr="rerun"):
"""Check if a task or any of its recursive dependencies has a given attribute set to True."""
val = getattr(task, attr, False)
for dep in task.deps():
val = val or getattr(dep, attr, False) or recursive_check(dep, attr)
return val
|
b85773b4dcadb20b97e2777c6736654bb1b72957
| 40,209 |
def interp_n2(t, x, y):
"""
Interpolation function for N * 2 value arrays.
Parameters
----------
t : float
Point for which the interpolation is calculated
x : 1-d array with two values
x-axis values
y : 2-d array with size N-by-2
Values corresponding to x
Returns
-------
N-by-1 array
interpolated values at `t`
"""
return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])
|
ee4da6eebcdb6c686a82966a2511f76d511b8e9f
| 40,210 |
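A small numpy example (illustrative): interpolating halfway between the two columns.
import numpy as np

x = np.array([0.0, 1.0])
y = np.array([[0.0, 10.0], [5.0, 7.0]])
print(interp_n2(0.5, x, y))  # [5. 6.]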
def clean_up(entry,replace_newlines=True):
"""Clean up field.
Any empty field is converted from None to a null string.
Within each field, leading/trailing whitespace are stripped
(typically stray trailing spaces or newlines), and internal
newlines are replaced with vertical bars.
Limitations: The replace_newlines option only applies to string
values, not lists of strings.
Arguments:
entry (str or list or None): value to be cleaned up
replace_newlines (boolean, optional) : whether or not to replace newlines in entries
Returns:
(str or list or None): cleaned-up value
"""
if (entry is None):
# case: empty field, read as None
cleaned = None
elif (type(entry) is list):
# case: entries in trailing columns aggregated into list
# handle recursively
cleaned = list(map(clean_up,entry))
else:
cleaned = entry.strip()
if (replace_newlines):
cleaned = cleaned.replace("\n"," | ")
return cleaned
|
4e55b0a0799219ee1c81921630adb7908960b1db
| 40,211 |
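Quick checks covering the three input cases (illustrative):
assert clean_up(None) is None
assert clean_up("  line one\nline two \n") == 'line one | line two'
assert clean_up([" a ", None]) == ['a', None]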
import pydoc
import logging
def load_by_path(path):
"""Load functions or modules or classes.
Args:
path: path to modules or functions or classes,
such as: tf.nn.relu
Return:
modules or functions or classes
"""
    if path is None or path.strip() == '':
        return None
    path = path.strip()
components = path.split('.')
if components[0] == 'tf':
components[0] = 'tensorflow'
path = '.'.join(components)
try:
return pydoc.locate(path)
except pydoc.ErrorDuringImport:
logging.error('load %s failed' % path)
return None
|
b878b7b91d6885778c97c7f5cdbb0039e58176c1
| 40,220 |
def config_line(setting, value):
"""
Generate a single configuration line based on the setting and value
Parameters
----------
setting : str
The configuration setting
value : str
The value for the configuration setting
Returns
-------
str
The configuration line based on the setting and value
"""
if setting in [
'appendfilename', 'dbfilename', 'dbdir', 'dir', 'pidfile', 'unixsocket'
]:
value = repr(value)
return '{setting} {value}'.format(setting=setting, value=value)
|
9999b1f49f5bce7c37ae1958f48eb0958218393a
| 40,222 |
def analysis_dataset_config_for_message(analysis_dataset_configs, message):
"""
Gets the analysis dataset configuration to use to process this message, by looking-up the configuration that refers
to this message's engagement db "dataset" property.
:param analysis_dataset_configs: Dataset configurations to search for the one that relates to the given message.
:type analysis_dataset_configs: list of src.engagement_db_to_analysis.configuration.AnalysisDatasetConfiguration
:param message: Message to retrieve the analysis dataset configuration for.
:type message: engagement_database.data_models.Message
:return: Analysis dataset configuration to use for this message.
:rtype: src.engagement_db_to_analysis.configuration.AnalysisDatasetConfiguration
"""
for config in analysis_dataset_configs:
if message.dataset in config.engagement_db_datasets:
return config
    raise ValueError(f"No analysis dataset configuration found for message '{message.message_id}', which has "
                     f"engagement db dataset {message.dataset}")
|
983ea4428ff305a05eca65c5cc0fc062ee2e6801
| 40,224 |
def nxz(PAxz,PBxz,Npulse,P_times_Dj):
"""
Calculates the number of events in the X or Z sifted basis per pulse
intensity per time slot.
nx[j,t] or nz[j,t]; j = {1:3}, t = {1:Nt}
Parameters
----------
PAxz : float
Probability of Alice preparing a state in the X/Z basis.
PBxz : float
Probability of Bob measuring a state in the X/Z basis.
Npulse : integer/float
Number of pulses sent by Alice.
P_times_Dj : float, array
The element-wise multiplication of the intensity probability array P
with the expected detection rate per time slot array Dj.
Returns
-------
float, array
The number of events in the sifted X/Z basis.
"""
return PAxz*PBxz*Npulse*P_times_Dj
|
3a51cc162a294ec4071341b01734046604ebdeae
| 40,226 |
import math
def gamma_0(m, rho):
"""
See p124 immediately after Eq 2.16.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
term1 = 0.24
term2 = (1 - rho) * (m - 1) * (math.sqrt(4 + 5 * m) - 2 ) / (16 * m * rho)
return min(term1, term2)
|
1e24a151530d1cc2157c386fb4748447788465e4
| 40,230 |