content (stringlengths 39–14.9k) | sha1 (stringlengths 40) | id (int64, 0–710k)
---|---|---|
def torsion(a, b, c):
"""
Find the torsion angle between planes ab and bc.
Arguments:
*a,b,c*
Vector instances.
Returns:
The torsion angle in radians
"""
n1 = a.cross(b)
n2 = b.cross(c)
return n1.arg(n2)
|
898019acb8724c61ab56b887c71cee8d0134eae9
| 694,291 |
import unicodedata
def normalize_email_address(email):
"""
Normalizes an email address by stripping the whitespace, converting to lowercase
    and by normalizing the unicode.
:param email: The email address that needs to be normalized.
:type email: str
:return: The normalized email address.
:rtype: str
"""
return unicodedata.normalize("NFKC", email).strip().lower()
|
9baa0130050e5d1579a88a4e7312a385c2d39e61
| 694,292 |
def _get_dev_port_var(backend, instance=None):
"""Return the environment variable for a backend port.
    Backend ports are stored at BACKEND_PORT.<backend> for backends
    and BACKEND_PORT.<backend>.<instance> for individual instances.
Args:
backend: The name of the backend.
instance: The backend instance (optional).
Returns:
string: The environment variable where the backend port is stored.
"""
port_var = 'BACKEND_PORT.%s' % str(backend).lower()
if instance is not None:
port_var = '%s.%d' % (port_var, instance)
return port_var
|
1eea3611bf16edd477df0becd04436672283e176
| 694,293 |
from typing import Dict
from typing import List
def config_to_match_string(config: Dict, config_space: Dict, keys: List[str]) -> str:
"""
Maps configuration to a match string, which can be used to compare configs
for (approximate) equality. Only keys in `keys` are used, in that ordering.
:param config: Configuration to be encoded in match string
:param config_space: Configuration space
:param keys: Keys of parameters to be encoded
:return: Match string
"""
parts = []
for key in keys:
domain = config_space[key]
value = config[key]
parts.append(f"{key}:{domain.match_string(value)}")
return ",".join(parts)
|
8d577183a2112306cea6c03e48515bff3329b2c3
| 694,295 |
def _format_key(key: str) -> str:
"""Internal function for formatting keys in Tensorboard format."""
return key.title().replace('_', '')
|
498b31240754164b0259ecc0a6ca3c46728db332
| 694,298 |
import fnmatch
def is_matching(filename, patterns=None):
"""Check if a filename matches the list of positive and negative patterns.
Positive patterns are strings like ``"1.txt"``, ``"[23].txt"``, or
``"*.txt"``.
Negative patterns are strings like ``"!1.txt"``, ``"![23].txt"``, or
``"!*.txt"``.
Each pattern is checked in turn, so the list of patterns ``["!*.txt",
"1.txt"]`` will still match ``"1.txt"``.
>>> from django_remote_submission.tasks import is_matching
>>> is_matching("1.txt", patterns=["1.txt"])
True
>>> is_matching("1.txt", patterns=["[12].txt"])
True
>>> is_matching("1.txt", patterns=["*.txt"])
True
>>> is_matching("1.txt", patterns=["1.txt", "!*.txt"])
False
>>> is_matching("1.txt", patterns=["!*.txt", "[12].txt"])
True
"""
if patterns is None:
patterns = ['*']
is_matching = False
for pattern in patterns:
if not pattern.startswith('!'):
if fnmatch.fnmatch(filename, pattern):
is_matching = True
else:
if fnmatch.fnmatch(filename, pattern[1:]):
is_matching = False
return is_matching
|
1cb02d694640664ab3c743f9b30573618082f1cb
| 694,300 |
def simplify(value):
"""Return an int if value is an integer, or value otherwise.
>>> simplify(8.0)
8
>>> simplify(2.3)
2.3
>>> simplify('+')
'+'
"""
if isinstance(value, float) and int(value) == value:
return int(value)
return value
|
ef4d4332544b5a11b85a8d067f8b39d3873f9304
| 694,307 |
def extend_event(events, time, max_time):
"""Extends events in event list by time.
The start time of each event is moved time seconds back and the end
time is moved time seconds later
Args:
events: list of events. Each event is a tuple
time: time to extend each event in seconds
max_time: maximum end time allowed of an event.
    Returns:
        extended_events: list of events with each event extended.
"""
extended_events = events.copy()
for i, event in enumerate(events):
extended_events[i] = [max(0, event[0] - time),
min(max_time, event[1] + time)]
return extended_events
|
8d52afb64e6be7f09ecf619aa0a45d7acfb0fbf2
| 694,309 |
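A quick usage sketch with invented events and times:

events = [[3, 5], [8, 10]]
extended = extend_event(events, time=2, max_time=11)
# extended == [[1, 7], [6, 11]]: start times are clamped at 0, end times at max_time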
import uuid
def generate_random_string(string_length=10):
"""Generate a random string of specified length string_length.
Args:
string_length (int): Size of string to generate.
Returns:
str: Random string of specified length (maximum of 32 characters)
"""
random_str = str(uuid.uuid4())
random_str = random_str.upper()
random_str = random_str.replace("-", "")
return random_str[0:string_length]
|
0637b2cd345bb9e16bb07b5407bb3a198751757d
| 694,311 |
import unicodedata
def normalize(label):
"""normalize string to unicode Normal Form C (composed)"""
return unicodedata.normalize('NFC', label)
|
bccffae75756d696187e81cbf3743579350d2181
| 694,316 |
from typing import Union
from typing import Dict
from typing import Any
from typing import List
import copy
def _get(prop: str) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
"""Returns a deep copy of the global property with the given name
(normally either an object definition or an object definition list)."""
return copy.deepcopy(globals()['_' + prop])
|
3baec32f1dc849db5920cef26dbcc7d13a5f16ef
| 694,319 |
def correctly_classified_negatives(negatives, threshold):
"""Evaluates correctly classifed negatives in a set, based on a threshold
This method returns an array composed of booleans that pin-point, which
negatives where correctly classified for the given threshold
The pseudo-code for this function is:
.. code-block:: python
classified = []
for k in negatives:
if k < threshold:
classified.append(True)
else:
classified.append(False)
Parameters
==========
negatives : numpy.ndarray (1D, float)
The scores generated by comparing objects of different classes
threshold : float
The threshold, for which scores should be considered to be
correctly classified
Returns
=======
classified : numpy.ndarray (1D, bool)
The decision for each of the ``negatives``
"""
return negatives < threshold
|
60248319b0ba887eb601cdbfc289958c738f7e95
| 694,323 |
def constructCommonRating(tup1, tup2):
"""
Args:
tup1 and tup2 are of the form (user, [(movie, rating)])
Returns:
((user1, user2), [(rating1, rating2)])
"""
user1, user2 = tup1[0], tup2[0]
mrlist1 = sorted(tup1[1])
mrlist2 = sorted(tup2[1])
ratepair = []
index1, index2 = 0, 0
while index1 < len(mrlist1) and index2 < len(mrlist2):
if mrlist1[index1][0] < mrlist2[index2][0]:
index1 += 1
elif mrlist1[index1][0] == mrlist2[index2][0]:
ratepair.append((mrlist1[index1][1], mrlist2[index2][1]))
index1 += 1
index2 += 1
else:
index2 += 1
return ((user1, user2), ratepair)
|
761690baf2a9ad261bf40fa3e6e92500235840e4
| 694,324 |
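A small usage sketch of the sorted-merge step above; the user and movie ids are invented:

tup1 = ('alice', [('m1', 4), ('m2', 5)])
tup2 = ('bob', [('m2', 3), ('m3', 1)])
result = constructCommonRating(tup1, tup2)
# result == (('alice', 'bob'), [(5, 3)]): only the commonly rated 'm2' contributes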
def cell_count_from_extent(extent):
"""Returns the number of cells in a grid with the given extent"""
result = 1
for d in range(len(extent)): # list, tuple or 1D numpy array
result *= extent[d]
return result
|
d2350a87c6b69b8a7b1602d32f59ae312ac14d74
| 694,325 |
def resolution_human_readable(output: dict) -> dict:
"""
Creates the human readable dictionary from the output of the resolution of the request
Args:
output: The resolution output that was created for the called request
Returns:
A dictionary containing all the valid fields in the resolution output
"""
hr = {}
for key in output.keys():
if key == 'SubmittedBy':
hr['SubmittedBy'] = output.get('SubmittedBy', {}).get('name', '')
else:
hr[key] = output.get(key, '')
return hr
|
0204d93b843449a476ccdff46d10703ee2eb1e68
| 694,327 |
def getCoverage(contigHeader):
"""
Gets the coverage given a contigHeader.
"""
return float(contigHeader.split(' ')[-1].split('_')[1])
|
77dc814ed96cd2479c86843bf4030fa0476e355b
| 694,331 |
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
|
1e8e66ebc5aa146d47ce24a6631f11723330aede
| 694,335 |
def convert_audio_frequency_to_duty_cycle(freqs):
"""
Converts audio frequencies to duty cycle (out of 1000)
"""
d = (freqs - 1000) / 15
return d
|
c4059a90abb11034a5f9fe13714280cdaa601c77
| 694,336 |
def SlashEscapePackageName(pkg_ref, unused_args, request):
"""Escapes slashes in package name for ListVersionsRequest."""
request.parent = "{}/packages/{}".format(
pkg_ref.Parent().RelativeName(),
pkg_ref.packagesId.replace("/", "%2F"))
return request
|
46d4a20f119ca4655cf6e4f693d9c4b9eb524b1c
| 694,337 |
def interval_partitioning(events):
"""
    Create a sorted collection of `Event(t)`, where an event can be an arrival or
departure, sorted based on the time.
Then starting from the beginning, keep a count of `#arrival - #departure`.
The max value ever encountered of `#arrival - #departure` is the answer.
"""
# CAREFUL: I was thinking of heap, but this solution is so much better than that.
events_sorted = []
for event in events:
events_sorted.append((event[0], True))
events_sorted.append((event[1], False))
events_sorted.sort()
answer = 0
num_resources_required_right_now = 0
for event in events_sorted:
if event[1]:
num_resources_required_right_now += 1
else:
num_resources_required_right_now -= 1
answer = max(answer, num_resources_required_right_now)
return answer
|
7515831cb83220cad3bd36ce47adce805f0ebd63
| 694,339 |
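A minimal usage sketch of the sweep above, with invented intervals:

events = [(1, 4), (2, 5), (9, 12)]
assert interval_partitioning(events) == 2
# (1, 4) and (2, 5) overlap, so two resources are needed at once. Because the
# sort places a departure (False) before an arrival (True) at equal times,
# touching intervals such as (1, 4) and (4, 6) do not count as overlapping.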
def is_constant_fill(bytes, fill_byte):
"""Check a range of bytes for a constant fill"""
return all(b == fill_byte for b in bytes)
|
11106cc2a3d82fe71ca6daf712992d846ee24c96
| 694,342 |
def read(f):
"""Read contents of specified file."""
    with open(f) as handle:
        return handle.read().strip()
|
5b3b0d06b3e746b7af6f9126288883680ed4fc59
| 694,348 |
import torch
def gradient_wrt_input(model, target_weights, initial_guess, n_iter=100, mask=None, lr=1e-1, verbose=True, device=None,
dtype=None):
"""Find input tensor such that the model produces an allocation close to the target one.
Parameters
----------
model : torch.Module
Network that predicts weight allocation given feature tensor.
target_weights : torch.Tensor
Vector of targeted asset weights of shape `(n_assets,)`.
initial_guess : torch.Tensor
Initial feature tensor serving as the starting point for the optimization. The shape is
`(n_channels, lookback, n_assets)` - the sample dimension is not included.
n_iter : int
        Number of iterations of the gradient descent (or other) algorithm.
    mask : None or torch.Tensor
        If specified, a boolean ``torch.Tensor`` of the same shape as `initial_guess` with which
        one can elementwise choose which parts of the input to optimize (True) and which to
        keep the same as the initial guess (False).
lr : float
Learning rate for the optimizer.
verbose : bool
        If True, progress is printed during the optimization.
dtype : None or torch.dtype
Dtype to be used. If specified, casts all used tensors.
device : None or torch.device
Device to be used. If specified, casts all used tensors.
Returns
-------
result : torch.Tensor
Feature tensor of the same shape as `initial_guess` that is mapped by the network (hopefully)
close to `target_weights`.
hist : list
List of losses per iteration.
"""
device = device or torch.device('cpu')
dtype = dtype or torch.float32
x = initial_guess.clone().to(device=device, dtype=dtype)
x.requires_grad = True
if mask is None:
mask = torch.ones_like(x)
elif torch.is_tensor(mask):
if mask.shape != x.shape:
raise ValueError('Inconsistent shape of the mask.')
else:
raise TypeError('Incorrect type of the mask, either None or torch.Tensor.')
# casting
mask = mask.to(dtype=torch.bool, device=device)
model.to(device=device, dtype=dtype)
target_weights = target_weights.to(device=device, dtype=dtype)
optimizer = torch.optim.Adam([x], lr=lr)
model.train()
hist = []
for i in range(n_iter):
if i % 50 == 0 and verbose:
msg = '{}-th iteration, loss: {:.4f}'.format(i, hist[-1]) if i != 0 else 'Starting optimization'
print(msg)
loss_per_asset = (model((x * mask)[None, ...])[0] - target_weights) ** 2
loss = loss_per_asset.mean()
hist.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if verbose:
print('Optimization done, final loss: {:.4f}'.format(hist[-1]))
return x, hist
|
c645ae9389a9d44adcf006d6a772a35f4c42507d
| 694,353 |
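A minimal usage sketch, assuming a toy allocation network (TinyAllocator is invented here; any module mapping an (n_samples, n_channels, lookback, n_assets) tensor to an (n_samples, n_assets) weight vector would do):

import torch
import torch.nn as nn

class TinyAllocator(nn.Module):
    """Toy model: flattens the feature tensor and maps it to softmax weights."""
    def __init__(self, n_channels=1, lookback=4, n_assets=3):
        super().__init__()
        self.linear = nn.Linear(n_channels * lookback * n_assets, n_assets)

    def forward(self, x):
        return torch.softmax(self.linear(x.flatten(start_dim=1)), dim=1)

model = TinyAllocator()
target_weights = torch.tensor([0.5, 0.3, 0.2])
initial_guess = torch.zeros(1, 4, 3)  # (n_channels, lookback, n_assets)
x, hist = gradient_wrt_input(model, target_weights, initial_guess,
                             n_iter=200, verbose=False)
# hist[-1] should be much smaller than hist[0] if the optimization made progress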
def StripPrefix(s: str, prefix: str):
"""Skips the provided prefix from s, if present at its beginning."""
if s.startswith(prefix):
return s[len(prefix):]
return s
|
19dae6a11f61b81f80bbbca2a87b59348ca8cd25
| 694,364 |
def hflip(tensor):
"""Flips tensor horizontally.
"""
tensor = tensor.flip(2)
return tensor
|
d7bcb1ea7d463e0b4826f75437ecc3f1f298dd2f
| 694,368 |
import torch
def cbrt(x):
"""Cube root. Equivalent to torch.pow(x, 1/3), but numerically stable."""
return torch.sign(x) * torch.exp(torch.log(torch.abs(x)) / 3.0)
|
759a873f820daac609b68682cd5f41283f8f175d
| 694,370 |
def merge_lists(a, b):
"""
Merge lists - e.g.,
[1, 2, 3, 4, 5, 6] & ['a', 'b', 'c']
=> [1, 'a', 2, 'b', 3, 'c', 4, 5, 6]
:param a: List a
:param b: List b
:return: Merged lists
"""
result = []
length = min([len(a), len(b)])
for i in range(length):
result.append(a[i])
result.append(b[i])
result += a[length:] + b[length:]
return result
|
8272fe726299953f1cd28aea662e6720fb75c300
| 694,373 |
import io
import math
def get_virtual_tx_size(tx):
"""Return the virtual transaction size as defined in BIP141"""
def streamed_size(tx, include_witness_data):
"""Return the streamed size of a tx, optionally with witness data"""
buffer_ = io.BytesIO()
tx.stream(buffer_, include_witness_data=include_witness_data)
return len(buffer_.getvalue())
base_tx_size = streamed_size(tx, include_witness_data=False)
total_tx_size = streamed_size(tx, include_witness_data=True)
tx_weight = base_tx_size * 3 + total_tx_size
return int(math.ceil(tx_weight / 4.0))
|
fc7ab30ebfaf8cb74b01da2c31ec3bb82bee973a
| 694,375 |
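A worked instance of the BIP141 arithmetic above (the sizes are invented; real values come from streaming the transaction):

base_tx_size = 200   # bytes, serialized without witness data
total_tx_size = 300  # bytes, serialized with witness data
tx_weight = base_tx_size * 3 + total_tx_size  # 900 weight units
vsize = int(math.ceil(tx_weight / 4.0))       # 225 virtual bytes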
def binary_search(array, item):
"""
Given a sorted array, this function performs a binary search to locate the
item.
Parameters: Array, item
Returns: index of the item or -1 if the item is not found
"""
first = 0
last = len(array) - 1
indx = None
while first <= last:
        mid = (first + last) // 2  # integer division: '/' yields a float index in Python 3
# Check if the item is at the middle
if array[mid] == item:
indx = mid
return indx
# If item is greater than half, ignore the left half of the array
elif array[mid] < item:
first = mid + 1
# If item is less than half, ignore the right half of the array
elif array[mid] > item:
last = mid - 1
return -1
|
247b00888eaf1da2d3bf1ec9cb672d9ba0eb5436
| 694,379 |
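A quick usage sketch for the search above:

data = [1, 3, 5, 7, 9]
assert binary_search(data, 7) == 3
assert binary_search(data, 4) == -1  # absent items return -1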
def relatively_prime(a, b):
"""
Returns true or false for whether the given numbers are relatively prime,
having no common factors other than 1.
"""
for number in range(2, min(a, b) + 1):
if a % number == b % number == 0:
return False
return True
|
21db4d8bf44d1829859713fd0ca1cd72b4e62266
| 694,380 |
def verify_users(g, user):
"""Verify a |user| or all names in the collection |user| as being valid
Github users."""
if isinstance(user, str):
user = [ user ]
for u in user:
try:
g.get_user(u)
        except Exception:
raise SystemExit('User "%s" does not exist"' % (u,))
return 1
|
4c033b713e376fc0b630cfa83e083e005fb5de91
| 694,382 |
import yaml
def load_yaml(filepath):
"""
Safely read a yaml file.
:param `filepath`: a filepath to a yaml file
:type filepath: str
:returns: Python objects holding the contents of the yaml file
"""
with open(filepath, "r") as _file:
return yaml.safe_load(_file)
|
cfbda1dcb4e63441d36750d8a63da0b8f8cea777
| 694,385 |
def arg_command_group(parser, group_name, group_argument_list):
"""Add a group of optional arguments to the parser.
Params:
parser: argparse.ArgumentParser where the argument group will be added.
group_name: String with the name of the argument group.
group_argument_list: List of dict objects where each dict specifies an
argument.
Returns:
group: The argument group object that has been created for the parser.
Raises:
ValueError: if the group_argument_list is empty
"""
if not group_argument_list:
raise ValueError('Invalid group_argument_list')
# Add argument group
group = parser.add_argument_group(group_name)
# Add arguments
for arg_dict in group_argument_list:
arg_name = arg_dict['name']
arg_name = f'--{arg_name}'
arg_help = arg_dict['help']
arg_value = arg_dict['default']
if isinstance(arg_value, bool):
# Attention: always interpret boolean flag in a positive sense
# The arg_value specifies where to add the corresponding template
# component by default. The presence of a boolean flag negates the
# corresponding boolean action.
group.add_argument(arg_name, action='store_true', help=arg_help)
else:
group.add_argument(arg_name, default=arg_value, help=arg_help)
return group
|
89b5ebd14fe2f331255aaaa1656c4c4857bc1f40
| 694,389 |
def create_service(*, name):
"""Create a Service resource for the Schema Registry.
Parameters
----------
name : `str`
Name of the StrimziKafkaUser, which is also used as the name of the
deployment.
Returns
-------
service : `dict`
The Service resource.
"""
s = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': name,
'labels': {
'name': name
}
},
'spec': {
'ports': [
{
'name': 'schema-registry',
'port': 8081
}
],
'selector': {
'app': name,
}
}
}
return s
|
eaa16b23dae0f889970d8589271807ba1fbad228
| 694,392 |
def compare_partial_dicts(result, expected):
"""
Make sure all the keys in expected are matched by keys in result, and
that the values stored in those keys match. Result can contain more
items than expected - those are ignored.
Used in the test_lvs, test_pvs and test_vgs tests.
"""
# return all(result[k] == expected[k] for k in expected.keys())
mismatches = 0
for k in expected.keys():
if not result[k] == expected[k]:
print("Failed for key {k}, {r} != {e}".format(k=k, r=result[k], e=expected[k]))
mismatches += 1
return mismatches == 0
|
88c16fdf45b1e8fe917f2d23f0b05c0bcf6b7b6b
| 694,393 |
def detrend_none(x, axis=None):
"""
Return x: no detrending.
Parameters
----------
x : any object
An object containing the data
axis : int
This parameter is ignored.
It is included for compatibility with detrend_mean
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_linear : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
return x
|
73a99772443220314c7ce803fdbd815910e706d0
| 694,398 |
def get_top_k_from_counts(n, counts):
"""
Given a map of counts mapping from a key to its frequency, returns the top k keys (based on frequency) after
normalizing the frequencies by the total.
:param n: The number of keys to return
:param counts: A map of counts mapping from a key to its frequency.
    :return: A list of the top n (key, normalized frequency) pairs, sorted by frequency
"""
total = sum(counts.values())
sorted_counts = sorted([(k, v / total) for (k, v) in counts.items() if k != 'total'], key=lambda x: x[1],
reverse=True)
return sorted_counts[:n]
|
c2f1b1ff0ac7c78ba02472a81b28c0573411830b
| 694,399 |
def param_filter(curr_dict, key_set, remove=False):
"""
Filters param dictionary to only have keys in the key set
Args:
curr_dict (dict): param dictionary
key_set (set): set of keys you want
remove (bool): filters by what to remove instead of what to keep
Returns:
filtered param dictionary
"""
if remove:
return {key: curr_dict[key] for key in curr_dict.keys() if key not in key_set}
else:
return {key: curr_dict[key] for key in key_set if key in curr_dict.keys()}
|
0c9ff5cc546b56749e344d3302de05e523bef190
| 694,403 |
def split_str_w_esc(astring, delimiter, escape='\\'):
"""
    Split a string on a single-character delimiter, honoring an escape
    character (default \\) that suppresses the special meaning of the
    delimiter or of the escape character itself.
    Returns a list of the elements split from the input string.
"""
ret = []
current_element = []
iterator = iter(astring)
for ch in iterator:
if ch == escape:
try:
next_character = next(iterator)
# Do not copy escape character if intended to escape either the
# delimiter or the escape character itself. Copy the escape
# character if it is not in use to escape one of these
# characters.
if next_character not in [delimiter, escape]:
current_element.append(escape)
current_element.append(next_character)
except StopIteration:
current_element.append(escape)
elif ch == delimiter:
# split! (add current to the list and reset it)
ret.append(''.join(current_element))
current_element = []
else:
current_element.append(ch)
ret.append(''.join(current_element))
return ret
|
a07aebaccca29c72843f9b2b80ef34549b137574
| 694,406 |
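A short usage sketch; the doubled backslash is Python source escaping for a single backslash:

assert split_str_w_esc('a\\,b,c', ',') == ['a,b', 'c']  # escaped delimiter is kept literally
assert split_str_w_esc('x,y,z', ',') == ['x', 'y', 'z']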
def ask_for_file(prompt, mode):
"""
Ask user for a file.
:param str prompt: Prompt presented to the user for the input
:param str mode: Mode to be used when opening the file
:return: file object
"""
file_obj = None
while not file_obj:
filename = input(prompt)
try:
file_obj = open(filename, mode)
except Exception:
print("{} is invalid.".format(filename))
return file_obj
|
663b991e7e288b747c2a9e67220d561b1598aa81
| 694,408 |
def to_requests_format(ip, port):
""" Returns the proxy format for requests package """
return {'http': 'http://{}:{}'.format(ip, port),
'https': 'http://{}:{}'.format(ip, port)}
|
54611879183d1d072dd8bd36f062a3cf3119e110
| 694,413 |
def prob_service(state, lambda_1, mu, num_of_servers):
"""
Gets the probability of finishing a service
"""
return (min(state[1], num_of_servers) * mu) / (
lambda_1 + (mu * min(state[1], num_of_servers))
)
|
e6b0df74ec0e2ce5399fd24487e984ac34b2234d
| 694,415 |
import string
import random
def random_string(length: int = 6, chars: str = string.ascii_lowercase) -> str:
"""Create and return a random string."""
return ''.join(random.SystemRandom().choice(chars) for _ in range(length))
|
a06efa5c50c42c15b727fede72dd17ca4e877ada
| 694,418 |
def dbg(x):
"""does nothing, legacy dummy function"""
return ''
|
0d6db000ffe6d5174d09196f2e817ecc3ce20583
| 694,419 |
import itertools
def flatten(lst):
"""
Flattens one level of a list.
Parameters
----------
lst : list
Returns
-------
list
List flattened by one level.
"""
return list(itertools.chain.from_iterable(lst))
|
5d30ca71acabeec57252f1e466dbe250ce6742cd
| 694,423 |
async def guild_only_predicate(ctx):
"""A predicate to test if a command was run in a guild
:param ctx: The context of the predicate
"""
return ctx.guild
|
88b3ef772d80d22fe2fdd855e75370ce229d4d96
| 694,424 |
def set_axis_labels(inargs, xunits, yunits):
"""Set the x and y axis labels."""
if inargs.xlabel:
xname = inargs.xlabel.replace('_', ' ')
else:
xname = inargs.xvar.replace('_', ' ')
xlabel = '%s (%s/yr)' %(xname, xunits)
if inargs.ylabel:
yname = inargs.ylabel.replace('_', ' ')
else:
yname = inargs.yvar[0].replace('_', ' ')
if str(yunits) == 'kg m-2 s-1':
        yunits = r'$kg \: m^{-2} \: s^{-1}$'  # raw string avoids invalid-escape warnings
ylabel = '%s (%s/yr)' %(yname, yunits)
return xlabel, ylabel
|
6ec5d56de735c6c664c757aeb448f0ae8b01fadc
| 694,425 |
from typing import Dict
import json
def dict_to_str(d: Dict) -> str:
""" Dump dict to string. """
return json.dumps(d)
|
2d4ab5fab232e3b77f6de5fcd0b5e7f0990395a0
| 694,427 |
def with_metaclass(meta_class, base_class=object):
"""
:param meta_class: The desired metaclass to use
:param base_class: The desired base class to use, the default one is object
:type base_class: Type
:return: Metaclass type to inherit from
:Example:
.. code-block:: python
class MyMetaClass(type):
def __new__(mcs, name, bases, attrs):
klass = super(MyMetaClass, mcs).__new__(mcs, name, bases, attrs)
klass.added = "Added field"
return klass
class MyClass(with_metaclass(MyMetaClass)):
pass
# This is equivalent to python 2:
class MyClass(object):
__metaclass__ = MyMetaClass
# Or python 3
class MyClass(object, metaclass=MyMetaClass):
pass
"""
return meta_class(
'with_meta_base_' + base_class.__name__ + '_' + meta_class.__name__,
(base_class,),
{}
)
|
ef63016c3939d451b62bb1790a5b9268cde11de1
| 694,428 |
import fcntl
import errno
def xioctl(fd, req, arg):
"""
Wrapper around ioctl that polls it until it no longer returns EINTR
"""
while True:
try:
r = fcntl.ioctl(fd, req, arg)
except IOError as e:
if e.errno != errno.EINTR:
raise
print("Waiting...")
else:
return r
|
42dd19410d17bc10d3e4a28610294a843140bc87
| 694,430 |
def critical_density(wavelength=800):
"""
Get the critical density for a laser with the given wavelength.
Args:
wavelength: Laser wavelength (in nm)
Returns:
(float) Critical density (particles/cm^3)
"""
# From the SI formula
# epsilon_0*electron mass/(electron charge)^2*(2*pi*c/(wavelength))^2/cm^-3
return 1.11485422E27 / wavelength ** 2
|
7ce5ac907148bdad463dfad8c9876c67ed8db6da
| 694,431 |
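A worked instance of the formula above at the default Ti:sapphire wavelength:

n_c = critical_density(800)  # 1.11485422e27 / 800**2
# n_c ≈ 1.742e21 particles/cm^3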
def translate(s, table, deletions=""):
"""translate(s,table [,deletions]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletions are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256. The
deletions argument is not allowed for Unicode strings.
"""
if deletions:
return s.translate(table, deletions)
else:
# Add s[:0] so that if s is Unicode and table is an 8-bit string,
# table is converted to Unicode. This means that table *cannot*
# be a dictionary -- for that feature, use u.translate() directly.
return s.translate(table + s[:0])
|
8124932842fe59296e58b2b34e390dbed85a8d0e
| 694,432 |
from typing import Any
def islist(item: Any) -> bool:
"""Return true if item is list/tuple"""
return isinstance(item, (list, tuple))
|
9d84711020c3c372a42a5f3c7d435ec2696016a4
| 694,433 |
from typing import Optional
import logging
def get_existing_handlers(handlertype) -> Optional[logging.Handler]:
"""
Returns Existing handler or None (if the handler has not yet been added to the root handlers).
"""
return next((h for h in logging.root.handlers if isinstance(h, handlertype)), None)
|
b5cdfbf20133fcc7629c3291f1111fe353b067af
| 694,434 |
def sample_size(gdf,x,i):
"""
Parameters
----------
gdf:geodataframe
x: int
size of sample
i : int
number of samples
Returns
-------
list:
list of samples with input size
"""
sample_list = [gdf.sample(x) for j in range(i)]
return sample_list
|
9ed54a27a82c3f740a8813400254dab96f341c5a
| 694,436 |
def lift_calc(PPV, PRE):
"""
Calculate Lift score.
:param PPV: Positive predictive value (PPV)
:type PPV: float
:param PRE: Prevalence
:type PRE: float
    :return: lift score as float, or the string "None" if it cannot be computed
"""
try:
return PPV / PRE
except (ZeroDivisionError, TypeError):
return "None"
|
1d31719ba3d6c9dfbbdf8bdacff0e6099b9ad623
| 694,442 |
def _expand_dims_nonnegative_axis(axis, rank):
"""Get the nonnegative axis according to the rules of tf.expand_dims."""
# Implementation note: equivalent to get_positive_axis(axis, rank + 1)
if axis < 0:
new_axis = (1 + rank) + axis
if new_axis < 0:
# Note: this is unreachable in the current code.
raise ValueError("Axis out of range: " + str(axis))
return new_axis
elif axis > rank:
# Note: this is unreachable in the current code.
raise ValueError("Axis larger than rank: " + str(axis) + " > " + str(rank))
return axis
|
bf0b01ac8da9ba09ddecf5475db13f71968b00f6
| 694,445 |
def is_nonce_too_low_exception(exception):
"""check if the error thrown by web3 is a 'nonce too low' error"""
if not isinstance(exception, ValueError) or not isinstance(exception.args[0], dict):
return False
message = exception.args[0].get("message", "")
return (
"There is another transaction with same nonce in the queue" in message
or "Transaction nonce is too low" in message
)
|
f4b465fc222eb68b59e5ea6fef410ac68485966e
| 694,446 |
import math
def calculate_weighted_avg(bonds):
"""
Get the weighted average bond length given by the effective coordination number formula in Hoppe (1979)
:param bonds: (list) list of floats that are the bond distances between a cation and its peripheral ions
:return: (float) exponential weighted average
"""
minimum_bond = min(bonds)
weighted_sum = 0.0
total_sum = 0.0
for entry in bonds:
weighted_sum += entry*math.exp(1 - (entry/minimum_bond)**6)
total_sum += math.exp(1-(entry/minimum_bond)**6)
return weighted_sum/total_sum
|
13b69a8cc6a88c67b7d6abd0ad68962a038b141a
| 694,450 |
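A usage sketch with invented bond lengths; equal bonds reduce to the plain mean:

avg = calculate_weighted_avg([2.0, 2.0, 2.0])
# avg == 2.0: all weights are equal, so the weighted average is the mean
avg = calculate_weighted_avg([2.0, 2.1, 2.5])
# the 2.5 bond carries only exp(1 - 1.25**6) ≈ 0.06 of the shortest bond's
# weight, so avg stays close to 2.0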
def extract_paths(thisjson, parent=[]):
"""
Extracts all paths from a json with nested structures.
You can use the resulting paths with get_json_path_element(j, path)
to get all values from a nested json structure.
E.g. running this function for the following json
thisjson = {
"key":"value",
"dict":{
"nested":"value"
},
"plainlist":[
"value1",
"value2"
],
"nestedlist": [
{
"object":"value"
},
"value3"
]
}
will give ['nestedlist,0,object', 'dict,nested', 'plainlist', 'key']
"""
attributes = []
for key in thisjson.keys():
val = thisjson[key]
if isinstance(val, dict):
attributes.extend(extract_paths(val, parent=parent+[key]))
elif isinstance(val, list):
has_dict = False
for i, item in enumerate(val):
if isinstance(item, dict):
has_dict = True
attributes.extend(extract_paths(item, parent=parent+[key, str(i)]))
if not has_dict:
if parent:
attributes.append(','.join(parent)+','+key)
else:
attributes.append(key)
else:
if parent:
attributes.append(','.join(parent)+','+key)
else:
attributes.append(key)
return attributes
|
5fda8da66f9533afeaa39d3141d155db6bf371ad
| 694,451 |
def count_pos(x):
"""Number of positive values in a numpy.array"""
return (x > 0).sum()
|
0c32810e7f0504c1c338519e89080ce0a76b854a
| 694,460 |
def get_all_votes(conn):
"""
Get all data from VoteData table
:param conn:
:return all_votes:
"""
sql = """ SELECT * FROM votedata; """
cur = conn.cursor()
cur.execute(sql)
return cur.fetchall()
|
74bbe9b20a907b1f898c22d2934e1fee5009a00d
| 694,467 |
import torch
def uniform_attention(queries, values):
"""
In the case of uniform attention, the weight assigned to each value is independent of the value of the
corresponding key; we can simply take the average of all of the values. This is the equivalent of the "vanilla"
neural process, where r* is the average of the context set embeddings.
:param queries: Queries correspond to x_target. [batch_size, N_target, key_size]
:param values: Values corresponding to the aggregated embeddings r_i. [batch_size, N_context, value_size]
:return:
"""
N_target = queries.shape[1]
attention = torch.mean(values, dim=1, keepdim=True) # [batch_size, 1, value_size]
output = attention.repeat(1, N_target, 1) # [batch_size, N_target, value_size]
return output
|
1a87ac2705ac7f5c2e97f503bc9182820d668bf4
| 694,468 |
import requests
def get_session(*args, **kwargs):
"""Get requests session, setting global config on here.
Returns:
[requests.seesion]: Requests session.
"""
session = requests.session(*args, **kwargs)
return session
|
42c2471011812dd36d98ed30e6a93d4abc19a4f1
| 694,469 |
def pathFromParent(soup, node):
""" For a given node, walk up the hierarchy in order to find
the first node in the hierarchy above with a 'path' attribute
"""
running = True
current = node
while running:
current = current.parent
path = current.get('path')
if path:
return str(path)
if current == soup.body:
running = False
return None
|
14c25a01cd9cc5c58982ed0e9a92954411247370
| 694,472 |
def post_incr_assign_expr(evaluator, ast, state):
"""Evaluates expression "expr++"."""
var = evaluator.eval_ast(ast["expr"], state)
ret = var.val.copy()
var += 1
return ret
|
78748568fb0c523fdef8beeb26965985fcf4740a
| 694,474 |
from typing import List
def accuracy_score(y_true: List, y_pred: List) -> float:
"""
Compute accuracy score
Parameters
----------
y_true : list
True labels
y_pred : list
Predicted labels
Returns
-------
float
Accuracy score
Examples
--------
>>> from evaluations.classification import accuracy_score
>>> accuracy_score([1, 1, 0, 0], [1, 1, 1, 0])
0.75
"""
count_true = sum([i == j for i, j in zip(y_true, y_pred)])
count_total = len(y_true)
return count_true / count_total
|
552cc0fab91b8dd62b08e512fb2d9f232f5b3606
| 694,476 |
def factorial(n, show):
"""
=> calculates the factorial of a number.
:param n: The number to be factored.
:param show: (Optional True or False) shows the process or not.
:return: return the value and shows it
"""
f = 1
for c in range(n, 0, -1):
f *= c
if show:
if c == 1:
print(f'{c} = ', end='')
else:
print(f'{c} x ', end='')
return f
|
838d4b2b8a6482fd7d3fe7276f2473cdc2a775ed
| 694,478 |
from pathlib import Path
import json
def data_dir(tmp_path, data_object):
"""Return a path to a directory loaded with 12 mock files
Args:
tmp_path (pathlib.Path): Path to temporary directory created by generic pytest fixture tmp_path
Returns:
pathlib.Path: Path to temporary directory holding 4 mock files, simulating the data folder
"""
with open(Path(tmp_path, str(".gitkeep")), "w") as f:
f.write("test1")
with open(Path(tmp_path, str("test.xlsx")), "w") as f:
f.write("test2")
for i in range(2):
with open(Path(tmp_path,"2020-01-01T0{}-00-00Z_data.json".format(str(i))), "w") as f:
json.dump(data_object, f)
print(tmp_path)
return tmp_path
|
13ab0919d3f41440040ea18b993dd769ee1a3c20
| 694,479 |
def fahrenheit(celsius):
""" Convert tempature celsius into farenheit """
return ((9 * celsius) / 5) + 32
|
e09cfe2e6e4402f77d8c15a52dc2d6c3ca3019ef
| 694,480 |
def thread(x, *fns):
"""Threads `x` left-to-right through the `fns`, returning the final result.
thread x :: a -> a
thread x, *fns :: a_0, *(a_i -> a_i+1) -> a_n"""
for f in fns:
x = f(x)
return x
|
b114a807b67a0d105f600e0348b21e47661c3d26
| 694,482 |
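A small usage sketch of the left-to-right threading:

result = thread(5, lambda x: x + 1, lambda x: x * 2)
# result == 12: 5 -> 6 -> 12, i.e. the second function is applied to the
# output of the first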
def rpc_completion_callback(callback):
"""Verify callback is callable if not None
:returns: boolean indicating nowait
:rtype: bool
:raises: TypeError
"""
if callback is None:
# No callback means we will not expect a response
# i.e. nowait=True
return True
if callable(callback):
# nowait=False
return False
else:
raise TypeError('completion callback must be callable if not None')
|
4149e3faa3bcff54ed494ad2ca53fa2457a8f694
| 694,490 |
import torch
def l2_loss(pred, target):
"""L2 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
assert pred.size() == target.size() and target.numel() > 0
loss = torch.abs(pred - target)**2
return loss
|
078b641ec7f6ec8fb88be188be214f2ead9c7dc8
| 694,491 |
def can_monitor(message):
"""
Determine if the user who sent the message is a race monitor.
Returns False if monitor status is indeterminate, e.g. message was sent
by a bot instead of a user.
"""
return message.get('is_monitor', False)
|
5370e2a41ad1c3e0b7173561aeaf19eec642181c
| 694,492 |
import pickle
def load(filename: str):
"""Loads a network from file and returns a list of modules in that network.
The connections between the module have been set according to the file.
Args:
filename (str): The path to the .rtc file containing a network.
Returns:
(list, list): A list of Modules that are connected and ready to be run
and a list of connections between those modules.
"""
    with open(filename, "rb") as file_obj:
        mc_list = pickle.load(file_obj)
module_dict = {}
module_list = []
connection_list = []
for m in mc_list[0]:
mod = m["retico_class"](**m["args"])
module_dict[m["id"]] = mod
module_list.append(mod)
for ida, idb in mc_list[1]:
module_dict[idb].subscribe(module_dict[ida])
connection_list.append((module_dict[idb], module_dict[ida]))
return (module_list, connection_list)
|
39f8f8e7f96c44528c3f691a5566760256c284fd
| 694,496 |
def calculateMRR(ranks):
"""
Return an MRR score based on the list of rank predictions
"""
MRR = 0
for rank in ranks:
MRR += 1.0 / rank
return MRR / len(ranks)
|
006634507655a3c48be960bd9dbceba68e7f4f68
| 694,499 |
def _to_bytes_or_false(val):
"""
An internal graph to convert the input to a bytes or to False.
The criteria for conversion is as follows and should be python 2 and 3
compatible:
- If val is py2 str or py3 bytes: return bytes
    - If val is py2 unicode or py3 str: return val.encode('ascii')
- Otherwise, return False
"""
if isinstance(val, bytes):
return val
else:
try:
return val.encode('ascii')
except AttributeError:
return False
|
6fd24ecdb94784d0204cb50b43f7369bf7ac420a
| 694,500 |
def _get_tag_path(repository, tag=None):
"""Return the path for a tag, or list of tags if tag is empty.
"""
if tag:
return '/acr/v1/{}/_tags/{}'.format(repository, tag)
return '/acr/v1/{}/_tags'.format(repository)
|
51500ae144362c27c65ecc2e862ff043ef0f565e
| 694,506 |
def pastis_matrix_measurements(nseg):
"""
Calculate the total number of measurements needed for a PASTIS matrix with nseg segments
:param nseg: int, total number of segments
:return: int, total number of measurements
"""
total_number = (nseg**2 + nseg) / 2
return int(total_number)
|
133cb69837651ac3e6d0891179b365367fe848cc
| 694,508 |
def convert_percent(val):
"""
Convert the percentage string to an actual floating point percent
"""
new_val = val.replace('%', '')
return float(new_val) / 100
|
4d213cf7b767ba82858bb5d922f6e2c16b7d251e
| 694,509 |
from typing import List
import re
def split_text(text: str, max_length: int = 4096) -> List[str]:
"""Splits text by lines. If some line is too long, by spaces
"""
chunks = text.splitlines(keepends=True)
ans = []
cur = ""
while chunks:
cur_chunk = chunks.pop(0)
if len(cur_chunk) > max_length:
            split_chunk = re.split(r'(\S*\s)', cur_chunk)  # Split by whitespace, keeping the delimiter
if len(split_chunk) == 1:
# if no spaces, split by length
split_chunk = [
cur_chunk[i: i + max_length]
for i in range(0, len(cur_chunk), max_length)
]
chunks = split_chunk + chunks
continue
if len(cur) + len(cur_chunk) > max_length:
ans.append(cur)
cur = cur_chunk
else:
cur += cur_chunk
if cur:
ans.append(cur)
return ans
|
e3b9cc032020c96a3365d34dc49a0854bc6760b1
| 694,511 |
from pathlib import Path
def validate_path(path, allow_none=True):
"""
When we have multiple types of files and directories, some may be allow to be None as they will not be required
whilst others like the working directory will always be required. This method is a generalisation of individual
setters.
:param path: Path to a directory or file
:type path: str
:param allow_none: Defaults to True, if true if a path is set to none it will just return None. If False, an
assertion will be run to validate that it is not none. In both cases, should the file not be None, then the
path is validated via Path.exists()
:type allow_none: Bool
:return: Path to the current file or directory if None return is not allowed, otherwise the Path return is
optional and the return may be none.
"""
if allow_none and not path:
return None
else:
assert path and Path(path).exists(), f"Path is invalid: {path}"
return Path(path)
|
7cfffa844438b76ee00a69b30b5ba8347c48615c
| 694,520 |
def _default_config() -> dict:
"""
Creates a default configuration, used if none was provided or if the provided configuration did not cover all values.
Please be careful with the spelling of the dictionary.
:return: The default configuration of the program.
"""
default_config = {}
# Program Wide Attributes
default_config["seed"] = 11
default_config["transformations"] = 50
# Supported are "global" and "per_class" (Spelling is important!)
default_config["transformationscope"] = "global"
# Transformer Related Attributes
default_config["AddUnusedVariableTransformer"] = True
default_config["UnusedVariableStringRandomness"] = "full"
default_config["AddCommentTransformer"] = True
default_config["AddCommentStringRandomness"] = "full"
default_config["RenameParameterTransformer"] = True
default_config["RenameParameterStringRandomness"] = "full"
default_config["RenameVariableTransformer"] = True
default_config["RenameVariableStringRandomness"] = "full"
default_config["AddNeutralElementTransformer"] = True
default_config["LambdaIdentityTransformer"] = True
return default_config
|
80e3c380153976ffb50fd71f969810fe93fa5c68
| 694,522 |
def get_unique_name(name, elems):
"""
Return a unique version of the name indicated by incrementing a numeral
at the end. Stop when the name no longer appears in the indicated list of
elements.
"""
digits = []
for c in reversed(name):
if c.isdigit():
digits.append(c)
else:
break
stem = name[0:len(name) - len(digits)]
val = ''.join(digits)[::-1] or 0
i = int(val)
while True:
i += 1
new_name = ''.join([stem, str(i)])
if new_name not in elems:
break
return new_name
|
ead72253480ae774b830b82119e91db848504348
| 694,525 |
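A quick sketch of the numeral-increment behaviour described above:

assert get_unique_name('layer1', ['layer1', 'layer2']) == 'layer3'
assert get_unique_name('node', ['node']) == 'node1'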
import re
def flair_template_checker(input_text):
"""Small function that checks whether a given input is valid as a
Reddit post flair ID.
"""
try:
regex_pattern = r"^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$"
valid = re.search(regex_pattern, input_text)
except TypeError:
return False
    return bool(valid)
|
242717f499a632d57765754f9872728ae6433fcc
| 694,529 |
def _index_to_timestamp(index):
"""Convert a pandas index to timestamps if needed.
Needed to parse pandas PeriodIndex to pyplot plotting functions."""
return index.to_timestamp() if hasattr(index, 'to_timestamp') else index
|
93fdd7bff3e32247b9b4648c75e75e63244ad17c
| 694,530 |
import time
def gen_timestamp() -> int:
"""gen_timestamp.
Generate a timestamp.
Args:
Returns:
int: Timestamp in integer representation. User `str()` to
transform to string.
"""
return int(1.0 * (time.time() + 0.5) * 1000)
|
1197eee6d349d6c1e3e5fd017920fa56f15a0f0b
| 694,531 |
def receive_data_in_chunks(sock, buffersize):
"""Receive data in chunks of size buffersize from the socket"""
chunk = sock.recv(buffersize)
chunks = [chunk]
# keep reading until chunks are available
while len(chunk.strip()):
chunk = sock.recv(buffersize)
chunks.append(chunk)
data = b''.join(chunks).strip()
return data
|
a3953a9240c021b41495f58e06ae77d2f5e0d9c9
| 694,534 |
def _nt_sum(cobj, prop, theta):
"""
Create sum expressions in n-t forms (sum(n[i]*theta**t[i]))
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
theta: expression or variable to use for theta in expression
Returns:
Pyomo expression of sum term
"""
# Build sum term
i = 1
s = 0
while True:
try:
ni = getattr(cobj, f"{prop}_coeff_n{i}")
ti = getattr(cobj, f"{prop}_coeff_t{i}")
s += ni * theta**ti
i += 1
except AttributeError:
break
return s
|
7ef4674b27069d2e254ef2cb1839fbc67c571029
| 694,537 |
def calc_max_length(tensor):
"""Find the maximum length of any tensor"""
return max(len(t) for t in tensor)
|
21ad43f14d8952261a45b8efcd927b82eadc83bd
| 694,540 |
def make_tuple(t):
"""
return the input if it's already a tuple.
return a tuple of the input if the input is not already a tuple.
"""
return t if isinstance(t, tuple) else (t, t)
|
70fd74c76db30f866b3d248d6444c2d02b31f36c
| 694,541 |
def char_array_to_string(arr):
"""
Converts a NumPy array of byte-long ASCII codes into an ASCII string.
e.g. [65, 67, 71, 84] becomes "ACGT".
"""
    return arr.tobytes().decode("ascii")  # tobytes() replaces the removed tostring()
|
b5bc74ad96a34d619311ca6226075a3378989f3d
| 694,544 |
def get_column_headers(worksheet) -> list:
"""Get list of column headers from Excel sheet."""
num_columns = worksheet.max_column
headers = []
for j in range(1, num_columns+1):
cell_obj = worksheet.cell(row=1, column=j)
headers.append(cell_obj.value)
return headers
|
3181f8e9d3e9fa1e0967f4edd1090032e0f773ed
| 694,545 |
def unpacked_properties(full_prop_name, count=2):
"""Return properties that "unpack" a tuple property
For example, if a class defines::
x, y = unpacked_properties('pos')
then ``obj.x`` will return the same as ``obj.pos[0]``, and setting
``obj.x`` will update ``obj.pos`` to (new x, old y).
"""
def get_prop(index):
def getter(self):
return getattr(self, full_prop_name)[index]
def setter(self, value):
old = getattr(self, full_prop_name)
new = tuple(value if i == index else v for i, v in enumerate(old))
setattr(self, full_prop_name, new)
doc_template = 'Getter/setter for self.{0}[{1}]'
return property(getter, setter, doc=doc_template.format(full_prop_name, index))
return [get_prop(i) for i in range(count)]
|
cfd911afc1a313d8a5fda7d82e2a3825566ea59a
| 694,547 |
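A usage sketch matching the docstring's example:

class Point:
    def __init__(self):
        self.pos = (0, 0)
    x, y = unpacked_properties('pos')

p = Point()
p.x = 3          # updates p.pos to (3, 0)
assert p.pos == (3, 0)
assert p.y == 0  # reads p.pos[1]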
def SetWriterMolProps(Writer, Mol):
"""Setup molecule properties for a writer to output.
Arguments:
Writer (object): RDKit writer object.
Mol (object): RDKit molecule object.
Returns:
object : Writer object.
"""
PropNames = list(Mol.GetPropNames())
if len(PropNames):
Writer.SetProps(PropNames)
return Writer
|
2e4553c99fbd9c82e1ef451530d8bc10d7e18cf7
| 694,551 |
def init_task(parts, name='init-pdpart'):
"""Create a doit task to initialize a Partitioned directory.
Parameters
----------
parts : pdpart.Partitioned
Partitioned object to be initialized
name : str
name of task, defaults to 'init-pdpart'
"""
def _wrapper():
"""withhold return value for compatibility with doit"""
parts.init_dir()
return {
'name': name,
'actions': [(_wrapper, [], {})],
'file_dep': [],
'targets': [parts.fn_meta],
'uptodate': [True],
}
|
a8c1cb27461b48b5c9f0a8733b058a24403a5227
| 694,554 |
def partition(l, size):
"""
Partition the provided list into a list of sub-lists of the provided size. The last sub-list may be smaller if the
length of the originally provided list is not evenly divisible by `size`.
:param l: the list to partition
:param size: the size of each sub-list
:return: a list of sub-lists
"""
return [l[i:i + size] for i in range(0, len(l), size)]
|
6d24bdf1b8e46450b7070c2819180cf40fd418b3
| 694,556 |
def is_even(x):
"""
Return whether or not an integer ``x`` is even, e.g., divisible by 2.
EXAMPLES::
sage: is_even(-1)
False
sage: is_even(4)
True
sage: is_even(-2)
True
"""
try:
return x.is_even()
except AttributeError:
return x % 2 == 0
|
6149a4e266070d6d0dd0b7f03df30e3ee1685edf
| 694,559 |
import logging
def get_logger(name='default'):
"""Return the logger with name value automatically or editable by name param."""
    # locals() inside a function never contains __name__; globals() does
    name = globals().get('__name__') if name == 'default' else name
return logging.getLogger(name)
|
f73f507b588e085dd1d261cef6f72a9d390b3285
| 694,561 |
def drop(n):
"""Drop n items from collection (first in dropped)."""
def generator(coll):
for i, item in enumerate(coll):
if i >= n:
yield item
return generator
|
1852389372010ba6652e653e9c605a94859a24ca
| 694,562 |
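A usage sketch: drop(n) returns a generator factory, so it can be applied to any iterable:

first_two_dropped = drop(2)
assert list(first_two_dropped([1, 2, 3, 4])) == [3, 4]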