content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---|
def get_contrib_read_ids(indexes, reads):
"""
Takes a set of indexes from assign_reads and the list of read signatures
plus the dictionary mapping signatures to aligned read IDs and returns
the set of corresponding aligned read IDs (BAM query IDs).
"""
hap_read_ids = set()
for read_idx in indexes:
for read_id in reads[read_idx]:
hap_read_ids.add(read_id)
return hap_read_ids
|
1839790b85917bed53675ae43cd04fafedfceea9
| 34,566 |
def default_cost(alignment_matrix, i, j, tokens_1, tokens_2):
"""
Ignore input and return 1 for all cases (insertion, deletion, substitution)
"""
return 1
|
ab5c721fad6d6a6cd5f5e7ee3262b0beed8c56e9
| 34,576 |
def saved_certificate_to_cnf(file_path):
    """ Load a certificate from file
    Parameters
    ----------
    file_path : string
        Path of the file that contains the certificate of a cnf
    Returns
    -------
    set[(string, bool)]
        The object that represents the loaded certificate
    """
    cert = set()
    with open(file_path, "r") as f:
        for line in f:
            if line[0] == 'v' and line[2] != '0':
                for literal in line.split(' ')[1:]:
                    numeric_literal = int(literal)
                    cert.add(('x{}'.format(abs(numeric_literal)), numeric_literal > 0))
    return cert
|
b8a5b6d327c406fa9f8b1abdac97781f5813208b
| 34,582 |
def speaking_player(bot, state):
""" A player that makes moves at random and tells us about it. """
move = bot.random.choice(bot.legal_positions)
bot.say(f"Going {move}.")
return move
|
58392931510a86ddf1fd6bdc3402cdf1665241d0
| 34,583 |
def trim(d, prepended_msg):
"""remove the prepended-msg from the keys of dictionary d."""
keys = [x.split(prepended_msg)[1] for x in d.keys()]
return {k:v for k,v in zip(keys, d.values())}
|
a7bf495750713a51c74dfd95dbbabcbab76f1910
| 34,587 |
from datetime import datetime
def part_of_day() -> str:
"""Checks the current hour to determine the part of day.
Returns:
str:
Morning, Afternoon, Evening or Night based on time of day.
"""
am_pm = datetime.now().strftime("%p")
current_hour = int(datetime.now().strftime("%I"))
if current_hour in range(4, 12) and am_pm == 'AM':
greet = 'Morning'
elif am_pm == 'PM' and (current_hour == 12 or current_hour in range(1, 4)):
greet = 'Afternoon'
elif current_hour in range(4, 8) and am_pm == 'PM':
greet = 'Evening'
else:
greet = 'Night'
return greet
|
5736b7049924197595341a173e642b8e3ea9e856
| 34,588 |
import glob
def find_file(path):
"""
Search file
Parameters
----------
path : str
Path and/or pattern to find files.
Returns
-------
str or list of str
List of files.
"""
file_path = glob.glob(path)
if len(file_path) == 0:
raise ValueError("No such file {}".format(file_path))
if len(file_path) == 1:
file_path = file_path[0]
return file_path
|
7f7dad61a2faddd4ab6e6735419abb0b50196d67
| 34,598 |
from typing import List
def parse_tags(s: str) -> List[str]:
"""
Parse comma separated tags str into list of tags.
>>> parse_tags('one tag')
['one tag']
>>> parse_tags(' strip left and right ends ')
['strip left and right ends']
>>> parse_tags('two, tags')
['two', 'tags']
>>> parse_tags('"quoted, string" is one tag')
['quoted, string is one tag']
>>> parse_tags(', empty tags, , will be skipped, ')
['empty tags', 'will be skipped']
Args:
s: The comma separated tags str.
Returns:
The parsed tags.
"""
tags = []
buf = []
in_quoted = None
for c in s:
if in_quoted:
if c == in_quoted:
in_quoted = None
else:
buf.append(c)
elif c == '"' or c == '\'':
in_quoted = c
elif c == ',':
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
buf.clear()
else:
buf.append(c)
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
return tags
|
7529f0b6746bdfe7996eb4a963ae4e07622183aa
| 34,599 |
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
|
37ccf5d80e10c5f90e1b2cfb0a085f718ba3d845
| 34,602 |
def _ansible_verbose(verbose_level=1):
"""
Return an ansible verbose flag for a given Cliff app verbose
level to pass along desired verbosity intent.
"""
flag = ''
if verbose_level > 1:
flag = '-{}'.format("v" * (verbose_level - 1))
return flag
|
0313b5f7c41858c6d2ecaba2275cb56cd89b628a
| 34,603 |
import math
def neg_exp_distribute(mean_time, U):
"""
Generate series satisfied negative exponential distribution
X = -mean*lnU
Parameters:
-----------
mean_time: mean time
U: a list as a parameter for negative exponential time
Return:
-------
X: Generated time (interarrival time or service time)
"""
return [-1.0*mean_time*math.log(u) for u in U]
|
680c92d629208268aae9c8835da9e70a6a9263d3
| 34,605 |
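A minimal usage sketch for neg_exp_distribute (illustrative, not part of the original dataset row): draw uniform samples in (0, 1] and map them to exponentially distributed times with mean 2.0 via X = -mean * ln(U).
import random

U = [random.uniform(1e-12, 1.0) for _ in range(3)]   # keep u > 0 so log(u) is defined
interarrival_times = neg_exp_distribute(2.0, U)      # positive floats from an Exp(mean=2.0) distribution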
def figshare_metadata_readme(figshare_dict: dict) -> dict:
"""
Function to provide shortened dict for README metadata
:param figshare_dict: Figshare API response
:return: README metadata based on Figshare response
"""
readme_dict = {}
if 'item' in figshare_dict:
print("figshare_metadata_readme: Using curation responses")
readme_dict['article_id'] = figshare_dict['item']['id']
readme_dict['curation_id'] = figshare_dict['id']
figshare_dict = figshare_dict['item']
else:
readme_dict['article_id'] = figshare_dict['id']
single_str_citation = figshare_dict['citation']
# Handle period in author list. Assume no period in dataset title
author_list = ([single_str_citation.split('):')[0] + ').'])
author_list += [str_row + '.' for str_row in
single_str_citation.split('): ')[1].split('. ')]
readme_dict.update({
'title': figshare_dict['title'],
'description': figshare_dict['description'],
'doi': f"https://doi.org/{figshare_dict['doi']}",
'preferred_citation': author_list,
'license': figshare_dict['license'],
'summary': figshare_dict['description'],
'references': figshare_dict['references'],
})
return readme_dict
|
5c7a5559d4e09767032888465156eb9ea291d6c2
| 34,608 |
def add_response_tokens_to_option(_option, response_tokens, response_tokens_from_meta):
"""
:param _option: (delivery_api_client.Model.option.Option) response option
:param response_tokens: (list<str>) list of response tokens from decisioning context
:param response_tokens_from_meta: (list<str>) list of response tokens from decisioning rule meta
:return: (delivery_api_client.Model.option.Option) Returns option updated with response tokens
"""
_option.response_tokens = dict(response_tokens_from_meta)
_option.response_tokens.update(response_tokens)
return _option
|
7c30f7cdfda5db9cc4e3e9ea3eb8d887ad491747
| 34,609 |
def render_string(s, f, colour, background, antialiasing = True):
"""
Create pygame.Surface and pygame.Rect objects for a string, using a
given font (f) and colour.
Parameters:
s: the string to render.
f: the font in which to render s.
colour: the colour of text to use, expressed as an RGB list or tuple.
background: the background colour.
Keyword Parameters:
antialiasing: indicates whether text is rendered with antialiasing;
defaults to True.
Returns:
s: the pygame.Surface object.
r: the pygame.Rect object.
"""
s = f.render(s, antialiasing, colour, background)
r = s.get_rect()
return s, r
|
5f6c72d55a864fd607503ff887edc132cfdd5e3c
| 34,613 |
def replace_entities(df, col_name, entity_dict):
""" A function to replace values in a Pandas df column given an entity dict, as created in associate_entities()
Args:
df (DataFrame) : A Pandas DataFrame
col_name (string) : A column in the Pandas DataFrame
entity_dict (dict) : A dictionary as created in the associate_entities() function
"""
if col_name not in df.columns:
raise ValueError("Column does not exist!")
df[col_name].replace(entity_dict, inplace=True)
return df
|
71aa5bbf5f8a42a6fa7a85d51c280307dec2ee96
| 34,614 |
from typing import List
def clean_strings(string_list: List[str]) -> List[str]:
"""
Clean up a list of strings ready to be cast to numbers.
"""
clean_string = []
for string in string_list:
new_string = string.strip()
clean_string.append(new_string.strip(","))
return clean_string
|
59b7653f36771b79588381ba255acf89c0294c02
| 34,616 |
def vertical_path(size):
"""
Creates a generator for progressing vertically through an image.
:param size: A tuple (width, height) of the image size
:return: A generator that yields a set of columns through the image.
Each column is a generator that yields pixel coordinates.
"""
width, height = size
return (((x, y) for y in range(height)) for x in range(width))
|
91d42be4bdd8f501405f226a0a158491932d6b2b
| 34,617 |
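A minimal usage sketch for vertical_path (illustrative, not part of the original dataset row): walk a 2x3 image column by column.
for column in vertical_path((2, 3)):   # width=2, height=3
    for x, y in column:
        print(x, y)
# visits (0,0), (0,1), (0,2), then (1,0), (1,1), (1,2)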
def evaluate(f, x, y):
    """Applies the function f to x and y and returns the result."""
return f(x, y)
|
894bea46653312e7a600788df268af0e9e26fbee
| 34,635 |
def compare(initial, candidate):
"""
Compares two shingles sequence and returns similarity value.
:param initial: initial sentence shingles sequence
:param candidate: compared sentence shingles sequence
:return: similarity value
"""
matches = 0
for shingle in initial:
if shingle in candidate:
matches += 1
return matches * 2 / float(len(initial) + len(candidate)) * 100
|
07bd224c422db70382875647028cb159a2810686
| 34,649 |
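A worked example for compare (illustrative, not part of the original dataset row): two shingle sequences of length three that share two shingles.
initial = ["the cat", "cat sat", "sat on"]
candidate = ["the cat", "cat sat", "sat down"]
compare(initial, candidate)   # 2 matches -> 2 * 2 / (3 + 3) * 100 = 66.66...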
def field_to_int(field):
"""
Return an integer representation. If a "-" was provided return zero.
"""
if field == "-":
return 0
return int(field)
|
1cb3910a77abce808fd35a208af91945a5759322
| 34,651 |
def stress_model(strain, modulus):
"""
Returns the linear estimate of the stress-strain curve using the strain and estimated modulus.
Used for fitting data with scipy.
Parameters
----------
strain : array-like
The array of experimental strain values, unitless (or with cancelled
units, such as mm/mm).
modulus : float
The estimated elastic modulus for the data, with units of GPa (Pa * 10^9).
Returns
-------
array-like
The estimated stress data following the linear model, with units of Pa.
"""
return strain * modulus * 1e9
|
5e84742805ecfcfda0299d88ed28e439adbfbadc
| 34,652 |
import pickle
def load_dictionary(file_path):
"""
Loads a categorical variable dictionary that was saved in pickle format.
"""
with open(file_path, "rb") as dictionary_file:
return pickle.load(dictionary_file)
|
ea500d9739d725f2889f83a3d935f708600eb52e
| 34,658 |
def getRawName(path):
"""
Given a filename with no path before it, returns just the name, no type
:returns filename
"""
loc = path.rfind(".")
    if loc == -1:
        return path
else:
return path[0:loc]
|
b3d48fe92f52899346beae0929eedab48bd7dfef
| 34,662 |
from pathlib import Path
def load_html(filename):
"""
Load HTML from file
"""
return Path(filename).read_text()
|
50ddc08e7fc7a90bc9e1f3818d7b7eb564b1c98b
| 34,664 |
import uuid
def is_uuid_like(val):
"""
Check if value looks like a valid UUID.
"""
try:
uuid.UUID(val)
except (TypeError, ValueError, AttributeError):
return False
else:
return True
|
0f5113f9fe6e04e2377a0921257030b7c116aa25
| 34,665 |
def get_summary_of_old_data(old_data):
"""Return a string summarizing the OLD resources that will be created.
"""
summary = [u'\nOLD resources to be created.']
for resource_name in sorted(old_data.keys()):
resource_list = old_data[resource_name]
summary.append(u' %s: %d' % (resource_name, len(resource_list)))
return u'\n'.join(summary)
|
7af2b605ccc2d131b6841d586f0c7f9a49728481
| 34,667 |
def shares(shares):
"""Returns integer with comma notation"""
try:
shares = int(shares)
except (ValueError, TypeError, UnicodeEncodeError):
return ''
return '{0:,}'.format(shares)
|
89b2dcc444b32c642c53967f445f6cab94cd50eb
| 34,668 |
def averageGuessesFromGuessMap(guessMap: dict[int, int]) -> float:
"""Return average guesses from map using weighed sum in form <guesses: words>,
e.g <1:20, 3:5> returns 1.75"""
weighedSum = 0
wordsCount = 0
for key,item in guessMap.items():
weighedSum = weighedSum + key*item
wordsCount = wordsCount + item
return weighedSum/wordsCount
|
58d494133386915f7c7c7bc3a75becc129c7ff41
| 34,670 |
import time
def epoch_to_local_date(timestamp: float):
"""Epoch timestamp to `day/month/year - time` representation."""
return time.strftime("%d/%b/%Y - %X", time.localtime(int(timestamp)))
|
cd4036abb4095fcc56cfaf667408ac4befec1766
| 34,671 |
def read_folds(fname):
""" Reads a list of fold index lists.
Format: let the training set indices range from 0... n_samples-1. Each line
in a fold file should contain a subset of these indices corresponding to a
single fold. For example, let n_samples = 11, then:
0 3 4 8
1 5 9 10
2 6 7
would correspond to a fold-file with three folds, with first and second fold
containing 4, and last one 3 instances. The reader would return the list
[[0,3,4,8],[1,5,9,10],[2,6,7]]
Parameters
----------
fname : string
input file name
Returns
-------
folds : a list of lists, each containing the indices corresponding to a single fold
"""
f = open(fname)
folds = []
for i, line in enumerate(f):
#We allow comments starting with #
cstart = line.find("#")
if cstart != -1:
line = line[:cstart]
fold = []
foldset = set([])
line = line.strip().split()
for x in line:
try:
index = int(x)
except ValueError:
raise Exception("Error when reading in fold file: malformed index on line %d in the fold file: %s" % (i + 1, x))
if index < 0:
raise Exception("Error when reading in fold file: negative index on line %d in the fold file: %d" % (i + 1, index))
if index in foldset:
raise Exception("Error when reading in fold file: duplicate index on line %d in the fold file: %d" % (i + 1, index + 1))
fold.append(index)
foldset.add(index)
folds.append(fold)
f.close()
return folds
|
9146f332dc6da9d212f1dbec95e8a9ff229c6220
| 34,673 |
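A minimal usage sketch for read_folds (illustrative, not part of the original dataset row): write the fold file from the docstring, with a trailing comment, and read it back.
with open("folds.txt", "w") as out:
    out.write("0 3 4 8\n1 5 9 10\n2 6 7  # last fold\n")

folds = read_folds("folds.txt")
# folds == [[0, 3, 4, 8], [1, 5, 9, 10], [2, 6, 7]]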
def get_lang_abbr_from_resp(http_resp):
"""
This function takes a requests object containing a response from
detectlanguage.com, parses it, and returns the abbreviation of
the language detected.
"""
return http_resp.json()["data"]["detections"][0]["language"]
|
6635b88306fbc4f149307133c0a118542a8709a9
| 34,677 |
def _reindex_values(new_index, values, initial_value):
"""
Conform values to new index
Parameters
----------
new_index : pandas.Index
values : pandas.Series
initial_value : float
Returns
-------
pandas.Series
"""
first_step = values.index[0]
new_values = values.reindex(new_index, method="ffill")
new_values.loc[new_values.index < first_step] = initial_value
return new_values
|
31b1c197ebb47e2d641db21ea5ea1763e0bddb18
| 34,679 |
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
"""Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added.
"""
record = {
"path": file_path,
"mode": "100755" if is_executable else "100644",
"type": "blob",
"content": file_contents,
}
tree.append(record)
return tree
|
c1d10dca15cf25f2f638deda423a7807332d4bb0
| 34,689 |
def _is_whitespace_or_comment(directive):
"""Is this directive either a whitespace or comment directive?"""
return len(directive) == 0 or directive[0] == '#'
|
c11f19ca8047194f2fe35d17dc7d058f029ccab9
| 34,690 |
import re
def get_easy_apply(soup, verbose=False):
"""
Check if the ad offers "Easy Apply" (only with LinkedIn)
"""
# Usually looks like this:
# <span class="artdeco-button__text">
tag = soup.find("span", class_="artdeco-button__text", string=re.compile("Easy Apply"))
if verbose: print(tag)
    return tag is not None
|
b4350d6a5894a2a6fb7cd70ed96425126561bf3f
| 34,691 |
def clean_data(df, primary_key=None):
""" Drops null & duplicate rows """
if primary_key:
df = df.dropna(subset=[primary_key])
df = df.drop_duplicates(subset=[primary_key], keep='first')
df = df.dropna(how='all')
return df
|
62ede4fc6d72c0c1b579816339d839ffd62a0122
| 34,696 |
def value_to_bool(value):
"""Return bool True/False for a given value.
If value is string and is True or 1, return True
If value is string and is False or 0, return False
Otherwise if value is numeric or None, return bool(value)
"""
if isinstance(value, bool):
return value
elif isinstance(value, str):
return value.lower() in ("true", "1")
elif isinstance(value, int) or value is not None:
return bool(value)
else:
return False
|
271d0a33b09b651a7705751a51aa7eab9cc76e55
| 34,700 |
def intoBinary(num):
"""
    The bin() function converts decimal to binary but does not pad the value to 8
    bits, so we need to add the remaining zeros to make it 8 bits, for
    which the 'switcher' dictionary is created. For instance, if bin() returns
    3 bits of data, the remaining 5 zeros are prepended.
Input:
'num' as integer.
Returns:
Corresponding binary value of 'num'
"""
val = bin(num).replace('0b', "")
switcher = {
1:"0000000",
2:"000000",
3:"00000",
4:"0000",
5:"000",
6:"00",
7:"0",
8:""
}
#returns either number of zeros as per length or the value itself
if len(val) > 8:
final_value = val
else:
final_value = switcher.get(len(val), val)+val
print("Binary value of {}: ".format(num),final_value)
return final_value
|
200c13daa1d7a0bf9dd16015c2c9ac6e299869aa
| 34,702 |
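For comparison, the same padding behaviour can be written with str.zfill; a minimal equivalent sketch (illustrative, not part of the original dataset row; into_binary_zfill is a hypothetical helper name):
def into_binary_zfill(num):
    # hypothetical helper: pads to 8 bits, leaves longer values untouched,
    # matching the switcher-based logic above (without the print)
    val = bin(num)[2:]
    return val if len(val) > 8 else val.zfill(8)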
import functools
import logging
def require_column(*colnames):
"""Wrapper to coordinate the segment-filtering functions.
Verify that the given columns are in the CopyNumArray the wrapped function
takes. Also log the number of rows in the array before and after filtration.
"""
if len(colnames) == 1:
msg = "'{}' filter requires column '{}'"
else:
msg = "'{}' filter requires columns " + \
", ".join(["'{}'"] * len(colnames))
def wrap(func):
@functools.wraps(func)
def wrapped_f(segarr):
filtname = func.__name__
if any(c not in segarr for c in colnames):
raise ValueError(msg.format(filtname, *colnames))
result = func(segarr)
logging.info("Filtered by '%s' from %d to %d rows",
filtname, len(segarr), len(result))
return result
return wrapped_f
return wrap
|
9f7cba8cb4fca0c7632a9a787d33d9b509573c42
| 34,712 |
import base64
def encode_from_bytes(data: bytes) -> str:
"""
Base64-encodes a sequence of bytes for transmission and storage.
:param data: The byte sequence to encode
:return: string representation of base64-encoded bytes
"""
data_encoded_bytes = base64.b64encode(data)
data_encoded_str = str(data_encoded_bytes, "utf-8")
return data_encoded_str
|
ddb35881394ec18be3832b1181abf0538f60146d
| 34,713 |
def digest_lines(digest_input):
"""
Read the lines of the Digest file output
"""
outlines = []
with open(digest_input, 'r') as infile:
for line in infile:
outlines.append(line)
return outlines
|
fe2627af2a15d51f399364bcfd0c0ef68e4973df
| 34,714 |
def removeprefix(self: str, prefix: str) -> str:
"""
Removes a prefix from a string.
Polyfills string.removeprefix(), which is introduced in Python 3.9+.
Ref https://www.python.org/dev/peps/pep-0616/#specification
"""
if self.startswith(prefix):
return self[len(prefix):]
else:
return self[:]
|
c26b99313e4350adf082be7c32a9e8773ba8101e
| 34,727 |
def asline(iterable, sep=' ', end='\n'):
"""Convert an iterable into a line."""
return sep.join(str(x) for x in iterable) + end
|
b3ce332d8f78089d4df191c06556f7558c48c096
| 34,733 |
def output_passes_filter(data, filter_from, filter_to):
"""
Check if the data passes the given filter.
:param data: The data tuple to check.
    :param filter_from: Filter to only values starting from this value...
    :param filter_to: ...Filter to only values ending with this value.
:return: True if the data passes the filter, False otherwise.
"""
if filter_from is None or filter_to is None:
return True
return data[1] == filter_from and data[2] == filter_to
|
b2bc203c6e56647240e1d6376a98feda3a8695e8
| 34,734 |
def format_percentage(val: float, suffix: str = ' %') -> str:
"""
Formats a percentage value (0.0 - 1.0) in the standardized way.
Returned value has a constant width and a trailing '%' sign.
Args:
val: Percentage value to be formatted.
suffix: String to be appended to the result.
Returns:
Formatted percentage value with a constant width and trailing '%' sign.
Examples:
>>> print(format_percentage(0.359))
str(' 36 %')
>>> print(format_percentage(1.1))
str('110 %')
"""
return f'{round(val * 100): >3d}{suffix}'
|
682fc3ea39f3de31ace9a72d80a982aea0fe63af
| 34,736 |
def hour_number(N, time):
"""
Takes the day number and time (in hours) and
converts to hour number.
Parameters
----------
N : integer
The day number
time : float
The time in hours (24-hour clock)
Returns
-------
hour : float
The hour number
"""
hour = N * 24 + time
return hour
|
058c6752fe531c0a5e3fd91cf23094facb6e5277
| 34,738 |
def form_field(field):
"""Render the given form field."""
return {'field': field}
|
02a580d99a3a8569d0bcc820013062359f95fd7c
| 34,740 |
from typing import List
def build_branches(program: List[dict],
branches_end_nodes: List[int]) -> List[List[int]]:
"""
Build branches (currently only 2 branches are possible) by iterating through
the program. Stop once all branches_end_nodes are reached.
Parameters
---
program (List[dict])
Functional program
branches_end_nodes (List[int])
Indices of the last nodes in branches before the merge into a single node
Result
---
List[List[int]]
List of branches (only 2) containing indices of nodes.
"""
# not really important since we know it's only 2, but in case
# this changes in the future
num_branches = len(branches_end_nodes)
branches = [[] for i in range(num_branches)]
for branch_idx, end_node_idx in enumerate(branches_end_nodes):
branches[branch_idx].append(end_node_idx)
inputs = program[end_node_idx]["inputs"]
# stop when we reach empty inputs (i.e. scene program)
while inputs:
# there shouldn't be anymore branches
assert len(inputs) == 1
prev_node = inputs[0]
# append current branch with previous node
branches[branch_idx].append(prev_node)
inputs = program[prev_node]["inputs"]
return branches
|
45983c9e0204acdc76dec8572b5f1a74cbc8147f
| 34,748 |
def listify(x):
"""Turn argument into a list.
This is a convenience function that allows strings
to be used as a shorthand for [string] in some arguments.
Returns None for None.
Returns a list for a list or tuple.
Returns [x] for anything else.
:param x: value to be listified.
"""
if x is None:
return None
elif isinstance(x, (list, tuple)):
return x
else:
return [x]
|
d295f85eb6a37fd869c493ffd5da2fdc54927bf4
| 34,749 |
def split_channels(data):
"""
Splits a stereo signal into two mono signals (left and right respectively).
Example:
>>> data_left, data_right = split_channels(data)
"""
if len(data[0]) == 2:
data_l = data[:, 0]
data_r = data[:, 1]
return data_l, data_r
else:
print("Signal should be stereo.")
return data
|
75561b8f4afa7aed727a536dcf0e60c31902f532
| 34,750 |
def fwhm_expr(model):
"""Return constraint expression for fwhm."""
fmt = "{factor:.7f}*{prefix:s}sigma"
return fmt.format(factor=model.fwhm_factor, prefix=model.prefix)
|
7c7b8872904b94c7ac6b67b87ecfbc197cec520a
| 34,751 |
import math
def acos(value):
"""Returns the arc cosinus in radians"""
return math.acos(value)
|
c3c793cb712d17a0da545300ffae7e521ec5ab64
| 34,753 |
def exists(array, previous_arrays):
"""Tests if the array has been seen before"""
for i in previous_arrays:
if array == i:
return True
return False
|
1329da3bacb6ff42e836efc4f056ea48fdfb4fc3
| 34,756 |
def seconds_to_str(seconds):
""" converts a number of seconds to hours, minutes, seconds.ms
"""
(hours, remainder) = divmod(seconds, 3600)
(minutes, seconds) = divmod(remainder, 60)
return "h{}m{}s{}".format(int(hours), int(minutes), float(seconds))
|
edaa063c1d5423c0404a41e83f1a1419891e685a
| 34,763 |
def normalize(img, mean, std):
"""
Normalize image with mean and standard deviation.
Parameters
----------
img : array(float)
The image to normalize
mean : float
The mean used for normalization.
std : float
The standard deviation used for normalization.
Returns
-------
array(float)
The normalized image.
"""
return (img - mean) / std
|
9d85497ef251a98d7630bcea694e6f12ba8ab608
| 34,765 |
def update_process(process, stock, time=1):
"""
    Check if the process has ended;
    if so, add its output to the stock and set busy to false.
"""
process.update(time)
if (process.done()):
stock.new(process)
process.end()
return process, stock
|
7897ab759cdb2b68239961e11c287f17a8c99687
| 34,769 |
def color_str_green(s):
"""Color string GREEN for writing to STDIN."""
return "\033[1m\033[92m{}\033[00m".format(s)
|
4d1a74d4f7b4af27e51076cf04d51031b007773a
| 34,771 |
def diffa(dist, alpha, r):
"""
Compute the derivative of local-local BDeu score.
"""
res = 0.0
for n in dist:
for i in range(n):
res += 1.0/(i*r+alpha)
for i in range(sum(dist)):
res -= 1.0/(i+alpha)
return res
|
9f5b14da7940eec4a91077b000770f55485cdc45
| 34,773 |
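Reading the loops of diffa directly, the returned value is a difference of two harmonic-style sums; restated in LaTeX for reference (this is a transcription of the code, not an independent derivation), with N the total count:
\mathrm{diffa}(\mathit{dist}, \alpha, r) \;=\; \sum_{n \in \mathit{dist}} \sum_{i=0}^{n-1} \frac{1}{i\,r + \alpha} \;-\; \sum_{i=0}^{N-1} \frac{1}{i + \alpha}, \qquad N = \sum_{n \in \mathit{dist}} n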
def _qr_R(qr):
"""Extract the R matrix from a QR decomposition"""
min_dim = min(qr.shape)
return qr[:min_dim + 1, :]
|
5d3270cf3b1430e81dc933cec72e9d38d91b1653
| 34,775 |
def get_action_key(action, category):
"""
Return a key for indexing an action.
"""
return (action, category)
|
bc4343e4a00913dd289c1df28602df4410a9b7e4
| 34,781 |
def recuperer_valeur_tag(elem,cle_valeur):
"""
    In OSM, the attributes of a relation are stored in the 'tag' elements of the relation's XML object.
    Retrieve the value associated with the key cle_valeur
:param elem:
:param cle_valeur:
:return:
"""
    # Look up all 'tag' elements of the elem object
for item in elem.findall('tag'):
if item.get("k") == cle_valeur:
return item.get("v")
return None
|
5b600ce792aeda98b73de879685caa73ef3fd5e3
| 34,793 |
def convert_name(cs):
"""Convert the name of prototype to formal name
"""
def convert_single(_cs):
if isinstance(_cs, str): # convert string name
            _cs = _cs.lower()
if _cs[0] == "z":
return "zincblende"
elif _cs[0] == "w":
return "wurtzite"
elif _cs[0] in ("n", "r"):
return "rocksalt"
elif _cs[0] == "c":
return "cesiumchloride"
elif _cs[0] == "d":
return "diamond"
elif _cs[0] == "p":
return "perovskite"
else:
return "other"
else:
return ""
if isinstance(cs, str):
return ([convert_single(cs)])
elif isinstance(cs, list):
return tuple([convert_single(c) for c in cs])
|
196b0d95435a77c640bbb8e2392d0735f9fe63e6
| 34,794 |
import math
def largest_prime_factor_square_optimized(number):
"""
Every number n can at most have one prime factor greater than n.
If we, after dividing out some prime factor, calculate the square root of the remaining number
we can use that square root as upper limit for factor.
If factor exceeds this square root we know the remaining number is prime.
"""
factors = []
factor = 2
if number % factor == 0:
number = number // factor
factors.append(factor)
while number % factor == 0:
number = number // factor
factor = 3
max_factor = math.sqrt(number)
while number > 1 and factor <= max_factor:
if number % factor == 0:
factors.append(factor)
number = number // factor
while number % factor == 0:
number = number // factor
max_factor = math.sqrt(number)
factor += 2
    if number > 1:
        # the remaining cofactor is prime; include it so the largest prime factor is returned
        factors.append(number)
    return factors
|
5d55de7e9eca6c5e1a8e1a097db0390978496471
| 34,802 |
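A minimal usage sketch for largest_prime_factor_square_optimized (illustrative, not part of the original dataset row): 600 = 2^3 * 3 * 5^2, so the distinct prime factors come back in ascending order.
largest_prime_factor_square_optimized(600)   # -> [2, 3, 5]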
from unittest.mock import Mock
def get_response_mock(url):
"""Get a mock representing a response to a request.
:param url: response URL
:returns: an instance of mock representing a response
"""
response = Mock()
response.url = url
return response
|
4af8b016fc1c1f227b83ab33edff248eef833e61
| 34,808 |
import torch
def draw_samples_from(gen, device, N=128, rescale=False):
"""
Draws samples from the generator network.
    If rescale is True, image pixels are rescaled to [0, 255].
"""
gen.eval()
with torch.no_grad():
noise = torch.randn(N, gen.z_dim, 1, 1, device=device)
image = gen(noise)
if rescale:
image += 1.0
image /= 2.0
image *= 255.
image = torch.clamp(image, 0., 255.).byte().cpu()
return image
|
07914208f99ffae22d6029ffca25edc76dc96ed4
| 34,810 |
def hexToRgb(hex):
"""
Converts hex colour codes eg. #FFF or #00FF0F to rgb array
Args:
hex (string): colour code # followed by 3 or 6 hexadecimal digits
Returns:
Array [r, g, b] each in the range of 0 - 255 inclusive
"""
# strip '#'
if hex[0] == "#":
hex = hex[1:]
if len(hex) == 3:
# Expand shorthand form (e.g. "03F") to full form (e.g. "0033FF")
return [int(hex[i] * 2, 16) for i in (0, 1, 2)]
return [int(hex[i : i + 2], 16) for i in (0, 2, 4)]
|
dbcbde5feda73b6c9a03f0758aa06be14731e86f
| 34,811 |
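A minimal usage sketch for hexToRgb (illustrative, not part of the original dataset row): shorthand and full-length codes give the same RGB triple.
hexToRgb("#03F")      # -> [0, 51, 255]
hexToRgb("0033FF")    # -> [0, 51, 255]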
import re
def regex_prettifier(scraped_data, regex):
"""Prettify the scraped data using a regular expression
Positional Arguments:
scraped_data (list): data scraped from a website
regex (str): a regular expression
Return:
list: the regex modified data
"""
data_list = []
for data in scraped_data:
data_list.append(re.sub(regex, '', data))
return data_list
|
5eb42d0df2a0f93dbc14ec5fb5dd68bc6fe127ca
| 34,814 |
def alias(*aliases):
"""Decorator to add aliases for Cmdln.do_* command handlers.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.alias("!", "sh")
def do_shell(self, argv):
#...implement 'shell' command
"""
def decorate(f):
if not hasattr(f, "aliases"):
f.aliases = []
f.aliases += aliases
return f
return decorate
|
ea94335cfeb1f1e4f02a67df39c834041df41fcf
| 34,817 |
def get_linear(from_interval, to_interval):
""" Get linear transformation that maps one interval to another
Parameters
----------
from_interval : ndarray, tuple or list
sequence of len=2 (llim, ulim) that defines the domain-interval
to_interval : ndarray, tuple or list
sequence of len=2 that defines the image-interval
Returns
-------
function
linear transformation
"""
# compute coeffs of the mapping
llim, ulim = from_interval
new_llim, new_ulim = to_interval
slope = (new_ulim - new_llim) / (ulim - llim)
intercept = new_llim - slope * llim
# define the map
def linear(x):
""" Transformation
"""
return slope * x + intercept
return linear
|
f9ffc4a9d76b9e177d86f730ea7cdabbc18a3b9e
| 34,819 |
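A minimal usage sketch for get_linear (illustrative, not part of the original dataset row): map the unit interval [0, 1] onto the symmetric interval [-1, 1].
to_symmetric = get_linear((0.0, 1.0), (-1.0, 1.0))
to_symmetric(0.0)   # -> -1.0
to_symmetric(0.5)   # ->  0.0
to_symmetric(1.0)   # ->  1.0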
import codecs
def is_known_encoding(encoding: str) -> bool:
"""
Return `True` if `encoding` is a known codec.
"""
try:
codecs.lookup(encoding)
except LookupError:
return False
return True
|
2914728aa14ec295fa647051141ed897875f153c
| 34,821 |
import re
def maxxmatches(regex, text, x):
"""Returns the substrings of length x from text, matching regex."""
reg = re.compile(regex)
result = reg.findall(text)
return list(filter(lambda string: len(string) <= x, result))
|
d361a828e941465f138188051a431eb3a81cc5a2
| 34,828 |
import torch
def boolean_mask(img, color):
"""
Returns a Boolean mask on a image, based on the presence of a color.
Arguments:
img {torch.Tensor} -- image tensor [shape = (..., 3)]
color {torch.Tensor} -- RGB color tensor [shape = (3, )]
Returns:
torch.BoolTensor -- boolean mask of image [shape = (..., )]
"""
dim = len(img.shape) - 1
return torch.all(img == color.view(*([1] * dim), 3), dim=dim)
|
39d7a75ac1a47574ebb333b1247813761986e636
| 34,832 |
import io
def _read_file(file_):
"""Reads a file, returns the stripped contents."""
with io.open(file_, "r", encoding="utf-8") as openfile:
return openfile.read().strip()
|
66d0dcf2454ea259ea46cb5295edd7c7b4ffec77
| 34,834 |
def clip(value_before_switch, value_after_switch, t_switch, t):
"""
logical function of time. Changes value at threshold time t_switch.
"""
if t <= t_switch:
return value_before_switch
else:
return value_after_switch
|
103a5aede1c1d0589e0acfc9ef058e011813f789
| 34,835 |
from functools import reduce
def join_bits(byteseq) -> int:
"""
Given a sequence of 0/1 or True/False bits altogether representing a
single byte, joins said bits into an int of the same magnitude
>>> join_bits([1, 1, 0, 1])
13
"""
return reduce(lambda acc, bit: (acc << 1) | int(bit), byteseq)
|
6cb925c4d5acc99e656802738565a957471af62f
| 34,836 |
def metersToInches(meters):
"""Convert meters to inches."""
return meters * 39.3701
|
27061202cb72e5a98be6230e491bef148f6dbd13
| 34,838 |
def isInteger(n, epsilon=1e-6):
"""
Returns True if n is integer within error epsilon
"""
    return abs(n - round(n)) < epsilon
|
8ef0960cffadc063317830dca77d1177569ad178
| 34,840 |
def add_pkg_to_pkgs(pkg, pkgs):
"""Add package to dictionary of packages.
"""
name = pkg["Source"]
version = pkg["Version"]
pkgs[name][version] = pkg
return pkgs
|
296cfd39f56858c548171cddf464435a9832ae74
| 34,842 |
def getNodesByName(parent, name):
"""
Return a list of all of the child nodes matching a given local name
"""
childNodes = parent.xpath("*[local-name() = '%s']" % name)
return childNodes
|
f171c4642b3a129c5ccc092f26ee6d402451873f
| 34,846 |
import random
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
|
61af296f8d2272b5100988b345942f21dc78603f
| 34,850 |
def get_serie_group(serie_change_bool):
"""
from boolean serie, make cumulative sum returning serie int
true, false, false, true, false
1, 1, 1, 2, 2
"""
return serie_change_bool.cumsum()
|
038195657fd33eb9a626344b335c7faefaa48a50
| 34,851 |
def parse_bool(section, optionname):
"""
Parses a string option as bool. Possible options are "True"/"False",
"yes"/"no", "1"/"0".
"""
string = section.dict[optionname]
if string.lower() == "true" or string.lower() == "yes":
return True
elif string.lower() == "false" or string.lower() == "no":
return False
elif string.isdigit():
return bool(int(string))
else:
raise ValueError("Option " + optionname + " in section " + section.name
+ " is not a valid boolean!")
|
a16c7eb9169c04bc6cf03309c0b7d7dbfbdd511c
| 34,855 |
def _create_image_path(image_path, image_id):
"""Generates path to a specific image.
Args:
image_path: String with path to the folder containing training images.
image_id: String representing name of the file.
Returns:
String with path to the specific image.
"""
return image_path + image_id
|
2d122666d4dbf1a8efae210dd022bf25af78df87
| 34,863 |
def set_type(values, new_type):
"""Convert string values to integers or floats if applicable. Otherwise, return strings.
If the string value has zero length, none is returned
Args:
values: A list of values
new_type: The type to coerce values to
Returns:
The input list of values modified to match their type. String is the default return value. If the values are
ints or floats, returns the list formatted as a list of ints or floats. Empty values will be replaced with none.
"""
if new_type == str:
coerced_values = [str(x) for x in values]
elif new_type == int or new_type == float:
float_values = [float(x) for x in values]
if new_type == int:
coerced_values = [int(round(x)) for x in float_values]
else:
coerced_values = float_values
else:
raise ValueError("{} not supported for coercing types".format(new_type.__name__))
return coerced_values
|
a1aa1cc74800a1add464e8ac124e0873a455f59a
| 34,866 |
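A minimal usage sketch for set_type (illustrative, not part of the original dataset row):
set_type(["1", "2.6", "3"], int)   # -> [1, 3, 3]   (strings are parsed as floats, rounded, then cast to int)
set_type(["1.5", "2"], float)      # -> [1.5, 2.0]
set_type([1, 2.5], str)            # -> ['1', '2.5']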
def xyxy2xywh(box):
"""
Convert bounding box from xyxy to xywh format
:param box: array-like, contains (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = box
w, h = x2 - x1, y2 - y1
x_c = x1 + w / 2
y_c = y1 + h / 2
return x_c, y_c, w, h
|
af8b5d4568dfc29a71164ccef58f15b9c06f695a
| 34,874 |
from typing import Any
from typing import Type
def ensure_namespace(obj: Any, name: str = 'orphan') -> Type:
"""Convert a ``dict`` to an object that provides ``getattr``.
Parameters
----------
obj : Any
An object, may be a ``dict``, or a regular namespace object.
name : str, optional
A name to use for the new namespace, if created. by default 'orphan'
Returns
-------
type
A namespace object. If ``obj`` is a ``dict``, creates a new ``type``
named ``name``, prepopulated with the key:value pairs from ``obj``.
Otherwise, if ``obj`` is not a ``dict``, will return the original
``obj``.
Raises
------
ValueError
If ``obj`` is a ``dict`` that contains keys that are not valid
`identifiers
<https://docs.python.org/3.3/reference/lexical_analysis.html#identifiers>`_.
"""
if isinstance(obj, dict):
bad_keys = [str(k) for k in obj.keys() if not str(k).isidentifier()]
if bad_keys:
raise ValueError(
f"dict contained invalid identifiers: {', '.join(bad_keys)}"
)
return type(name, (), obj)
return obj
|
ea83ba109520f2da68ea2b3d823c66fa5c2a6a82
| 34,875 |
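A minimal usage sketch for ensure_namespace (illustrative, not part of the original dataset row): a plain dict becomes an attribute-accessible namespace, while non-dict inputs pass through unchanged.
ns = ensure_namespace({'width': 3, 'height': 4}, name='Size')
ns.width                     # -> 3
ns.height                    # -> 4
ensure_namespace(ns) is ns   # -> True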
import re
def fmtlog(txt):
"""
Reformat the text of the one-line log as LaTeX.
Arguments:
txt: string to reformat.
Returns:
A LaTeX formatted version of the input.
"""
# Replace TeX special characters in the whole text.
specials = ("_", "#", "%", r"\$", "{", "}")
for s in specials:
txt = re.sub(r"(?<!\\)" + s, "\\" + s, txt)
# Remove periods at the end of lines.
txt = re.sub(r"\.$", "", txt, flags=re.MULTILINE)
lines = txt.split("\n")
# Remove reference to HEAD
lines[0] = re.sub(r"\(.*\) ", "", lines[0])
# Use typewriter font for the commit id.
lines = [r"\texttt{" + re.sub(" ", r"} ", ln, count=1) for ln in lines if ln]
return "\\\\\n".join(lines)
|
fbd49446b027c58303edabd60f96978ce19b2c57
| 34,887 |
def login_sysadmin_superuser(self):
"""
Login as a sysadmin superuser.
"""
self.client.login(username='supersysadmin', password='supersysadmin')
return self
|
bd5c14ccb3e917a54897dc3ae1405d0f8739a600
| 34,888 |
def get_longest_orf(orfs):
"""Find longest ORF from the given list of ORFs."""
sorted_orf = sorted(orfs, key=lambda x: len(x['sequence']), reverse=True)[0]
return sorted_orf
|
de5ce7f112aa8b91e5b09c9d2fa63b1da8f3bfd5
| 34,891 |
def is_protected_variable(name: str) -> bool:
"""
Checks if variable has protected name pattern.
>>> is_protected_variable('_protected')
True
>>> is_protected_variable('__private')
False
>>> is_protected_variable('__magic__')
False
>>> is_protected_variable('common_variable')
False
"""
return name.startswith('_') and not name.startswith('__')
|
b484222b655ce8676f4b26b3037dba2041cba84d
| 34,892 |
def truncate(ys, n):
"""Trims a wave array to the given length.
ys: wave array
n: integer length
returns: wave array
"""
return ys[:n]
|
8b632ce326fa25875645fa1bab80d59341183a53
| 34,895 |
def split_wo(s):
"""Remove -seg from WO"""
return s.str.split('-', expand=True)[0]
|
6c9edaf131f38be823fc76e1c9963086705f42d4
| 34,897 |
def quote_plus(s):
"""
Convert some URL elements to be HTTP-safe.
Not the same as in urllib, because, for instance, parentheses and commas
are passed through.
Parameters
----------
s: input URL/portion
Returns
-------
corrected URL
"""
s = s.replace('/', '%2F')
s = s.replace(' ', '%20')
return s
|
b4b2c7a5cb43bcb9a58614257729d3c541e83e2a
| 34,903 |
def get_activation_details(name, layer_type, layer, keyword_arguments):
"""
Creates the layer details data for the activation function
"""
return {
'layer_details': None,
'name': name,
'type': layer_type,
'layer': layer,
"keyword_arguments": keyword_arguments
}
|
9c6bf8f1faa5c2b752e70d3a3d9c3950fe3270b0
| 34,904 |
def getGuessedWord(secretWord: str, lettersGuessed: list) -> str:
"""
secretWord: the word the user is guessing
lettersGuessed: letters that have been guessed so far
returns: string, comprised of letters and underscores that
represents what letters in secretWord have been guessed so far.
"""
return ' '.join(letter if letter in lettersGuessed else '_'
for letter in secretWord)
|
df5214524af174d435eb496c87b3d03ab3170de7
| 34,908 |
def is_palindromic_number(numb: int) -> bool:
"""
    Returns whether or not numb
is a palindromic number
https://oeis.org/A002113
"""
return numb == int(str(numb)[::-1])
|
474792d46b6ba2267f64987adf84da2156184561
| 34,910 |
def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):
"""
Calculates the atomic weights for the probe molecule based on
a fingerprint function and the prediction function of a ML model.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
"""
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
probeFP = fpFunction(probeMol, -1)
baseProba = predictionFunction(probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newProba = predictionFunction(newFP)
weights.append(baseProba - newProba)
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
return weights
|
e71976c0969b26d2f514968f11aea6f603b2c559
| 34,911 |
def reverse_builtin(value):
"""Reverse string using the "reversed" function."""
return "".join(reversed(value))
|
7703f4b51db4d4cd73224dc791321b695d475f55
| 34,915 |
def calc_min_vms_for_availability(zk_servers, bk_servers, ss_servers, cc_servers):
"""
This method assumes as input a number of instances per service to tolerate a given number of failures. With this,
we calculate the number of VMs to respect the same failure tolerance, which translates into the maximum number of
instances of any type. This is because having fewer VMs yield that a single VM failure would induce multiple
failures in the Pravega services, making the failure tolerance guarantees ineffective.
:param zk_servers: Number of Zookeeper instances.
:param bk_servers: Number of Bookkeeper instances.
:param ss_servers: Number of Segment Stores.
:param cc_servers: Number of Controllers.
:return: Minimum number of VMs to satisfy the failure tolerance requirements.
"""
return max(zk_servers, bk_servers, ss_servers, cc_servers)
|
6d2e155d246c50e303c31378ccd5cf534f2d6aa9
| 34,916 |
from typing import Dict
from typing import Optional
from typing import Set
def remove_zero_statistics(
statistics: Dict[str, Dict[str, int]],
force_keep: Optional[Set[str]] = None,
) -> Dict[str, Dict[str, int]]:
"""
Any module that has zero for all available statistics is removed from the
set of statistics. This can help declutter the reporting of statistics
if many submodules have zero statistics.
Args:
statistics (dict(str, dict(str, int))) : the statistics to
remove zeros from. Organized as a dictionary over modules,
which are each a dictionary over statistic types.
force_keep (set(str) or None) : a set of modules to always keep, even
if they are all zero.
Returns:
dict(str, dict(str, int)) : the input statistics dictionary,
with submodules removed if they have zero for all statistics.
"""
out_stats = {}
if force_keep is None:
force_keep = set()
for mod, stats in statistics.items():
if not all(val == 0 for val in stats.values()) or mod in force_keep:
out_stats[mod] = stats.copy()
return out_stats
|
45d8cb863ffa6d97d6ed5fe6b8afe9a8049ac8a8
| 34,917 |
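A minimal usage sketch for remove_zero_statistics (illustrative, not part of the original dataset row): the all-zero submodule is dropped unless it is force-kept.
stats = {
    "backbone": {"flops": 120, "params": 30},
    "head.bias": {"flops": 0, "params": 0},
}
remove_zero_statistics(stats)
# -> {'backbone': {'flops': 120, 'params': 30}}
remove_zero_statistics(stats, force_keep={"head.bias"})
# -> both submodules are kept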