content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def single2float(x):
    """Convert a single-precision value to a rounded Python float.

    Rounds to two decimal places beyond the number of decimals in the
    textual representation of ``x``, absorbing single->double conversion
    noise while keeping the visible precision.

    :param x: value convertible to float (number or numeric string)
    :return: rounded float
    """
    text = str(x)
    # Values without a fractional part (e.g. plain ints) have zero
    # decimals; the old unconditional split('.')[1] raised IndexError.
    decimals = len(text.split('.')[1]) if '.' in text else 0
    return round(float(x), decimals + 2)
|
4501bd8ebc17102ecbc82f4a26615adc3ef4639f
| 561,721 |
import hashlib
def hash_object(*objects, hasher=None):
    """Feed the given object(s) into a hashlib hasher and return it.

    When no hasher is supplied, a fresh ``hashlib.md5()`` is created.
    Dicts recurse over keys and values, lists/tuples over their elements;
    ``bytes`` are hashed directly, ``str`` as UTF-8, and anything else via
    its string form.
    """
    if hasher is None:
        hasher = hashlib.md5()
    for obj in objects:
        if isinstance(obj, dict):
            # Keys and values contribute in iteration order.
            for key, value in obj.items():
                hash_object(key, hasher=hasher)
                hash_object(value, hasher=hasher)
        elif isinstance(obj, (list, tuple)):
            for element in obj:
                hash_object(element, hasher=hasher)
        elif isinstance(obj, bytes):
            hasher.update(obj)
        elif isinstance(obj, str):
            hasher.update(obj.encode('utf-8'))
        else:
            # Fall back to the textual representation for other types.
            hasher.update(f'{obj}'.encode('utf-8'))
    return hasher
|
7e072d27d625f47dde75f5119a5bb1f8e57202fb
| 640,991 |
def find_overlap(bta, btb, r=0.01):
    """
    Overlap two pbt.BedTool objects by reciprocal overlap fraction *r*,
    keeping only hits whose CNV type matches (assumes the fifth column
    holds the CNV type).

    Returns: two lists of interval IDs from the overlapping set.
    """
    trimmed_a = bta.cut(range(5))
    trimmed_b = btb.cut(range(5))
    hits = trimmed_a.intersect(trimmed_b, f=r, r=True, wa=True, wb=True)
    # saveas() materializes the stream so it can be iterated twice below.
    same_type = hits.filter(lambda iv: iv[4] == iv[9]).saveas()
    ids_a = [iv[3] for iv in same_type]
    ids_b = [iv[8] for iv in same_type]
    return ids_a, ids_b
|
7e76f5a90266079f5f4c03d5106af810e8d9c19b
| 411,790 |
import ast
def get_functions(_ast):
    """
    Get all function definitions immediately below an ast node.

    @param _ast: the ast node whose body is searched for functions
    @return: a tuple of ast.FunctionDef nodes
    """
    # The docstring promises a tuple, but the old code returned a lazy
    # generator (single-use, no len()); materialize it.
    return tuple(node for node in _ast.body if isinstance(node, ast.FunctionDef))
|
8d9816b9ea6c20090e72e833644c69dbf982d436
| 646,166 |
def extract_doi(doi):
    """Collapse a DOI field to a single value.

    INSPIRE occasionally returns a list of identical DOIs; take the first
    element in that case, otherwise pass the input through unchanged.
    """
    is_nonempty_list = isinstance(doi, list) and len(doi) > 0
    return doi[0] if is_nonempty_list else doi
|
cd799512101792c3506ab70b3d29b033e1f1429e
| 528,275 |
def format_line(line):
    """
    Classify one line of Matlab source as markdown or code.

    Parameters
    ----------
    line : str
        The line to classify:
        - A line opening with '%%' starts a new markdown cell.
        - A line opening with a single '%' is markdown appended to the
          current cell.
        - Anything else is Matlab code, passed through untouched.

    Returns
    -------
    tuple of (new_cell, md, source)
        new_cell (bool): whether a new cell starts here.
        md (bool): whether the content is markdown.
        source (str): the extracted content.
    """
    if line.startswith('%%'):
        # Markdown line breaks require a trailing blank line.
        return True, True, line.split('%%')[1] + '\n'
    if line.startswith('%'):
        return False, True, line.split('%')[1] + '\n'
    return False, False, line
|
2ef8ea7d1c5aaff0a3fc11d7013c2a0722617f97
| 114,958 |
def DuplicateStyleDict(style_dict):
    """Create a true copy of a style dictionary.

    Plain assignment only copies a reference, leaving both names pointing
    at the same mapping; here every StyleItem is cloned individually.

    @param style_dict: dictionary of tags->StyleItems
    @return: a copy of the given styleitem dictionary
    """
    return {tag: item.Clone() for tag, item in style_dict.items()}
|
3d7a53c9b3490d21355daaaa88c0ae2221526887
| 434,604 |
def MockMethod(*_args, **_kwargs):
    """Accept any positional/keyword arguments, do nothing, return None."""
    return None
|
366a14e6964c06e6125fd877c4fc61e95ef404d4
| 512,520 |
import uuid
def is_convertible_to_UUID(value, version=4):
"""Returns True if value is convertible to UUIDs
UUID v.4 General Format:
- xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
- where x is a hex digit and y is one of (8, 9, A, B)
Using python uuid.UUID and as so,
Supporting these forms:
ca761232ed4211cebacd00aa0057b223
CA761232-ED42-11CE-BACD-00AA0057B223
{CA761232-ED42-11CE-BACD-00AA0057B223}
Not supporting these forms
(CA761232-ED42-11CE-BACD-00AA0057B223)
{0xCA761232, 0xED42, 0x11CE, {0xBA, 0xCD, 0x00, 0xAA, *LINER BREAK*
0x00, 0x57, 0xB2, 0x23}}
Args:
value (str): Value to attempt to convert to an UUID
version (int): UUID version. Default is UUIDv4
Returns:
bool: True if deemed valid UUID
"""
try:
# 1. Check if can be converted to a UUID
uuid.UUID(value, version=version)
except (ValueError, AttributeError):
# 2. Return false if cannot be converted to UUID
return False
else:
# 3. Return true if can be converted to UUID
return True
|
1a42ee06cdddeae977c0a6bdc4aa87a5c02f8b72
| 362,016 |
import re
def strip_numeric(s):
    """Return ``s`` with every run of ASCII digits removed."""
    return re.sub(r"[0-9]+", "", s)
|
095f1adf9ad31f5500d4c2e1a79c31147eb879f4
| 475,570 |
def reformat_timezone_offset(in_date_string):
    """
    Drop the colon from a trailing timezone offset ('+05:00' -> '+0500').

    :param in_date_string: The datetime string (str)
    :return: The reformatted string (str); unchanged when no colon is
        found in the offset position.
    """
    if in_date_string[-3:-2] == ":":
        return in_date_string[:-3] + in_date_string[-2:]
    return in_date_string
|
279fbf00ff51f0926f3d284faee66a43327298e4
| 32,006 |
def timedur(x):
    """
    Render a number of seconds as a human-readable duration string.

    Examples:
        300   -> '5 mins'
        86400 -> '1 day'
        86705 -> '1 day, 5 mins, 5 seconds'
        0     -> ''
    """
    remaining = float(x)
    parts = []
    for label, unit in (('days', 86400), ('hours', 3600), ('mins', 60)):
        if remaining >= unit:
            count, remaining = divmod(remaining, float(unit))
            # Drop the plural 's' when the count is exactly one.
            name = label[:-1] if count == 1.0 else label
            parts.append((name, int(count)))
    # Whatever is left over is whole seconds.
    secs = int(remaining)
    if secs == 1:
        parts.append(("second", secs))
    elif secs != 0:
        parts.append(("seconds", secs))
    return ", ".join("%d %s" % (count, name) for name, count in parts)
|
33ba00c3354174a49b91addd4243f22859433b35
| 239,360 |
def adder(x: int | float, y: int | float) -> int | float:
"""
Simple function that adds two numbers (int or float)
:param x: first number to add (int or float)
:param y: second number to add (int or float)
:return: result of addition (int or float)
"""
return x + y
|
5aa722eec8a71cfd8d62d81ded6aceb3662eb65d
| 329,397 |
import torch
def get_spixel_prob(spixel_x, spixel_y):
    """
    Generate the soft assignment matrix from per-axis probability tensors.

    Combines the x- and y-direction superpixel probabilities via an outer
    product over their channel axes (equivalent to
    ``einsum("bxhw,byhw->bxyhw")``), then flattens the two channel axes.

    Args:
        spixel_x: torch.tensor of shape (b, nx, h, w) — probability that
            pixels belong to superpixels along x.
        spixel_y: torch.tensor of shape (b, ny, h, w) — probability that
            pixels belong to superpixels along y.
    Return:
        torch.tensor of shape (b, nx*ny, h, w) — joint assignment
        probabilities.
    """
    b, _, h, w = spixel_x.shape
    # Broadcasted multiply: (b, nx, 1, h, w) * (b, 1, ny, h, w).
    joint = spixel_x.unsqueeze(2) * spixel_y.unsqueeze(1)
    return joint.view(b, -1, h, w).contiguous()
|
214916083bc2bbdc153f082be648623e6609f238
| 141,974 |
def expand_unknown_vocab(line, vocab):
    """
    Expand out-of-vocabulary words in *line* into per-character tokens.

    Words present in *vocab* pass through unchanged; any other word is
    replaced by its characters, each wrapped in angle brackets. For
    example the unknown word "Spoon" becomes "<S> <p> <o> <o> <n>".

    :param line: space-separated string of tokens
    :param vocab: iterable of known words
    :return: the line with unknown words expanded
    """
    known = set(vocab)
    expanded = []
    # Token-wise substitution: the old str.replace() also rewrote matches
    # inside longer words and left a trailing space after each expansion.
    for token in line.split(' '):
        if token == '' or token in known:
            # Keep empty tokens so original spacing survives the join.
            expanded.append(token)
        else:
            expanded.append(' '.join(f"<{c}>" for c in token))
    return ' '.join(expanded)
|
9c20267e7bf6a31f7f21411ec1db701e088268a1
| 636,625 |
def sources_list(sources, params):
    """
    Add an indexed list of payment sources to request params.

    Parameters
    ----------
    sources : list
        Payment sources
    params : dict
        Default params (mutated in place)

    Returns
    -------
    dict
        params with ``sources[i]`` entries added

    Raises
    ------
    TypeError
        If ``sources`` is not a list.
    """
    if not isinstance(sources, list):
        raise TypeError('You should use list Type for sources')
    # enumerate() gives each entry its own index; the previous
    # sources.index(...) lookup was O(n^2) and collapsed duplicate
    # values onto a single key.
    for idx, source in enumerate(sources):
        params['sources[{0}]'.format(idx)] = source
    return params
|
7d056479fcc2144b1b886d60f5eca749c7c0303a
| 361,793 |
def docutils_node_to_jinja(list_item, only_pages=False, numbered=False):
    """Convert a docutils node to a structure that can be read by Jinja.
    Parameters
    ----------
    list_item : docutils list_item node
        A parent item, potentially with children, corresponding to the level
        of a TocTree.
    only_pages : bool
        Only include items for full pages in the output dictionary. Exclude
        anchor links (TOC items with a URL that starts with #)
    numbered: bool
        Whether to add section number to title
    Returns
    -------
    nav : dict
        The TocTree, converted into a dictionary with key/values that work
        within Jinja. Returns None for empty items and for anchor links
        when only_pages is True.
    """
    if not list_item.children:
        return None
    # We assume this structure of a list item:
    # <list_item>
    #     <compact_paragraph >
    #         <reference> <-- the thing we want
    reference = list_item.children[0].children[0]
    title = reference.astext()
    url = reference.attributes["refuri"]
    active = "current" in list_item.attributes["classes"]
    # secnumber is a tuple of ints like (2, 1); rendered as "2.1. Title".
    secnumber = reference.attributes.get("secnumber", None)
    if numbered and secnumber is not None:
        secnumber = ".".join(str(n) for n in secnumber)
        title = f"{secnumber}. {title}"
    # If we've got an anchor link, skip it if we wish
    if only_pages and '#' in url:
        return None
    # Converting the docutils attributes into jinja-friendly objects
    nav = {}
    nav["title"] = title
    nav["url"] = url
    nav["active"] = active
    # Recursively convert children as well
    # If there are sub-pages for this list_item, there should be two children:
    # a paragraph, and a bullet_list.
    nav["children"] = []
    if len(list_item.children) > 1:
        # The `.children` of the bullet_list has the nodes of the sub-pages.
        subpage_list = list_item.children[1].children
        for sub_page in subpage_list:
            # Filtered children (None) are dropped rather than appended.
            child_nav = docutils_node_to_jinja(sub_page, only_pages=only_pages,
                                               numbered=numbered)
            if child_nav is not None:
                nav["children"].append(child_nav)
    return nav
|
487ddc50fd69fb39a37d3bea48acf7221f81c0b8
| 407,246 |
def is_weekend_worked(x, wkend_type, i):
    """
    Return True if the i'th weekend is worked (fully or partially).

    :param x: list of 2-tuples of binary values, one per week; the tuple
        gives whether the first and second weekend day of that week are
        worked (1) or off (0)
    :param wkend_type: 1 --> weekend consists of Saturday and Sunday
                       2 --> weekend consists of Friday and Saturday
    :type wkend_type: int
    :param i: which weekend to check (1-based)
    :type i: int
    :return: True if any day of weekend i is worked, False otherwise

    Example:
        >>> is_weekend_worked([(0, 1), (1, 0), (0, 1), (1, 0)], 1, 1)
        True
        >>> is_weekend_worked([(0, 1), (1, 0), (0, 1), (1, 0)], 1, 2)
        False
    """
    if wkend_type == 2:
        # Friday and Saturday both fall within the same week's tuple.
        return bool(sum(x[i - 1]))
    # Sat/Sun weekends: Sunday belongs to the following week's tuple,
    # wrapping around to week 0 for the final weekend of the cycle.
    sunday_week = i if i < len(x) else 0
    return (x[i - 1][1] + x[sunday_week][0]) > 0
|
e93069b0ebffdc32f1a5d415508e1f19c9f5a9db
| 353,351 |
from typing import List
import string
def format_int_alpha(value: int) -> str:
    """Format a positive integer in bijective base-26: a-z, aa-zz, etc."""
    assert value > 0
    alphabet = string.ascii_lowercase
    letters: List[str] = []
    # Bijective numeration: subtract one before each divmod so that 26
    # maps to 'z' rather than rolling over to 'a0'.
    while value:
        value, idx = divmod(value - 1, len(alphabet))
        letters.append(alphabet[idx])
    return "".join(reversed(letters))
|
be83a2d76aa7f983a8603b4dfd46e46a5d05df5c
| 149,983 |
def LowerCamelCase(upperCamelCaseStr):
    """
    Return the lowerCamelCase variant of an upper camel case string.

    The empty string is returned unchanged (the old code indexed [0]
    unconditionally and raised IndexError on "").
    """
    if not upperCamelCaseStr:
        return upperCamelCaseStr
    return upperCamelCaseStr[0].lower() + upperCamelCaseStr[1:]
|
321e22b96984a241f5ad79ecd7297f58792bb384
| 61,648 |
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = list(js_source.provides)
provides.sort()
requires = list(js_source.requires)
requires.sort()
return 'goog.addDependency(\'%s\', %s, %s);\n' % (path, provides, requires)
|
e4f5cec4d8e6e57bf0d825fc6eea9cb8cc7b940c
| 370,550 |
def rename_ending(file: str, condition: str, ending: str) -> str:
    """
    Conditionally rename a file name's ending.

    :param file: str file name
    :param condition: str file name ending to be compared with
    :param ending: str new ending used when the file matches *condition*
    :return: str

    NOTE(review): when the file does NOT end with *condition*, the stem is
    given *condition* instead of being left alone — behavior preserved
    from the original; confirm this is intended.
    """
    stem = file.rsplit(".", 1)[0]
    if file.endswith(condition):
        return stem + ending
    return stem + condition
|
ffdb1eb96b35ddcd1d40991bedbb53dcdb211f3a
| 285,487 |
def _prepare_params(params):
"""Convert lists to strings with ',' between items."""
for (key, value) in params.items():
if isinstance(value, list):
params[key] = ','.join([item for item in value])
return params
|
ab012f730e4596618da14986184eee3f3085a27f
| 311,440 |
def get_vad_out_from_rttm_line(rttm_line):
    """
    Extract the VAD (start, duration) pair from one RTTM line.

    Supports both the full RTTM format (start in field 3, duration in
    field 4) and a short ``start dur ...`` form.

    Returns:
        tuple(float, float): start time and duration in seconds.
    """
    fields = rttm_line.strip().split()
    if len(fields) > 3:
        # Full RTTM: SPEAKER <file> <chan> <start> <dur> ...
        start, dur = float(fields[3]), float(fields[4])
    else:
        start, dur = float(fields[0]), float(fields[1])
    # The old float("{:}".format(x)) str round-trip was a no-op (repr
    # round-trips floats exactly) and has been removed.
    return start, dur
|
08cc36d23b916411878141824734143a604cf9af
| 451,212 |
def range_str(range_str, sort=True):
    """Parse a hyphen/comma range string into a list of number strings.

    Example:
        >>> range_str('1-4,6,9-11')
        ['1', '2', '3', '4', '6', '9', '10', '11']

    Takes a range of the form "a-b" and produces the numbers from a to b
    inclusive; comma-separated ranges like "a-b,c-d,f" are concatenated.

    Parameters
    ----------
    range_str : str
        of form 'a-b,c' where a hyphen indicates a range and a comma
        separates ranges or single numbers
    sort : bool
        If True, sort output numerically before returning. Default is True.

    Returns
    -------
    list_range : list
        of str, produced by parsing range_str

    Raises
    ------
    SyntaxError
        If a comma-separated chunk contains more than one hyphen.
    """
    # adapted from
    # http://code.activestate.com/recipes/577279-generate-list-of-numbers-from-hyphenated-and-comma/
    # Remove all whitespace first; the old code computed this cleaned
    # string but then parsed the raw input, leaving the cleanup dead.
    cleaned = "".join(range_str.split())
    list_range = []
    for substr in cleaned.split(','):
        bounds = substr.split('-')
        if len(bounds) == 1:
            list_range.append(int(bounds[0]))
        elif len(bounds) == 2:
            list_range.extend(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            raise SyntaxError("unable to parse range {} in labelset {}."
                              .format(bounds, substr))
    if sort:
        list_range.sort()
    return [str(n) for n in list_range]
|
7648683ec79ad93dcddd98378ab40f2dffb1f6e8
| 494,471 |
def isprimer(n: int) -> bool:
    """Is n prime?
    >>> isprimer(2)
    True
    >>> tuple( isprimer(x) for x in range(3,11) )
    (True, False, True, False, True, False, False, False)
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n); iterative form of
    # the usual tail recursion.
    candidate = 3
    while candidate * candidate <= n:
        if n % candidate == 0:
            return False
        candidate += 2
    return True
|
33fccf076604601bd40298f555180ee5388b00af
| 69,618 |
def gmres_params(n_krylov=40, max_restarts=20, tol_coef=0.01):
    """
    Bundle parameters for the GMRES linear solver, which controls the
    expense of finding the left and right environment Hamiltonians.

    PARAMETERS
    ----------
    n_krylov (int): Size of the Krylov subspace.
    max_restarts (int): Maximum number of times to iterate the Krylov
                        space construction.
    tol_coef (float): This number times the MPS gradient sets the
                      convergence threshold of the linear solve.

    RETURNS
    -------
    dict: the solver name plus each of these parameters.
    """
    params = dict(solver="gmres",
                  n_krylov=n_krylov,
                  max_restarts=max_restarts,
                  tol_coef=tol_coef)
    return params
|
4eabe3f6595141b65e5097c449600089cad099b1
| 370,557 |
def gardner(vp, alpha=310, beta=0.25):
    """
    Gardner's relation: bulk density (kg/m^3) from P-wave velocity (m/s).

    density = alpha * vp ** beta
    """
    density = alpha * (vp ** beta)
    return density
|
12c4ca35171b7091dac466a782b438e341fc3d71
| 623,875 |
def escape_sql_id(string):
    """Escape *string* into a valid double-quoted SQL identifier."""
    doubled = string.replace('"', '""')
    return '"{}"'.format(doubled)
|
8764d87683da1a5f40a835227ab29cca756fa228
| 246,812 |
def is_overlapping(segment_time, previous_segments):
    """
    Check whether a time segment overlaps any existing segment.

    Arguments:
    segment_time -- a tuple of (segment_start, segment_end) for the new segment
    previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments

    Returns:
    True if the time segment overlaps with any of the existing segments, False otherwise
    """
    segment_start, segment_end = segment_time
    # Two closed intervals overlap iff each starts before the other ends.
    # any() short-circuits on the first overlap instead of scanning the
    # whole list as the old flag-variable loop did.
    return any(segment_start <= prev_end and segment_end >= prev_start
               for prev_start, prev_end in previous_segments)
|
27b817a76829eb7eba63d3fd22376e4164a7bf39
| 694,786 |
import csv
def loadFlags(file):
    """Read a CSV file of flags, skipping the first (header) line.

    Returns a flat 1-D list when every row (header included) has exactly
    one column, otherwise a 2-D list of rows.
    """
    with open(file) as fstrm:
        rows = list(csv.reader(fstrm))
    single_column = all(len(row) == 1 for row in rows)
    if single_column:
        return [row[0] for row in rows[1:]]
    return rows[1:]
|
cfd48269ed94b47dfd2c12e6a7f66f8106253f15
| 694,779 |
import logging
def _check_api_error_message(json_data):
"""
Returns an error if one occurred and an empty string if the response
was successful
json_data -- the data returned from the UW API query
"""
#Ensure the response was received
status_code = json_data['meta']['status']
logging.info("Status code from UW API response is: {}".format(status_code))
#Notify if there are errors
if status_code is not 200:
error_message = json_data['meta']['message']
return error_message
return ""
|
3c0c274a599201683d35b48e04b2b27abcdc056f
| 532,236 |
def unflatten_like(vector, likeTensorList):
    """
    Split a flat 1-D torch.tensor into views shaped like likeTensorList.

    Arguments:
        vector (torch.tensor): flat one dimensional tensor
        likeTensorList (list or iterable): tensors whose total element
            count matches ``vector``; their shapes drive the split.
    """
    shaped = []
    offset = 0
    for like in likeTensorList:
        count = like.numel()
        # Consecutive slices of the flat vector, reshaped per template.
        shaped.append(vector[offset:offset + count].view(like.shape))
        offset += count
    return shaped
|
ae121ac963df5352d346fb59f8c9560b919e2bb4
| 573,280 |
def read_file(location, chomp=True):
    """
    Return the contents of the file at *location*.

    Args:
        location   Path of the file to read.
        chomp      When True (the default), trailing newlines are
                   stripped from the returned contents.
    Raises:
        IOError    If the specified file does not exist.
    """
    with open(location) as file_handle:
        contents = file_handle.read()
    if not chomp:
        return contents
    return contents.rstrip('\n')
|
b736b8bf19ac30608a9f9105a1a2fe49a4a74c74
| 146,172 |
def send_onboarding_message(self, user_email: str) -> dict:
    """
    Send the onboarding message to a user.

    :param user_email: The email of the user to be on-boarded.
    :type user_email: str
    :return: The response from the message request as a dict.
    :rtype: dict
    """
    user_id = self.get_user_by_email(email=user_email)["user"]["id"]
    opened = self.bot_client.conversations_open(users=[user_id])
    # Bail out with the raw failure payload if the DM could not be opened.
    if not opened["ok"]:
        return opened
    block = self.messenger.get_welcome_block(channel=opened["channel"]["id"])
    return self._send_block_message(message=block)
|
a09790df550d68df209220746b48dc7d370f179f
| 305,163 |
def valid_att_in_field(arch):
    """A `name` attribute must be in a `field` node.

    Returns True when no `field` node lacks a `name` attribute.
    """
    nameless_fields = arch.xpath('//field[not (@name)]')
    return len(nameless_fields) == 0
|
e61d61bf4e6f38d256d50786bd74b7b772b2ca5d
| 211,143 |
def toklist_print(toklist):
    """
    Join a token list back into source text with proper whitespace.

    Falsy tokens are skipped; a single leading space is emitted before
    any token whose ``prev_white`` flag is set.
    """
    pieces = []
    for token in toklist:
        if not token:
            continue
        prefix = " " if token.prev_white else ""
        pieces.append(prefix + str(token))
    return ''.join(pieces)
|
24c859be10229da0bb0c39b03e79a86f63089072
| 419,118 |
def _convert_1hot_to_str(one_hot):
"""
Convert a one-hot class label vector, shape (1, num_classes), to a string.
Returns:
one_hot_string
"""
one_hot_string = one_hot.tostring()
return one_hot_string
|
dc92d73caa79bd81a2ca14a640b3fe5e6a852da7
| 474,535 |
import ast
def matches_attr(node, name):
    """Determine if the ``ast.Call`` node invokes an attribute with the given name.

    Args:
        node (ast.Call): a node that represents a function call. For more,
            see https://docs.python.org/3/library/ast.html#abstract-grammar.
        name (str): the function name.

    Returns:
        bool: whether ``node.func`` is an ``ast.Attribute`` whose attr matches.
    """
    func = node.func
    if not isinstance(func, ast.Attribute):
        return False
    return func.attr == name
|
fe76ee845fabc965ba15475c2a6c4098cbeae221
| 630,078 |
from typing import TextIO
def read_until(f: TextIO, char: str) -> str:
    """
    Read *f* one character at a time, returning everything up to and
    including *char*. Raise ValueError if EOF arrives before *char*.
    """
    # Degenerate case: searching for "" matches before reading anything.
    if char == "":
        return ""
    pieces = []
    while True:
        cur = f.read(1)
        if cur == "":
            raise ValueError(f"Hit end of file before finding '{char}'")
        pieces.append(cur)
        if cur == char:
            return "".join(pieces)
|
9750131df37c936d35403fb6c984435d870182e9
| 610,820 |
from typing import Tuple
def to_isometric(position: Tuple[int, int], tile_size: int = 64) -> Tuple[int, int]:
    """Translate screen (X,Y) coordinates into isometric space.

    Args:
        position: (Tuple[int, int]) - position in top-down cartesian space
        tile_size: (int) - size (length & width) of a square world tile
            in cartesian space
    Returns
        Tuple[int, int] - position of the given point in isometric space
    """
    cart_x, cart_y = position
    iso_x = round((cart_x - cart_y) * tile_size)
    # The isometric y-axis is foreshortened to half the tile height.
    iso_y = round((cart_x + cart_y) * tile_size * 0.5)
    return iso_x, iso_y
|
cf9b2b562ec29b42fd349c154df7cf5c790e16ce
| 517,757 |
def six_hump_camelback(x):
    """
    Six-hump Camelback benchmark function (2-D).

    Global optima: -1.031628453489877 at (-0.08983, 0.7126) and
    (0.08983, -0.7126).

    Parameters
    ----------
    x : array
        2 x-values. `len(x)=2`. `x[i]` bound to [-5, 5] for i=1 and 2.

    Returns
    -------
    float
        Value of the Six-hump Camelback function.
    """
    x1, x2 = x[0], x[1]
    term1 = (4 - 2.1 * x1 ** 2 + x1 ** 4 / 3) * x1 ** 2
    term2 = x1 * x2
    term3 = (-4 + 4 * x2 ** 2) * x2 ** 2
    return term1 + term2 + term3
|
17e63651b686e9505e72774eea6beddb508d72f6
| 154,479 |
def valid_conversion(val, type_to_convert):
    """
    Report whether *val* can be converted to the specified type.

    :param val: value
    :param type_to_convert: target type (must itself be a type object)
    :return: boolean
    :raises TypeError: if *type_to_convert* is not a type
    """
    if not isinstance(type_to_convert, type):
        raise TypeError
    try:
        type_to_convert(val)
    except ValueError:
        return False
    return True
|
6dddf95d633c55b63e1ed96a7efe3e8a7c108045
| 693,184 |
def get_number_of_verb(sentence_token):
    """Count the verbs in a sentence.

    Args:
        sentence_token (tuple): (sentence length, list of token objects);
            only the token list (index 1) is inspected.

    Returns:
        int: number of tokens whose ``pos`` is 'VERB'.
    """
    tokens = sentence_token[1]
    return sum(1 for token in tokens if token.pos == 'VERB')
|
7f9d404e204612ef8a5379566bde802997fc30ef
| 641,055 |
from typing import Any
import importlib
def dynamic_import_from(source_file: str, class_name: str) -> Any:
    """Perform ``from source_file import class_name`` dynamically.

    Args:
        source_file (str): Module path to import from.
        class_name (str): Attribute name to fetch from that module.

    Returns:
        Any: The imported attribute (class, function, constant, ...).
    """
    module = importlib.import_module(source_file)
    target = getattr(module, class_name)
    return target
|
90c861f727c8e6f20f89b7af24c2163bc65bd516
| 697,319 |
def _dechunk(raw):
"""
Given a BLE advertisement in hex format, interpret the first
byte as a length byte, return the data indicated by the length
byte, and the remainder of the data in a tuple.
The lenght byte itself is not included in the length.
If the length indicated is longer than the data, raise a ValueError
"""
if len(raw) < 2:
raise ValueError("Data too short")
dlen = int(raw[:2], 16)
if (dlen + 1) * 2 > len(raw):
raise ValueError("Cannot read %d bytes, data too short: %s" % (dlen, raw))
return raw[2:(dlen * 2) + 2], raw[(dlen * 2) + 2:]
|
f04c8c91895a8c8b7b9a3b46461082531cf705c2
| 462,048 |
def add_sub_idx(i1: int, change: int):
    """
    Add a (possibly negative) offset to an index, skipping 0.

    :param i1: The start index
    :param change: The change (+ or -)
    :return: The new index

    >>> add_sub_idx(-10, 10)
    1
    >>> add_sub_idx(10, -10)
    -1
    >>> add_sub_idx(-5, 10)
    6
    """
    result = i1 + change
    # Crossing zero upward or downward consumes one extra step, because
    # index 0 does not exist in this scheme.
    if i1 < 0 < result + 1:
        result += 1
    elif result - 1 < 0 < i1:
        result -= 1
    return result
|
942db377efb13da01e381ea76833b207e7ec5e0a
| 378,303 |
def unindent(text, skip1=False):
    """Remove leading spaces that are present in all lines of ``text``.

    Parameters
    ----------
    text : str
        The text from which leading spaces should be removed.
    skip1 : bool
        Ignore the first line when determining how many spaces to
        unindent, and strip all leading whitespace from that line.
    """
    lines = text.splitlines()
    start = 1 if skip1 else 0
    # Collect each non-blank line's count of leading spaces.
    indents = []
    for line in lines[start:]:
        stripped_len = len(line.lstrip(' '))
        if stripped_len:
            indents.append(len(line) - stripped_len)
    if len(indents) > start:
        depth = min(indents)
        if depth:
            if skip1:
                # Re-pad the first line so the uniform slice below
                # removes exactly its (freshly stripped) leading space.
                lines[0] = ' ' * depth + lines[0].lstrip()
            text = '\n'.join(line[depth:] for line in lines)
    return text
|
c787f5176b7b38ab5e6caec5175c4de3bbf1bbf5
| 40,529 |
def count_from_0(index, collection):
    """Numbering function: consecutive integers starting at 0.

    Parameters
    ----------
    index : int
        Zero-based position of the current item; returned unchanged.
    collection
        The item being numbered; unused here, but kept so all numbering
        functions share the (index, collection) signature.
    """
    return index
|
bc01bc8e46e913c325e4972b3bd71cd4d2543237
| 653,403 |
def get_cell_name(row):
    """Build a cell name from the row's FoV name and (integer) cell id."""
    fov = row["fov_name"]
    cell = int(row["cell_id"])
    return "{0}_{1}".format(fov, cell)
|
be051ecf9e07c14227b696aceef2a7942f98bb38
| 335,748 |
import re
def split_into_words(in_str: str) -> list:
    """
    Split the given string into a list of lowercase words.

    Slashes, hyphens, and whitespace all separate words; any remaining
    non-alphabetic characters are dropped.

    :param in_str: The string to split into words
    :return: A list of words in the given string
    """
    # Raw string: '\-' inside a plain literal is an invalid escape
    # sequence (SyntaxWarning on recent Pythons), even though re happened
    # to treat it as a literal '-'.
    in_str = re.sub(r'[/ \-\t\n]+', ' ', in_str).strip().lower()
    in_str = ''.join(c for c in in_str if c.isalpha() or c == ' ')
    return [word for word in in_str.split(' ') if word]
|
4248f79a7ccf398154cc8b8a0eda9517e2dd9255
| 344,240 |
import torch
def channel_shuffle(x, groups):
    """Channel Shuffle operation from ShuffleNet [arxiv: 1707.01083].

    Splits the channel axis into `groups`, transposes the group and
    per-group axes, and flattens back, interleaving channels across groups.

    Arguments:
        x (Tensor): tensor to shuffle, shape (batch, channels, h, w).
        groups (int): number of groups; must divide channels evenly.
    """
    batch, channels, height, width = x.size()
    grouped = x.view(batch, groups, channels // groups, height, width)
    # contiguous() is required before the final view after transpose.
    shuffled = grouped.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
|
0a89ee012784328294dedfc11975f06558eb45d2
| 388,871 |
def deep_update(cfg, other):
    """Recursively merge *other* into *cfg* in place and return *cfg*.

    When both sides hold a dict under the same key the merge recurses;
    otherwise the value from *other* overrides. Keys ending in "_map"
    are always replaced wholesale, never merged.
    """
    for key, value in other.items():
        both_dicts = (isinstance(value, dict)
                      and isinstance(cfg.get(key), dict))
        if both_dicts and not key.endswith("_map"):
            deep_update(cfg[key], value)
        else:
            cfg[key] = value
    return cfg
|
2665d0174a03d74b3933e15746c9b320f8c44243
| 578,175 |
def valueclass(*members):
    """
    A classdecorator that makes a class into a standard value type.
    This decorator provides an __init__ method that takes in the supplied
    members and stores them in instance variables.
    This decorator also provides standard __eq__, __repr__ and __str__
    functions that use the supplied members
    """
    # NOTE(review): no __str__ is actually assigned below — str() falls
    # back to the generated __repr__. No __hash__ is provided either, so
    # instances hash by identity even though __eq__ compares by value;
    # confirm that is intended before using instances as dict/set keys.
    def _wrong_init_args_message(member_name):
        # Mimics CPython's TypeError wording for a missing positional arg.
        return (
            "__init__() missing 1 required positional argument: "
            + "'{arg}'").format(
            arg=member_name
        )
    def _too_many_args_message(args):
        if len(members) == 0:
            return "object() takes no parameters"
        else:
            # The +1 on both counts accounts for the implicit `self`.
            return (
                "__init__() takes "
                + "{lenmem} positional arguments but "
                + "{lenargs} were given"
            ).format(
                lenmem=len(members) + 1,
                lenargs=len(args) + 1
            )
    def _multiple_values_message(member_name):
        return (
            "__init__() got multiple values for argument '{arg}'").format(
            arg=member_name
        )
    def value_init(self, *args, **kwargs):
        # Bind each declared member from positionals first, then from
        # keywords; supplying a member both ways, or omitting one, raises.
        for (i, m) in enumerate(members):
            if i < len(args):
                self.__dict__[m] = args[i]
                if m in kwargs:
                    raise TypeError(_multiple_values_message(m))
            elif m in kwargs:
                self.__dict__[m] = kwargs[m]
            else:
                raise TypeError(_wrong_init_args_message(m))
        if len(args) > len(members):
            raise TypeError(_too_many_args_message(args))
        # Reject keyword arguments that are not declared members.
        for k in kwargs:
            if k not in members:
                raise TypeError(
                    (
                        "__init__() got an unexpected keyword "
                        + "argument '{k}'"
                    ).format(k=k)
                )
    def value_eq(self, other):
        # Value equality: other must be an instance of this (or a derived)
        # class and all declared members must compare equal.
        if not isinstance(other, type(self)):
            return False
        for m in members:
            if self.__dict__[m] != other.__dict__[m]:
                return False
        return True
    def value_repr(self):
        return "{type}({args})".format(
            type=type(self).__name__,
            args=", ".join(repr(self.__dict__[m]) for m in members)
        )
    def ret(cl):
        # Install the generated methods on the decorated class in place.
        cl.__init__ = value_init
        cl.__eq__ = value_eq
        cl.__repr__ = value_repr
        return cl
    return ret
|
2c02952f61da94c7b846fbd5d26849e0994b2d49
| 571,713 |
def choose_dimensions(valid_dims, overrides=None):
    """For each dimension, choose a single valid option (except for 'time',
    where we use the wildcard '*' to get the whole time-series.)
    If not specified, choose the first valid option for each dimension.

    Parameters
    ----------
    valid_dims : dict
        map of dicts of valid dimension values
    overrides : dict, optional
        selected dimensions

    Returns
    -------
    dict
        final choice of dimensions
    """
    # `overrides={}` as a literal default is the classic mutable-default
    # trap; use None as the sentinel instead (behavior is unchanged).
    # By default, choose first valid item for all dimensions; then override where needed:
    chosen_dimensions = {k: next(iter(v.keys())) for k, v in valid_dims.items()}
    # get whole time-series, not just a single point in time:
    chosen_dimensions["time"] = "*"
    if overrides:
        chosen_dimensions.update(overrides)
    return chosen_dimensions
|
e5c732b88852e9a07116818935587f575ab290cf
| 289,318 |
def set_mock_response(requests_mock, response):
    """Register the given MockResponse with requests_mock and return it."""
    registration = response.as_dict()
    requests_mock.register_uri(**registration)
    return response
|
06adc17686602b621ebe8ef5417cafccda204e2d
| 152,390 |
def swap_to_title_case(word):
    """
    Convert a word from any case to title case. E.g. 'mumbai' becomes 'Mumbai'.

    :param word: A string to convert to Title case.
    :return: A string in Title case; the empty string is returned
        unchanged (the old code indexed [0] unconditionally and raised
        IndexError on "").
    """
    if not word:
        return word
    # Lowercase once instead of twice, then uppercase the first character.
    lowered = word.lower()
    return lowered[0].upper() + lowered[1:]
|
e78861e5c933684ed18d3e8aecaf7cdfe86260e7
| 213,361 |
def clean_dalton_label(original_label: str) -> str:
    """Normalize a DALTON operator/integral label.

    DALTON labels are uppercase and may contain spaces; lowercase them
    and replace spaces with underscores.

    >>> clean_dalton_label("PSO 002")
    'pso_002'
    """
    lowered = original_label.lower()
    return lowered.replace(" ", "_")
|
e52f4aa75ee4ed0bc25e963b4616d0cd3e9fedc4
| 211,464 |
def toNearest(n, res):
    """
    Round *n* up to the nearest multiple of *res*.

    :param n: Number
    :param res: Resolution
    :return: Smallest multiple of res that is >= n (n itself when it is
             already a multiple)
    """
    if n % res == 0:
        return n
    # -(-n // res) computes ceil(n / res) without importing math.
    return -(-n // res) * res
|
21033e60a4a5059acbe90fcd6f4782b242b55006
| 446,891 |
from datetime import datetime
def get_utc_dt(event_time):
    """Parse the API's UTC start-time payload into a datetime object.

    Reads ``data.attributes.starts_at`` (ISO-8601 with a literal 'Z').
    """
    starts_at = event_time['data']['attributes']['starts_at']
    return datetime.strptime(starts_at, '%Y-%m-%dT%H:%M:%SZ')
|
391a0722e40e6cf53e93662d43caa17e3a6c4560
| 374,872 |
def denormalize(img, mean, std):
    """Undo mean/std normalization of an array of images.

    Parameters
    ----------
    img: array
        An array of normalized images.
    mean: float
        Mean originally subtracted from the images.
    std: float
        Standard deviation the images were originally divided by.
    """
    scaled = img * std
    return scaled + mean
|
1a459a5dd76a69e862c404677eefaaaa3c5b54e2
| 609,423 |
def color_to_rgba(color):
    """Convert a color (RGB/RGBA tuple or hex string) to an RGBA tuple.

    Accepted inputs (not strictly validated):
      * 4-tuple (r, g, b, a) of 0.0-1.0 values -> returned unchanged
      * 3-tuple (r, g, b)                      -> alpha 1 is appended
      * hex strings, with or without a leading '#':
        'f00', 'f00c', 'ff0000', 'ff0000cc'

    Returns:
        A 4-tuple of channel values in the 0.0-1.0 range.

    Raises:
        ValueError: for anything that matches none of the above shapes.
    """
    if isinstance(color, tuple):
        if len(color) == 4:
            return color
        if len(color) == 3:
            return (*color, 1)
    if isinstance(color, str):
        digits = color.lstrip('#')
        if len(digits) in (3, 4):
            # Expand shorthand: each hex digit doubles ('f' -> 'ff').
            digits = ''.join(ch * 2 for ch in digits)
        if len(digits) == 6:
            digits += 'ff'  # opaque alpha by default
        if len(digits) == 8:
            channels = []
            for pos in range(0, 8, 2):
                channels.append(int(digits[pos:pos + 2], 16) / 255)
            return tuple(channels)
    raise ValueError(f'Invalid color: {color}')
|
cdec624a54e3a1ed3c5d8854dc93c82c453abf57
| 114,051 |
def get_incoming_connections(individual, node):
    """Given an individual and a node, returns the connections in individual that end at the node"""
    target_id = node.id
    return [conn for conn in individual.enabled_connections()
            if conn.to_node.id == target_id]
|
df5a4acd99866e3b768d54a2d85256887f11091a
| 530,597 |
import re
def remove_punct_tokens(mylist):
    """Takes a tokenized text and removes punctuation tokens"""
    # A token counts as punctuation when its first character is neither
    # a word character nor whitespace.
    punct_start = re.compile(r'^[^\w\s]')
    return [token for token in mylist if not punct_start.match(token)]
|
b430478b1c3da2c63cfad52d82037380a224dd28
| 262,044 |
import collections
def plot_chromosome_dividers(axis, chrom_sizes, pad=None, along='x'):
    """Given chromosome sizes, plot divider lines and labels.

    Draws black divider lines between chromosomes (with ``pad`` spacing on
    each side), labels each chromosome region with its name centered under a
    tick, and sets the axis limits to the covered range.  With ``along='x'``
    the dividers are vertical and the labels sit on the X axis; ``along='y'``
    transposes this.

    Returns
    -------
    OrderedDict
        A table of the position offsets of each chromosome along the
        specified axis.
    """
    assert isinstance(chrom_sizes, collections.OrderedDict)
    if pad is None:
        # Default padding proportional to the total covered length.
        pad = 0.003 * sum(chrom_sizes.values())
    if along not in ('x', 'y'):
        raise ValueError('Direction for plotting chromosome dividers and labels along must be either x or y.')

    starts = collections.OrderedDict()
    dividers = []
    centers = []
    offset = pad
    for name, size in chrom_sizes.items():
        starts[name] = offset
        centers.append(offset + 0.5 * size)
        dividers.append(offset + size + pad)
        offset += size + 2 * pad

    labels = list(chrom_sizes.keys())
    if along == 'x':
        axis.set_xlim(0, offset)
        for position in dividers[:-1]:
            axis.axvline(x=position, color='k')
        # Chromosome names replace base positions as tick labels.
        axis.set_xticks(centers)
        axis.set_xticklabels(labels, rotation=90)
        axis.tick_params(labelsize='small')
        axis.tick_params(axis='x', length=0)
        axis.get_yaxis().tick_left()
    else:
        axis.set_ylim(0, offset)
        for position in dividers[:-1]:
            axis.axhline(y=position, color='k')
        # Chromosome names replace base positions as tick labels.
        axis.set_yticks(centers)
        axis.set_yticklabels(labels)
        axis.tick_params(labelsize='small')
        axis.tick_params(axis='y', length=0)
        axis.get_xaxis().tick_bottom()
    return starts
|
2698380cc827e25fe7b5f09ee7fdb683afa320c6
| 336,530 |
import logging
def create_logger() -> logging.Logger:
"""Create logger.
Returns:
logging.Logger: General logger.
"""
logging.basicConfig(format="%(message)s", level=logging.INFO)
logger = logging.getLogger('gyomei_detection')
return logger
|
bfe6d0e06389f0bd8d2500cf09eef426bc755b0a
| 209,365 |
def _mat_vec_dot_fp(x, y):
"""Matrix (list of list) times vector (list)."""
return [sum(a * b for a, b in zip(row_x, y)) for row_x in x]
|
9ab85561ee5eee056489324e5f5c594cb573c979
| 113,244 |
def encode(integer: int) -> bytes:
    """Encodes an integer as an uvarint.

    :param integer: the integer to encode
    :return: bytes containing the integer encoded as an uvarint
    """
    pieces = bytearray()
    # Emit 7 payload bits at a time; the high bit marks "more bytes follow".
    while integer >= 0b1000_0000:
        pieces.append((integer & 0b1111_1111) | 0b1000_0000)
        integer >>= 7
    pieces.append(integer & 0b1111_1111)
    return bytes(pieces)
|
da3b6b320ddcc39ecf494fca564d6d3ae06faea9
| 695,434 |
import torch
def log_binom(n, k):
    """ Returns the log of n choose k, via log-gamma factorials. """
    log_n_fact = torch.lgamma(n + 1)
    log_k_fact = torch.lgamma(k + 1)
    log_nk_fact = torch.lgamma((n - k) + 1)
    return log_n_fact - (log_k_fact + log_nk_fact)
|
b9275e8043533790cef92dcf9e65226a3e84dc6f
| 76,816 |
def get_partners(bam, bams):
    """
    Returns the partner bam files having the same ID

    Matching is by substring containment: every entry of ``bams`` whose
    name contains ``bam`` is returned.
    """
    return [candidate for candidate in bams if bam in candidate]
|
d691918b5f7ac8149c5afd0002b8ef245aed4f15
| 671,065 |
from hashlib import sha3_512
def sha512(obj: bytes) -> bytes:
    """
    hash any bytes with sha3-512
    :param obj: bytes object
    :return: the 64-byte digest
    """
    return sha3_512(obj).digest()
|
b6bd466589142c5a2dbe90fbd27d7a7c99d688f7
| 229,581 |
def dist_to_num(soup):
    """Extracts number from string e.g. '(0.4 miles)' -> 0.4"""
    text = soup.text.strip()
    # Take what follows the last '(' , drop the ')' , keep the first word.
    inside = text.split('(')[-1].replace(')', '')
    return float(inside.split(' ')[0])
|
c71d3f4dbbe592805be7e1253dbe9dc1bd1a8b4f
| 375,304 |
def hangman(leftovertries):
    """Print out hangman ASCII graphic on incorrect guesses.

    Returns (does not print) the ASCII frame for the given number of
    remaining tries: index 0 is the fully drawn figure (game lost),
    index 10 is the empty ground (no incorrect guesses yet).
    """
    # Frames ordered from worst (0 tries left) to best (10 tries left).
    allhangmans = [
        """ ______
|/ |
| o
| ´|`
| / \\
__|________""",
        """ ______
|/ |
| o
| ´|`
| /
__|________""",
        """ ______
|/ |
| o
| ´|`
|
__|________""",
        """ ______
|/ |
| o
| ´|
|
__|________""",
        """ ______
|/ |
| o
| |
|
__|________""",
        """ ______
|/ |
| o
|
|
__|________""",
        """ ______
|/ |
|
|
|
__|________""",
        """ ______
|/
|
|
|
__|________""",
        """ ______
|
|
|
|
__|________""",
        """
|
|
|
|
__|________""",
        """
___________""",
    ]
    # Index directly by remaining tries; raises IndexError outside 0-10.
    return allhangmans[leftovertries]
|
46702936d9e9f3626458ffc9767735c91dbb11d0
| 322,814 |
def int_bounds(signed, bits):
    """
    calculate the bounds on an integer type

    ex. int_bounds(True, 8)  -> (-128, 127)
        int_bounds(False, 8) -> (0, 255)

    :param signed: whether the type is signed (two's complement)
    :param bits: width of the type in bits
    :return: (lowest, highest) representable value
    """
    # NOTE: the original docstring showed the arguments in the wrong
    # order (int_bounds(8, True)); the signature is (signed, bits).
    if signed:
        return -(2 ** (bits - 1)), (2 ** (bits - 1)) - 1
    return 0, (2 ** bits) - 1
|
ea51d1932d743c00c0d90333efeefcabb5cc2f1b
| 437,548 |
import re
def split_name(name):
"""Split the string name based on a specific regular expression."""
return re.split("_|-| |[0-9]|\(|\)", name)
|
f4343b8c5f2c73e29c19abbb662a9ec3aebb0c24
| 445,490 |
def processHoles(code, holes_decls):
    """Finds all hole symbols in the SMT-LIB code of the program and replaces
    them with appropriate references to their synthesis functions.  Does
    nothing in case of verification (no hole declarations).

    :param code: (str) Source code (in arbitrary language) of the program.
    :param holes_decls: (list[HoleDecl]) Declarations of all holes.
    :return: (str) Source code with SMT replacement of holes by appropriate
        functions.
    """
    if not holes_decls:
        return code
    # Pad closing parens so a hole id followed by ')' is still matched
    # by the 'id ' (id plus trailing space) pattern below.
    padded = code.replace(")", " )")
    for hole in holes_decls:
        if hole.id in padded:
            padded = padded.replace(hole.id + " ", hole.get_function_call() + " ")
    return padded.replace(" )", ")")
|
78daf2434e0638415fbde44f2d9fbeb97bcadab8
| 579,162 |
import random
def random_indexes(a, b, feats_in_plot):
    """Support function for tSNE_vis

    Args:
        a: start index
        b: end index
        feats_in_plot: # of features to be plotted per class
    Returns:
        List of ``feats_in_plot`` random indexes drawn from [a, b - 1]
    """
    return [random.randint(a, b - 1) for _ in range(feats_in_plot)]
|
2fad244becdc378dc3cc36ccc786634c7ec0d832
| 60,013 |
def widthHeightDividedBy(image, divisor):
    """Return an image's (width, height), each divided by ``divisor``
    and truncated to int.

    Note: ``image.shape`` is (height, width, ...), but the tuple is
    returned width-first.
    """
    height, width = image.shape[:2]
    return (int(width / divisor), int(height / divisor))
|
e7de8e4a80e9dcf7b1ca6000671a97de6c56a351
| 299,763 |
import torch
def euler_angles_to_rotation_matrices(angles):
    """
    Arguments:
    ---------
        angles: Tensor with size Kx3, where K is the number of Euler angles we
                want to transform to rotation matrices

    Returns:
    -------
        rotation_matrices: Tensor with size Kx3x3, that contains the computed
                           rotation matrices (R_z @ R_y @ R_x per row)
    """
    K = angles.shape[0]
    batch = torch.arange(K)

    def _axis_rotation(theta, fixed, i, j):
        # Build K rotation matrices around axis `fixed`, rotating in the
        # (i, j) plane: R[i,i]=R[j,j]=cos, R[i,j]=-sin, R[j,i]=sin.
        R = angles.new_zeros((K, 3, 3))
        R[:, fixed, fixed] = 1.0
        c = torch.cos(theta)
        s = torch.sin(theta)
        R[batch, i, i] = c
        R[batch, j, j] = c
        R[batch, i, j] = -s
        R[batch, j, i] = s
        return R

    r_x = _axis_rotation(angles[:, 0], 0, 1, 2)
    r_y = _axis_rotation(angles[:, 1], 1, 2, 0)
    r_z = _axis_rotation(angles[:, 2], 2, 0, 1)
    return r_z.bmm(r_y.bmm(r_x))
|
a3fdad7a1496bc1f1a9170cc63c030451fd23812
| 243,632 |
import re
def get_sofile_name(sofilename):
    """
    get the .so file name
    ie: foo.so.0.0 -> foo.so

    The original pattern '.*.so' left the dot unescaped, so a name like
    'fooXso' would (incorrectly) match; the dot is now escaped so only a
    literal '.so' is accepted.  Raises IndexError if the name contains
    no '.so'.
    """
    return re.findall(r'.*\.so', sofilename)[0]
|
a9a492c988e8cd78003616660ddf9a1535371451
| 160,538 |
import pathlib
def last_two(path):
"""Return the last two parts of path."""
return pathlib.Path(*path.parts[-2:])
|
4cd1689b9cbae6b77583cc9fdd8dac60db32770a
| 225,840 |
def typeName(ty):
    """ Return the name of a type, e.g.:
            typeName(int) => 'int'
            typeName(Foo) => 'Foo'
            typeName((int,str)) => 'int or str'
        @param ty [type|tuple of type]
        @return [str]
    """
    if not isinstance(ty, tuple):
        return ty.__name__
    return " or ".join(t.__name__ for t in ty)
|
e1af991a1ae75847da8edee86eb688ccee525e4b
| 74,443 |
import six
import warnings
def string_to_text(value, deprecate_msg):
    """
    Return input string converted to text string.

    If input is text (str), it is returned as is.
    If input is binary (bytes), a DeprecationWarning carrying
    ``deprecate_msg`` is emitted and the value is decoded using UTF-8.

    The ``six`` compatibility shim is no longer needed: on Python 3,
    ``six.text_type`` is ``str`` and ``six.binary_type`` is ``bytes``.
    """
    assert isinstance(value, (str, bytes))
    if isinstance(value, bytes):
        warnings.warn(deprecate_msg, DeprecationWarning)
        value = value.decode('utf-8')
    return value
|
8facc8e1da3b63dce793e5f70c6226bacdf0b9c4
| 511,422 |
import yaml
def get_tweet_templates(yaml_filename="../data/tweet_content.yaml"):
    """Read the tweet content yaml file and return a dictionary of template sentences to be formed into tweets"""
    with open(yaml_filename, 'r', encoding="utf8") as yaml_file:
        return yaml.load(yaml_file, Loader=yaml.FullLoader)
|
7f783dac8bc102d53ca6b909753aeae332c1175f
| 536,965 |
def same_len_lists(A, B):
    """
    Pad the shorter list with '------' (in place) so both have the same
    length.
    - A: list
    - B: list
    - returns: list (the same 2 list objects, now of equal length)
    """
    pair = [A, B]
    target = max(len(A), len(B))
    for lst in pair:
        # extend() mutates the caller's list, matching the original's
        # in-place `+=` behavior.
        lst.extend(['------'] * (target - len(lst)))
    return pair
|
b4fba9223f0dc26c0071e6e7eff4d899e9755f23
| 155,357 |
def format_attributes(callable, *args):
    """
    Format the results of *callable* in the format expected
    by Graphviz: " [k1=v1, k2=v2]" with keys sorted, or "" when the
    callable returns a falsy value.
    """
    value = callable(*args)
    if not value:
        return ""
    body = ", ".join(f"{key}={value[key]}" for key in sorted(value))
    return f" [{body}]"
|
3443fff94441b26f446d007e7d3b25e8fe82b8b9
| 448,739 |
def unflatten_dict(dictionary, sep='.'):
    """
    unflattens a dictionary into a nested dictionary according to sep

    Args:
        dictionary: flattened dictionary, i.e. there are no dictionaries
            as elements
        sep: separator on which keys are split when nesting
    Returns: nested dictionary
    """
    nested = dict()
    # Process the longest keys first so conflicts like having both
    # 'key' and 'key.sub' resolve consistently.
    for flat_key, value in sorted(dictionary.items(), key=lambda kv: kv[0], reverse=True):
        *parents, leaf = flat_key.split(sep)
        node = nested
        for part in parents:
            node = node.setdefault(part, dict())
        node[leaf] = value
    return nested
|
f6125051fde842c04e604e6dcb6e7da82160d1b5
| 290,582 |
def serialize_columns(columns):
    """
    Return the headers and frames resulting
    from serializing a list of Column

    Parameters
    ----------
    columns : list
        list of Columns to serialize

    Returns
    -------
    headers : list
        list of header metadata for each Column
    frames : list
        flat list of frames across all columns
    """
    if not columns:
        return [], []
    headers, per_column_frames = zip(*(col.serialize() for col in columns))
    frames = [frame for frame_group in per_column_frames for frame in frame_group]
    return headers, frames
|
8906f8d2fbb94f4c0ffde147e5f04fddc2b2c515
| 291,576 |
def valid_sort(sort, allow_empty=False):
    """Returns validated sort name or throws Assert."""
    assert isinstance(sort, str), 'sort must be a string'
    if allow_empty and sort == '':
        return sort
    assert sort in ('trending', 'promoted', 'hot', 'created'), 'invalid sort'
    return sort
|
cd2debc03b4dde717fd0e4a277b85410af03d508
| 379,444 |
def get_params(**override):
    """Returns default parameters dictionary for model, with ``override``
    keyword arguments replacing (or extending) the defaults."""
    params = {
        # Model args
        'num_channels': 3,
        'multiscale': 'Steerable',  # 'Steerable', 'Laplacian', None
        'nscales': 3,
        'steerable_filter_type': 1,
        'cnn': 'Conv',
        'num_filters': [64, 64, 64, 3],
        'padding': 'same',
        # Distribution args
        'num_distribution_encoder_layers': 2,
        'num_marginal_encoder_mixtures': 5,
        'z_dim': 10,
    }
    params.update(override)
    return params
|
5dfb12f705ccda9c35103a125f20d8ac9d53e51f
| 621,089 |
def flat_vars(doc):
    """
    Flat given nested dict, but keep the last layer of dict objects unchanged.

    :param doc: nested dict object, consisted solely by dict objects.
    :return: (flattened dict with only the last dict layer un-flattened,
              bool that is True when this level itself was a final layer)
    """
    flattened = {}
    saw_nested = False
    for key, value in doc.items():
        if not isinstance(value, dict):
            flattened[key] = value
            continue
        saw_nested = True
        child, child_is_final = flat_vars(value)
        if child_is_final:
            # Keep a leaf-level dict intact rather than flattening it.
            flattened[key] = child
        else:
            for sub_key, sub_value in child.items():
                flattened["%s.%s" % (key, sub_key)] = sub_value
    return flattened, not saw_nested
|
614eea5fe1cbfc1434780ab3fd87faeceb187b94
| 168,203 |
def error_max_retry(num_retries, err_string):
    """Build the error message for exhausting all artifact-retrieval retries."""
    return f"Unable to retrieve artifact after {num_retries} retries: {err_string}"
|
16fd360db6e25fe5b4ce7c34b145c2325e52cd19
| 92,537 |
def send_message(key, channel_loc, message, get_info):
    """
    Bot will send a message to Slack.

    :param key: Slack client exposing ``api_call``
    :param channel_loc: channel where the message is to be sent
    :type str
    :param message: text that is posted to Slack
    :type str
    :param get_info: see return
    :type boolean
    :return: the action's response dict, only when get_info is True
    :type dictionary
    """
    response = key.api_call(
        "chat.postMessage",
        channel=channel_loc,
        text=message,
        as_user="false",
    )
    print("Poll has been posted" if response["ok"] else "Error posting message")
    if get_info:
        return response
|
d4c90ca3ac530ee6234da0e15dd44d9ab19e826e
| 448,690 |
def empty_path(tmp_path_factory):
    """Provides a temp directory with no files in it."""
    # Presumably a pytest fixture (tmp_path_factory is pytest's
    # session-scoped temp-dir factory) — TODO confirm the decorator
    # lives elsewhere in the file.
    return tmp_path_factory.mktemp('empty_dir')
|
2aeff1c813e4799725da4da7de1a5ec919bfb207
| 191,156 |
def timeframe_search(sensor_df_list):
    """Determines the timeframe for which data should be loaded.

    Locates the beginning and end date of each hourly averaged sensor
    dataframe and subsequently determines the earliest and latest date
    within all recorded sensor datasets.

    Args:
        sensor_df_list (list): List of sensor dataframes

    Returns:
        (tuple): Two-element tuple containing:

            - **overall_begin**: Earliest recorded date in the passed
              sensor dataframe list.
            - **overall_end**: Latest recorded date in the passed sensor
              dataframe list.
    """
    overall_begin = min(df.index.min() for df in sensor_df_list)
    overall_end = max(df.index.max() for df in sensor_df_list)
    return overall_begin, overall_end
|
86ea68acc38b1818e464d3e15ec59d19febf0f14
| 556,087 |
def _calculate_pa(row):
"""
Returns:
The number of plate appearances by a player as an int
based on the following formula:
PA = AB + BB + SF + SH + HBP - IBB
"""
PA = (row['AB'] + row['BB'] + row['SF'] + row['SH'] + row['HBP'] \
- row['IBB'])
return PA
|
2ded4997d3b904616f6d5f6c74c90dd22d8fe334
| 187,342 |
import tempfile
def tsv(df, **kwargs):
    """
    Write ``pandas.DataFrame`` to a temporary tab-delimited file.
    Works in a ``with`` block (file is deleted at context teardown).

    >>> with tsv(df1) as f1, tsv(df2) as f2:
    ...    # something that requires tsv file input (use f or f.name)
    """
    handle = tempfile.NamedTemporaryFile(mode='w+t')
    df.to_csv(handle, sep='\t', index=False, header=False, na_rep='nan', **kwargs)
    # Flush and rewind so readers see the full content from the start.
    handle.flush()
    handle.seek(0)
    return handle
|
e57750dc3c05f5b83b25885b700d101a8569797d
| 235,725 |
def createPlotMetaData(
    title, xLabel, yLabel, xMajorTicks=None, yMajorTicks=None, legendLabels=None
):
    """
    Create plot metadata (title, labels, ticks)

    Parameters
    ----------
    title : str
        Plot title
    xLabel : str
        x-axis label
    yLabel : str
        y-axis label
    xMajorTicks : list of float
        List of axial position at which to insert major ticks
    yMajorTicks : list of float
        List of axial position at which to insert major ticks
    legendLabels : list of str
        Labels to be used in the plot legend

    Returns
    -------
    metadata : dict
        Dictionary with all plot metadata information
    """
    return {
        "title": title,
        "xlabel": xLabel,
        "ylabel": yLabel,
        "xMajorTicks": xMajorTicks,
        "yMajorTicks": yMajorTicks,
        "legendLabels": legendLabels,
    }
|
d0442babdba33613d218b7be037ff8322d8bbcd7
| 551,567 |
def getFoodNames(cursor):
    """Get dictionary from food id to food name.

    Assumes the cursor yields mapping rows whose values iterate as
    (id, name), matching the SELECT column order.
    """
    cursor.execute("SELECT id, name FROM Food")
    rows = cursor.fetchall()
    return {food_id: name for food_id, name in (row.values() for row in rows)}
|
e3bbd6d655747cbb1350bc294e5ce86c0931b209
| 69,149 |
import re
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
|
15ae7faa71a625249cb17cf3804ab576fbe471a0
| 173,724 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.