content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def get_version_all(playbook_configuration_id, client, query_params):
    """Fetch every version of the playbook configuration with the given ID.

    Delegates to the client, coercing the ID to a string and expanding
    ``query_params`` as keyword arguments.
    """
    config_id = str(playbook_configuration_id)
    return client.get_playbook_configuration_versions(config_id, **query_params)
|
1eff6a51fe59727ddd2208d64346060e8f93716c
| 360,318 |
def permutation(s):
    """Return all permutations of the elements of *s*.

    Example: [1, 2, 3] -> [[1,2,3], [2,1,3], [2,3,1], [1,3,2], [3,1,2], [3,2,1]]

    Note: an empty input yields an empty result (not [[]]).
    """
    n = len(s)
    if n == 0:
        return []
    if n == 1:
        return [s]
    head, tail = s[0], s[1:]
    # Insert the head element into every slot of every permutation of the tail.
    return [
        rest[:pos] + [head] + rest[pos:]
        for rest in permutation(tail)
        for pos in range(n)
    ]
|
bb49487012cd54f9c58ac7c1cce87c1c05eb33ed
| 683,353 |
def make_all_author_patches_query(project, owner, status='status:open'):
    """Build a query string fetching all of an author's patches on a project.

    The default status filter is open patches.

    returns:
        The assembled query string.
    """
    owner_clause = 'owner:"' + owner + '"'
    return ' AND '.join(['project:' + project, status, owner_clause])
|
bac697e08a5aafa6d2a430015a3544ee64d2bfc1
| 375,411 |
def _go_files(input_api, source_file_filter=None):
"""Collects affected go source files, but ignores ones generated by protoc."""
files = []
for f in input_api.AffectedTextFiles(source_file_filter):
if not f.LocalPath().endswith('.go'):
continue
if f.LocalPath().endswith('.pb.go'):
continue
files.append(f)
return files
|
80b549a25a958257b7e90be5f195dcfbfeffaa80
| 477,252 |
def check_for_wav_only(list_values):
    """
    Filter *list_values* down to names ending in "wav" or "WAV".

    Note: only these two exact-case suffixes are matched, and no dot is
    required before the suffix (matches the original behavior).
    """
    return [name for name in list_values if name[-3:] in ("WAV", "wav")]
|
cfecef563f381bba2474683662c7c5cacfd1b480
| 442,660 |
def PReLU(z, alpha):
    """Parametric rectified linear unit.

    Args:
        z (np.array): input values.
        alpha: slope applied to non-positive inputs.
    Returns:
        f(z) = z if z > 0 else alpha * z (np.array)
    """
    positive_part = z * (z > 0)
    negative_part = alpha * z * (z <= 0)
    return positive_part + negative_part
|
3104198f79d5d3e6d0cc59d50d72680bea4b1b60
| 253,969 |
def rho_to_units(rho, ref_density=1250):
    """Convert a dimensionless cell density to real units.

    Parameters
    ----------
    rho : number or numpy array
        Cell density in dimensionless units.
    ref_density : number or numpy array
        The cell density corresponding to `rho = 1`.
        Defaults to 1250 cells / mm^2.

    Returns
    -------
    density : number or numpy array (dtype float)
        Cell density in real units (e.g. cells / mm^2).
    """
    scale = ref_density
    return scale * rho
|
7fc34c72fa80f99d14ef5ab0c809b012fad45b05
| 589,833 |
import hashlib
def file_hash(file_path, buff_size):
    """Compute the md5 digest of the file at *file_path*.

    Reads the file in binary chunks of *buff_size* bytes so arbitrarily
    large files can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as stream:
        # iter() with a sentinel stops on the first empty read (EOF).
        for chunk in iter(lambda: stream.read(buff_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
546b1c42aa96102389c5c0c00b6ed5992306fad9
| 431,559 |
def mi2nm(mi):
    """
    Convert miles to nautical miles.

    Returns None when the input is None so missing values propagate.
    """
    # Identity check instead of `== None`: avoids surprises from objects
    # that overload __eq__ (e.g. numpy/pandas) and follows PEP 8.
    if mi is None:
        return None
    return mi * 0.868976
|
4b596ca03e2cc079c2e4caa5b98d1b3488377c10
| 248,599 |
from typing import List
def median(data_list:List[str]) -> float:
    """Compute the median of a list of numeric strings.

    Sorts the values numerically, then returns the middle one for an
    odd-length list, or the mean of the two middle ones for an even-length
    list. (Based on *Data Science do Zero*, Joel Grus, Alta Books, 2016.)

    :param data_list: list of values, each convertible with int().
    :returns: the median as a float.
    """
    ordered: List[str] = sorted(data_list, key=int)
    size: int = len(ordered)
    middle: int = size // 2
    if size % 2 == 0:
        # Even length: average the two central values.
        upper: float = float(ordered[middle])
        lower: float = float(ordered[middle - 1])
        return (upper + lower) / 2
    # Odd length: single central value.
    return float(ordered[middle])
|
9de8b6d609c082962e43b7f0bc2dfc44ab37b9a4
| 518,795 |
def sort_sflats(butler, sflat_files, exptime_cut=20.):
    """Sort superflat image filenames into low and high exposure sets.

    Parameters
    ----------
    butler : `Butler` or `None`
        Data Butler; when None, classification falls back to filename tags.
    sflat_files : `list`
        List of superflat files.
    exptime_cut : `float`
        Exposure-time boundary between low and high exposures.

    Returns
    -------
    sflats_l : `list`
        Low exposure superflats.
    sflats_h : `list`
        High exposure superflats.
    """
    low, high = [], []
    for name in sflat_files:
        if butler is None:
            # Classify by filename tag; names with no tag are dropped.
            if '_L_' in name or 'flat_L' in name:
                low.append(name)
            elif '_H_' in name or 'flat_H' in name:
                high.append(name)
        else:
            # Classify by the exposure time recorded in the metadata.
            exp_time = butler.queryMetadata('raw', 'EXPTIME', name)[0]
            (low if exp_time < exptime_cut else high).append(name)
    return (low, high)
|
e22a0502a43f23e281431aef54b46e4aff660049
| 139,804 |
import typing
def get_number(s: typing.Any) -> typing.Union[int,typing.Any]:
    """Cast *s* to int when possible, otherwise return it unchanged.

    Args:
        s: An input string or number.
    Returns:
        ``int(s)`` if the conversion succeeds, else the original value.
        (A TypeError -- e.g. for None -- is *not* caught and propagates,
        matching the original behavior.)
    """
    try:
        result = int(s)
    except ValueError:
        return s
    return result
|
0d1880c60606f15309d204b0ed8f018d24987789
| 271,554 |
import typing
import json
def _get_sse_event_payload(data: str) -> typing.Union[str, dict]:
"""Maps incoming event JSON payload to it's pythonic type.
"""
try:
return json.loads(data)
except json.JSONDecodeError:
return data
|
063a4692a4162e85813bdd3dd20cb8e6f280e272
| 267,352 |
def has_variable(formula, variable):
    """
    Detect whether *formula* contains the given variable ID.

    Walks the recursive rule structure: condition nodes are true if any of
    their sub-rules contain the ID; leaf nodes compare their "id" field.

    :param formula: node element at the top of the formula
    :param variable: ID to search for
    :return: True when the formula references the ID.
    """
    if 'condition' not in formula:
        # Leaf node: direct comparison against the stored ID.
        return formula['id'] == variable
    # Condition node: disjunction over all sub-rules.
    return any(has_variable(rule, variable) for rule in formula['rules'])
|
7ff27e91ae012d9dd99464c8333204d85c229d43
| 180,954 |
def parse_outcar_time(lines):
    """Parse the CPU and wall time from OUTCAR lines.

    The mismatch between wall time and CPU time represents the turn-around
    time in VaspInteractive.

    Returns (cpu_time, wall_time); both are None when the calculation is
    not finished (markers absent from *lines*).
    """
    def _seconds(line):
        # The value sits after the first colon, e.g. "... (sec):   12.3".
        return float(line.split(":")[1].strip())

    cpu_time = wall_time = None
    for line in lines:
        if "Total CPU time used (sec):" in line:
            cpu_time = _seconds(line)
        if "Elapsed time (sec):" in line:
            wall_time = _seconds(line)
    return cpu_time, wall_time
|
ca4dda6649afa05643072ca73d36fd5182c245f3
| 505,793 |
import itertools
def xpath_soup(element):
    """
    Generate the xpath of a soup element.

    :param element: bs4 text or node.
    :return: xpath as string.
    """
    parts = []
    # Text nodes have no tag name; start from their parent tag instead.
    node = element if element.name else element.parent
    for ancestor in node.parents:
        # ancestor is expected to be a bs4.element.Tag
        siblings_before = itertools.islice(
            ancestor.children, 0, ancestor.contents.index(node))
        tag = node.name
        # 1-based position among same-named preceding siblings.
        position = 1 + sum(1 for sib in siblings_before if sib.name == tag)
        parts.append(tag if position == 1 else '%s[%d]' % (tag, position))
        node = ancestor
    return '/%s' % '/'.join(reversed(parts))
|
3738743875fa332177699959ab7e5fc375941ec4
| 345,787 |
def component_masses_to_chirp_mass(mass_1, mass_2):
    """
    Convert the component masses of a binary to its chirp mass.

    Parameters
    ----------
    mass_1: float
        Mass of the heavier object
    mass_2: float
        Mass of the lighter object

    Return
    ------
    chirp_mass: float
        Chirp mass of the binary: (m1 * m2)**(3/5) / (m1 + m2)**(1/5)
    """
    product = mass_1 * mass_2
    total = mass_1 + mass_2
    return product ** 0.6 / total ** 0.2
|
c80e5619ec2f75ff8beae36a31a6b6fb9bf9627f
| 534,516 |
def getDeviceNameFromFileName(name):
    """Parses the filename and returns the device name, or False if not found."""
    # Checked in the same order as the original elif chain.
    known_devices = ('10754', '13976', '15502', 'ST530', 'ST534')
    for device in known_devices:
        if device in name:
            return device
    return False
|
457a56a5e896e108d6105f019e3bafd647ee2bf2
| 382,484 |
def compare_version(lhs, rhs):
    """Compare two versions.

    Parameters
    ----------
    lhs : tuple
        tuple of three integers (major, minor, patch)
    rhs : tuple
        tuple of three integers (major, minor, patch)

    Returns
    -------
    int
        0 if lhs and rhs are equal, 1 if lhs is bigger than rhs, otherwise -1.
    """
    # Classic three-way compare via boolean subtraction: yields -1, 0 or 1.
    return (lhs > rhs) - (lhs < rhs)
|
fd61215efa578b31d4c14502b16b8793a3fa7a28
| 625,514 |
def _get_subsection_percentage(subsection_grade):
"""
Returns the percentage value of the given subsection_grade.
"""
return subsection_grade.percent_graded * 100.0
|
89e18f8deb2023522b8289236bd130ad00996b1d
| 425,245 |
def test_quadruple_args_in_call(x):
"""Test that duplicated arguments still cause no problem even if
there are four of them."""
def g(a, b, c, d):
return a * b * c * d
return g(x, x, x, x)
|
4180adadde9de76d43f2534e795e76f343b35d71
| 497,730 |
def _get_start(loc):
"""Get the starting location position of a feature in the contig (accounting for the strand)"""
if loc.strand == '+':
return loc.start
if loc.strand == '-':
st = loc.start - (loc.length - 1)
if st < 0:
raise ValueError(f'{loc} is not a valid location on this assembly')
return st
return 0
|
40bba838e3ba8bfc310f803dc8f52614a57ca6db
| 302,641 |
def get_spacing_groups(font):
    """
    Return a dictionary containing the ``left`` and ``right`` spacing groups in the font.
    """
    groups = {'left': {}, 'right': {}}
    for name in list(font.groups.keys()):
        # Spacing groups are named '_left...' / '_right...'.
        if not name.startswith('_'):
            continue
        if name[1:5] == 'left':
            groups['left'][name] = font.groups[name]
        if name[1:6] == 'right':
            groups['right'][name] = font.groups[name]
    return groups
|
778a27b49ce9d869d7867774df8dfe22a3cd6140
| 691,106 |
import math
def straightness_imperfection(z, length, delta_global, oos_axis, **kwargs):
    """ Returns the out-of-straightness imperfection of the node.

    :param float z: z coordinate of node
    :param float length: total length of the component
    :param float delta_global: maximum amplitude of the out-of-straightness global imperfection
    :param np.ndarray oos_axis: direction of out-of-straightness wrt to the global coordinates
    :return list: [float] nodal imperfection [x, y, z] in the global coordinates

    Notes:
        - Assumes that the z-direction is along the length of the member
    """
    # Half-cosine bow: zero at both ends, delta_global at mid-length.
    phase = 2.0 * math.pi * z / length
    amplitude = (1.0 - math.cos(phase)) * delta_global / 2.
    return list(amplitude * oos_axis)
|
6148eb8d9f24b55a5ee1efa5eb43f23f4476b02a
| 634,041 |
def get_interfaces_from_domain(domain_xml):
    """
    From the ElementTree of a domain, build a map of all network interfaces.

    Parameters
    ----------
    domain_xml: ElementTree
        The xml representation of the domain.

    Returns
    -------
    dict
        All the network interfaces, as {mac_address: device_name}.
    """
    if domain_xml is None:
        return {}
    devices = domain_xml.find('./devices')
    if devices is None:
        return {}
    interfaces = {}
    for iface in devices.findall('./interface'):
        mac_addr = iface.find('./mac').attrib['address'].lower()
        source = iface.find('./source')
        # Missing 'dev' attribute maps to an empty device name.
        interfaces[mac_addr] = source.attrib.get('dev', '')
    return interfaces
|
093c1db7e463e8bffa2d71d259f60d3504b6bcfe
| 285,813 |
def js_add_el_to_div(class_name_source, class_name_target):
    """
    Js function to append the source element to the target one.
    """
    # Implicit string concatenation keeps the emitted JS byte-identical.
    return (
        f"const el = document.getElementsByClassName('{class_name_target}')[0];"
        f"if (el) "
        f"el.appendChild(document.getElementsByClassName('{class_name_source}')[0])"
    )
|
3b0309e648efc8c5f4ed7cebb02e9f8500d4686a
| 299,641 |
import re
def is_hex_color(input_string):
    """Check if a string has the format of a hex color (#fff or #ffffff)"""
    # Optional leading '#', then exactly 3 or 6 hex digits.
    pattern = re.compile(r"^#?([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$")
    return bool(pattern.search(input_string))
|
855f8b02688d266935028e3e4169c0fa4fa6cf19
| 191,337 |
def preprocess(text):
    """
    Pre-process text for use in the model: lower-casing, standardizing
    newlines, removing junk.

    :param text: a string (floats, e.g. NaN cells from tabular loaders,
        are treated as missing and mapped to '')
    :return: cleaner string
    """
    # NaN cells from e.g. pandas arrive as floats; treat them as empty text.
    if isinstance(text, float):
        return ''
    # NOTE(review): the original source had a string literal broken across
    # two lines (a SyntaxError); it is reconstructed here as a CRLF
    # normalization step — confirm against the original intent.
    return (text.lower()
            .replace('<br />', '\n')
            .replace('<br>', '\n')
            .replace('\\n', '\n')
            .replace('\r\n', '\n'))
|
1d3044e145d938e70e828f0f13cce24dd7134a7c
| 570,514 |
def first_inside_quotes(s):
    """
    Returns the first substring of s between two (double) quotes.

    A quote character is one that is inside a string, not one that
    delimits it. We typically use single quotes (') to delimit a
    string if we want to use a double quote character (") inside of it.

    Examples:
        first_inside_quotes('A "B C" D') returns 'B C'
        first_inside_quotes('A "B C" D "E F" G') also returns 'B C',
        because it only picks the first such substring

    Parameter s: a string to search
    Precondition: s is a string containing at least two double quotes
    """
    opening = s.index('"')
    closing = s.index('"', opening + 1)
    return s[opening + 1:closing]
|
f7ed6c79ee1d6415a06e96ac63298c5b419eac3f
| 70,037 |
def gassmann(K0, Kin, Kfin, Kfout, phi):
    """
    Use Gassmann's equation to perform fluid substitution: from the bulk
    modulus of a rock saturated with one fluid (or the dry frame, Kfin=0),
    predict the bulk modulus of the rock saturated with a second fluid.

    :param K0: Frame mineral modulus (GPa)
    :param Kin: Input rock modulus (can be fluid saturated or dry)
    :param Kfin: Bulk modulus of the pore-filling fluid of the initial rock
        (0 if input is the dry-rock modulus)
    :param Kfout: Bulk modulus of the pore-filling fluid of the output
        (0 if output is dry-rock modulus)
    :param phi: Porosity of the rock
    """
    fluid_out_term = Kfout / (phi * (K0 - Kfout))
    rock_term = Kin / (K0 - Kin)
    fluid_in_term = Kfin / (phi * (K0 - Kfin))
    combined = fluid_out_term + rock_term - fluid_in_term
    return K0 * combined / (1 + combined)
|
5421459125533a73d13de1d2063b8332dc8dac31
| 53,307 |
def check_distortion(list_data):
    """
    Check if the distortion is significant or not. Distortion is flagged
    when more than 15% of the dots have a residual greater than 1 pixel.

    Parameters
    ----------
    list_data : array_like
        List of [radius, residual] of each dot.

    Returns
    -------
    bool
    """
    residuals = list_data[:, 1]
    num_large = len(residuals[residuals > 1.0])
    fraction = 1.0 * num_large / len(residuals)
    return fraction > 0.15
|
84f67aea2fb2c0dd807be198b4aa9e33755ce116
| 69,014 |
import re
def removeTrailingComma(text):
    """Remove a comma at the end of a line when it is the last item before
    a closing bracket or brace, e.g. "[1, 2,\\n]" -> "[1, 2\\n]".
    """
    # Raw replacement string: "\g<1>" in a non-raw literal is an invalid
    # escape sequence (SyntaxWarning in Python >= 3.12, error in future).
    return re.sub(r",(\s*\n\s*)([\]}])", r"\g<1>\g<2>", text)
|
2a1cd73da7dcc99661756bf29d77b96a588da69c
| 551,938 |
def ceasar_cipher(text, alphabet, k, code):
    """
    Encode or decode the given text by transposition over the provided alphabet.

    PARAMS:
        text (string): The text you would like to encode or decode
        alphabet (array[char] / string): List of all the available characters in order. May be one long string, or array of single characters
        k (int): The encryption key
        code (int): Whether you want to encode(0) or decode(1)

    RETURNS:
        string: Encoded ciphertext or decoded plaintext
        (None for any other *code* value)
    """
    if code not in (0, 1):
        return None
    # Decoding is simply encoding with the key negated.
    shift = k if code == 0 else -k
    size = len(alphabet)
    table = {alphabet[i]: alphabet[(i + shift) % size] for i in range(size)}
    return ''.join(table[ch] for ch in text)
|
c8dd973a1a8b493899ae4f798f6008ddff470149
| 534,567 |
def show_in_nav_for(level=0, icon=None):
    """
    Use as a class / method decorator to flag an exposed object in the site navigation

    @show_in_nav_for(users.guest)
    @expose
    class SubPage:
        ...
    """
    def _mark(target):
        # Attach navigation metadata and expose the target.
        target.show_in_nav = level
        target.icon = icon
        target.exposed = True
        return target
    return _mark
|
030ef8e38ef398f7e3461a11807e12dc15460848
| 647,333 |
import logging
def create_augmented_examples(orig_examples,
                              converter,
                              split,
                              log_every = 1000):
    """Creates AugmentedExamples from the raw JSONLs.

    Args:
      orig_examples: An Iterable of deserialized JSONs.
      converter: A subclass of BaseExampleConverter.
      split: Split name (used only for logging).
      log_every: Logging frequency.

    Returns:
      a list of AugmentedExamples.
    """
    results = []
    for index, raw_example in enumerate(orig_examples):
        if index % log_every == 0:
            logging.info("[%s:%d] Produced %d examples", split, index, len(results))
        # Validation happens before conversion; each raw example may expand
        # into several augmented ones.
        converter.verify_exemplars(raw_example)
        results.extend(converter.convert(raw_example))
    logging.info("[%s] Produced %d examples total.", split, len(results))
    return results
|
8c4ed405d6f9f5138db9c71cd6fe2b5c3aff9c54
| 235,682 |
from typing import Mapping
from typing import Any
def is_nested(dictionary: Mapping[Any, Any]) -> bool:
    """Returns whether the passed mapping is nested at least one level.

    Args:
        dictionary (dict): dict to be tested.

    Returns:
        bool: True when any value in the mapping is itself a dict.
    """
    for value in dictionary.values():
        if isinstance(value, dict):
            return True
    return False
|
a38159fd0a83192b3f19a1a86ce979e54d251a0d
| 620,435 |
def __ask_int(message):
    """
    Ask the user for an integer value, re-prompting until one is given.

    Args:
        message = str - prompt shown on the console
    Returns:
        int - the first value the user enters that parses as an integer
    """
    while True:
        raw = input(message)
        try:
            return int(raw)
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; int() only raises ValueError here.
        except ValueError:
            print("The Value you Provided is not a Integer. Input: '{0}'".format(raw))
|
d88c1d1f9794b6ac653d95899d6d9472e9276718
| 639,353 |
def request_to_dict(request):
    """
    Transform a request into usable metadata: its headers plus the
    caller's IP under 'X-Founder-Ip'.
    """
    # Headers are applied second, so a header of the same name wins
    # over the synthesized IP entry (same as the original loop).
    metadata = {'X-Founder-Ip': request.remote_addr}
    metadata.update(dict(request.headers.to_list()))
    return metadata
|
8e86a78880991851a1232d970cf2c91b46ad56fb
| 544,619 |
def extract_request_body(requests_text: str):
    """
    Parse change requests, a text of RPSL objects along with metadata like
    passwords or deletion requests.

    Returns a dict suitable as a JSON HTTP POST payload.
    """
    passwords = []
    overrides = []
    rpsl_texts = []
    delete_reason = ""
    normalized = requests_text.replace("\r", "")
    for paragraph in normalized.split("\n\n"):
        paragraph = paragraph.strip()
        if not paragraph:
            continue
        kept_lines = []
        # password/override/delete are meta attributes and are stripped
        # before parsing. delete refers to a specific object,
        # password/override apply to all included objects.
        for line in paragraph.strip("\n").split("\n"):
            if line.startswith("password:"):
                passwords.append(line.split(":", maxsplit=1)[1].strip())
            elif line.startswith("override:"):
                overrides.append(line.split(":", maxsplit=1)[1].strip())
            elif line.startswith("delete:"):
                delete_reason = line.split(":", maxsplit=1)[1].strip()
            else:
                kept_lines.append(line)
        if kept_lines:
            rpsl_texts.append("\n".join(kept_lines) + "\n")
    return {
        "objects": [{"object_text": text} for text in rpsl_texts],
        "passwords": passwords,
        "overrides": overrides,
        "delete_reason": delete_reason,
    }
|
a14d9b016294c7b2b777f872a6b45394f58e76c4
| 645,643 |
import re
def sanitize_release_group(string):
    """Sanitize a `release_group` string to remove content in square brackets.

    :param str string: the release group to sanitize (None passes through).
    :return: the sanitized release group.
    :rtype: str
    """
    # only deal with strings
    if string is None:
        return None
    # Drop bracketed tags, then strip and upper-case.
    without_tags = re.sub(r'\[\w+\]', '', string)
    return without_tags.strip().upper()
|
641c5940fb375ef8bb7c2dc9ddc29535ba84abdf
| 308,578 |
from typing import Dict
from typing import Any
import uuid
def create_boxes_ndjson(datarow_id: str, schema_id: str, top: float,
                        left: float, bottom: float,
                        right: float) -> Dict[str, Any]:
    """
    * https://docs.labelbox.com/data-model/en/index-en#bounding-box

    Args:
        datarow_id (str): id of the data_row (in this case image) to add this annotation to
        schema_id (str): id of the bbox tool in the current ontology
        top, left, bottom, right (int): pixel coordinates of the bbox
    Returns:
        ndjson representation of a bounding box
    """
    # Coordinates are truncated to ints; height/width derived from corners.
    bbox = {
        "top": int(top),
        "left": int(left),
        "height": int(bottom - top),
        "width": int(right - left),
    }
    return {
        "uuid": str(uuid.uuid4()),
        "schemaId": schema_id,
        "dataRow": {"id": datarow_id},
        "bbox": bbox,
    }
|
0ae19e68fc2e0af754fb3bb30203188f85a758f7
| 176,526 |
import torch
def so3_log_abs_det_jacobian(x):
    """
    Element-wise log |det J| of the SO(3) exponential map.

    :param x: Algebra tensor of shape (..., 3)
    :return: Tensor of shape (...,) (the last axis is reduced by the norm)

    Removable pole: (2 - 2 cos t)/t^2 -> 1 - t^2/12 as t -> 0, so tiny
    norms go through the series expansion for numerical stability.

    Bug fix: the original substituted 1 for tiny norms *before* evaluating
    the series, so near the pole it returned log(1 - 1/12) = log(11/12)
    instead of ~0. The series must be evaluated at the raw (tiny) norm.
    """
    theta = x.double().norm(dim=-1)
    mask = theta > 1e-10
    # Safe norm only feeds the exact branch, avoiding 0/0 there.
    theta_safe = torch.where(mask, theta, torch.ones_like(theta))
    exact = (2 - 2 * torch.cos(theta_safe)) / theta_safe ** 2
    series = 1 - theta ** 2 / 12  # evaluated at the *raw* norm
    ratio = torch.where(mask, exact, series)
    return torch.log(ratio).to(x.dtype)
|
82d22cef96578a90d677d4d631dfb2ed7c783d05
| 49,029 |
from typing import Tuple
def range_contains(a: Tuple[int, int], b: Tuple[int, int]) -> bool:
    """Whether range `a` completely contains range `b`.

    Note the asymmetric bounds: the lower end is inclusive (b[0] >= a[0])
    while the upper end is strict (b[1] < a[1]) — presumably half-open
    ranges; confirm against callers.

    Parameters
    ----------
    a : Tuple[int, int]
        the candidate containing range
    b : Tuple[int, int]
        the candidate contained range

    Returns
    -------
    bool
    """
    lower_ok = b[0] >= a[0]
    upper_ok = b[1] < a[1]
    return lower_ok and upper_ok
|
0beb69bb39f44d5495f0fbf1b11f0b93aa61d10d
| 363,533 |
def get_tasks_at_level(workflow, level):
    """
    Return the tasks at the given level of the workflow DAG.

    Args:
        workflow (DAG): DAG object of workflow
        level (int): level which output tasks needed to be

    Returns:
        list(Node)
    """
    # Hoist the node-map lookup: the original called workflow.get_nodes()
    # twice per loop iteration (assumed side-effect free — it returns the
    # DAG's node map).
    nodes = workflow.get_nodes()
    return [nodes[name] for name in nodes if nodes[name].get_level() == level]
|
997ac52d3c2fdcd0857b1f77446c3006cceea6af
| 610,306 |
def tm_format_currency(value):
    """
    Convert a raw fee / market value string (e.g. '€1.5m', '£500Th.')
    to a float.

    Parameters:
        value (str): raw value of fee or market value

    Returns:
        float: converted value
    """
    # Strip any currency symbol.
    for symbol in ('€', '£', '$'):
        value = value.replace(symbol, '')
    # 'm' suffix means millions, anything else ('Th.') means thousands.
    multiplier = 1000000 if value[-1] == 'm' else 1000
    cleaned = value.replace('Th.', '').replace('m', '').strip()
    return float(cleaned) * multiplier
|
a9d61674626044d8cd89fbc98ab229f99ceef079
| 352,520 |
def squared_array(x):
    """Return the element-wise square of *x* (works for scalars and arrays)."""
    return x * x
|
a06ec38f3d52c121887220fad11c53f5f2961cf5
| 501,864 |
def unwrap_array(data, period, in_place=False):
    """
    Removes jumps (due to fixed bit-width windowing, or something)
    from a data array. "data" should be an array of signed values,
    with possible jumps of size "period" in its right-most dimension.

    With in_place=True, the data is unwrapped in place, rather than
    creating a new buffer for the unwrapped data.
    """
    diffs = data[..., 1:] - data[..., :-1]
    # Count cumulative up/down wraps along the last axis; a step larger
    # than half a period is interpreted as a wrap.
    wraps_up = (diffs > period / 2).astype('int').cumsum(axis=-1)
    wraps_down = (diffs < -period / 2).astype('int').cumsum(axis=-1)
    if not in_place:
        data = data.astype('float')
    data[..., 1:] += float(period) * (wraps_down - wraps_up)
    return data
|
8c24180949e37c3dc3c8feca248bb74177264fb7
| 590,304 |
def armstrong_number(number):
    """
    Check if *number* is an Armstrong (narcissistic) number: equal to the
    sum of its digits each raised to the power of the digit count.

    The original hard-coded exponent 3, which only handles 3-digit numbers
    (it rejected e.g. 9474 = 9^4 + 4^4 + 7^4 + 4^4).
    """
    if number < 0:
        # Negatives were always rejected by the original as well.
        return False
    digits = str(number)
    power = len(digits)
    return number == sum(int(d) ** power for d in digits)
|
d18b4623848497635296395d416bcd1e6f8166da
| 211,283 |
def seperate_attributes_signatures(meta_df):
    """
    Splits the meta file into the catchment attributes and the hydrological
    signatures and returns them in seperate dataframes.
    We only selected the attributes also used by Addor et al 2018.
    """
    # Mapping of raw column name -> display label; dict order defines the
    # column order of the returned frames.
    attribute_names = {
        "area_gages2": "Area",
        "elev_mean": "Mean elevation",
        "slope_mean": "Mean slope",
        "p_seasonality": "Precipitation seasonality",
        "frac_snow": "Fraction of precipitation\nfalling as snow",
        "aridity": "Aridity",
        "high_prec_freq": "Frequency of high\nprecipitation events",
        "soil_depth_pelletier": "Depth to bedrock",
        "sand_frac": "Sand fraction",
        "clay_frac": "Clay fraction",
        "frac_forest": "Forest fraction",
        "lai_max": "LAI maximum",
        "gvf_max": "Green vegetation\nfraction maximum",
        "geol_1st_class": "Dominant geological class",
        "geol_porostiy": "Subsurface porosity",
        "geol_permeability": "Subsurface permeability",
    }
    # The 6 best signatures.
    signature_names = {
        "mean_ann_dis": "Mean annual discharge",
        "mean_win_dis": "Mean winter discharge",
        "hfd_mean": "Mean half-flow date",
        "q95": "Q95 (high flow)",
        "runoff_ratio": "Runoff ratio",
        "mean_sum_dis": "Mean summer discharge",
    }
    att_df = meta_df.loc[:, list(attribute_names)]
    att_df.columns = list(attribute_names.values())
    sig_df = meta_df.loc[:, list(signature_names)]
    sig_df.columns = list(signature_names.values())
    return att_df, sig_df
|
6ba3e48ca2ce04e14b192e2a55a9ea0609e508ef
| 188,746 |
def list_users(iam_client):
    """Return all IAM users reported by the given client."""
    response = iam_client.list_users()
    return response['Users']
|
da3509377968d642469e384bd6143783528e7f76
| 24,394 |
def not_(x):
    """Implement `not_`: the logical negation of *x*'s truthiness."""
    if x:
        return False
    return True
|
71e61b0895001200b1bd50c16177ab3718a075d5
| 280,139 |
def has_min_length(entities, length):
    """Check element-wise whether each term has the minimum required length."""
    # len(term) > length - 1 is the original's inclusive-minimum test.
    return entities.apply(lambda term: len(term) > length - 1)
|
7406b1d0d2a776b12ac89b624669d739a4831ffa
| 45,888 |
def get_anchors(paragraphs):
    """ Get all of the anchors within the article's paragraphs.

    Args:
        paragraphs: List of paragraphs (p tags) containing anchors (a tags)
    Returns:
        anchors: A list, per paragraph, of the anchors found inside it.
    """
    anchors = []
    for paragraph in paragraphs:
        anchors.append(paragraph.find_all("a"))
    return anchors
|
afa0303aedd2e0a27f17ff9bd9dfa244f67a5a3e
| 432,852 |
def _PortValue(value):
"""Returns True if port value is an int within range or 'default'."""
try:
return value == 'default' or (1 <= int(value) <= 65535)
except ValueError:
return False
|
bb5faa061d5659a43e8a278d2601e2df4feed802
| 156,673 |
def check_stop_flag(con) -> bool:
    """
    Check the global stop flag. Present is True, absent is False.
    """
    _, value = con.kv.get("service/rebootmgr/stop")
    return bool(value)
|
c245414adce7c10e6214fe81f17bbadd0428663b
| 522,995 |
def straight_line_from_points(a, b):
    """
    Generate a geojson LineString object from two geojson points.

    Parameters
    ----------
    a : geojson
        Point A
    b : geojson
        Point B

    Returns
    -------
    line : geojson
        A geojson LineString object.
    """
    def _lon_lat(point):
        # Extract the (lon, lat) pair from a geojson point.
        coords = point['geometry']['coordinates']
        return (coords[0], coords[1])

    return {
        'type': 'Feature',
        'geometry': {
            'type': 'LineString',
            'coordinates': [_lon_lat(a), _lon_lat(b)],
        },
        'properties': {
            'id': 'terrain path'
        }
    }
|
39d0b9dfd5aa9b96a237a76f4653fdc02475dc99
| 517,117 |
def _get_ws_location(water_surface_elev, zmax, zmin):
"""
Return one of three values depending on the location of the water surface
relative to the elevations of the discretized cross-section points.
Vectorized below.
Returns:
(str) one of the following: 'below' if above zmax (and automatically
zmin), 'triangle' if between zmax and zmin, and 'trapezoid'
if below zmin. This corresponds to the geometry that will be used
to calculate the wetted perimeter and area of the induced polygon.
"""
if water_surface_elev > zmax:
return 'trapezoid'
elif water_surface_elev <= zmax and water_surface_elev > zmin:
return 'triangle'
else:
return 'below'
|
7ff5c08abbea8d3f0563513e8cb1f2e1c4a55a8c
| 174,544 |
from typing import IO
from typing import Tuple
import base64
import click
def read_public_key(f: IO[str]) -> Tuple[str, str, str, str]:
    """
    Read a public SSH key.

    This function does some basic verification to see if the content
    looks like a public key.
    """
    contents = f.read()
    try:
        # Expect exactly "<kind> <base64-key> <comment>".
        kind, body, comment = contents.split(" ")
        if kind.startswith("ssh-") and comment:
            # b64decode raises binascii.Error (a ValueError subclass)
            # for malformed keys, which falls through to the exception.
            base64.b64decode(body)
            return (kind, body, comment, contents)
    except ValueError:
        pass
    raise click.ClickException("{} is not a valid SSH key".format(f.name))
|
6104d25f3ca32720ab680113fcc30622feec1796
| 612,218 |
import glob
def exists(pattern, proj_dirs):
    """Test if at least one matching config exists.

    :param pattern: config name pattern (e.g. 'linux-make-*')
    :param proj_dirs: array of toplevel dirs to search (must have /configs subdir)
    :returns: True if at least one matching config exists
    """
    return any(
        glob.glob('{}/configs/{}.yml'.format(proj_dir, pattern))
        for proj_dir in proj_dirs
    )
|
99b306dddfcb311adb3e23c7967285dd176bb830
| 188,695 |
def merge2first(attrs):
    """Compress a sequence by discarding all but its first element.

    This function can be useful as the 'attrfx' argument for an FxMapper.

    Parameters
    ----------
    attrs : sequence, arbitrary

    Returns
    -------
    First element of the input sequence.
    """
    head = attrs[0]
    return head
|
641595e3c05b7c2ecd1d90d5dd4744e7dea71393
| 226,198 |
def stream_resampling(stream, sampling_rate=100.0):
    """
    To resample the input seismic data.
    Parameters
    ----------
    stream : obspy stream
        input seismic data.
    sampling_rate : float
        required sampling rate in Hz, default is 100 Hz.
    Returns
    -------
    stream : obspy stream
        output seismic data after resampling.
    """
    # NOTE(review): traces are removed from `stream` while iterating over
    # it; obspy streams appear to tolerate this, but confirm — otherwise
    # iterate over a copy.
    for tr in stream:
        if tr.stats.sampling_rate != sampling_rate:
            if (len(tr.data) > 10):
                # perform resampling
                try:
                    if tr.stats.sampling_rate > sampling_rate:
                        # need lowpass filter before resampling
                        # (anti-aliasing at the new Nyquist frequency)
                        tr.filter('lowpass',freq=0.5*sampling_rate,zerophase=True)
                    tr.resample(sampling_rate=sampling_rate)
                except:
                    # NOTE(review): bare except — any failure in Fourier
                    # resampling falls back to linear interpolation;
                    # consider narrowing the caught exception types.
                    try:
                        tr.interpolate(sampling_rate, method="linear")
                    except:
                        # Both resampling strategies failed: drop the trace.
                        stream.remove(tr)
            else:
                # remove the trace if it only contains too few data points
                stream.remove(tr)
    return stream
|
8858d5296646077dc18ffcaf890cd29c7813964f
| 55,504 |
import math
def dynamic_order(x: int, q: int) -> int:
    """
    Returns the number of steps that will exist if a range(0, x)
    is quantized into steps of q.

    Uses exact integer ceiling division: the previous math.ceil(x / q)
    went through float division and silently lost precision for very
    large integers.
    """
    # ceil(x / q) for ints, with no float round-trip.
    return -(-x // q)
|
ca11585ddb42b725ce47875a95804ff76afea558
| 190,455 |
def kodi_to_ascii(string):
    """Convert Kodi format tags to ascii (None passes through)."""
    if string is None:
        return None
    tags = ('[B]', '[/B]', '[I]', '[/I]',
            '[COLOR gray]', '[COLOR yellow]', '[/COLOR]')
    for tag in tags:
        string = string.replace(tag, '')
    return string
|
46c738174ed79dabab932ee77438b3e6574801fb
| 353,718 |
def convertBinListToInt(bin_list):
    """
    Convert a binary list ([1, 0, 1, 0]) to an integer.

    :param bin_list: Binary list
    :return: Integer representation of the binary list (integer)
    """
    bits = "".join(str(bit) for bit in bin_list)
    return int(bits, 2)
|
86412fff718c2e800c22d4c67f97610a1975dddf
| 314,618 |
def int_option(options, name, default):
    """
    Returns the integer value of a command line option with `name` from the
    `options` dictionary.

    Parameters
    ----------
    options : dict
        A dictionary containing command line options, where keys are the option names and values are the values.
    name : str
        The name of the command line option.
    default: int
        The default value of the command line option to use if it is missing in `options`.

    Returns
    -------
    int
        The value of the option. Exits the process with status 1 when the
        present value cannot be parsed as an integer.
    """
    if name not in options:
        return default
    try:
        return int(options[name])
    except ValueError:
        print(f"ERROR: option '{name}' needs to be an integer number.")
        exit(1)
|
e11bdfb9fa4488c8b5809a71a621376cac513a93
| 393,176 |
import struct
def set_usint(bytearray_: bytearray, byte_index: int, _int: int) -> bytearray:
    """Set an unsigned small int in the buffer.

    Notes:
        Datatype `usint` (Unsigned small int) consists of 1 byte in the PLC.
        Maximum possible value is 255; lowest possible value is 0.

    Args:
        bytearray_: buffer to write.
        byte_index: byte index from where to start writing.
        _int: value to write.

    Returns:
        Buffer with the written value.

    Examples:
        >>> data = bytearray(1)
        >>> snap7.util.set_usint(data, 0, 255)
        bytearray(b'\\xff')
    """
    value = int(_int)
    # The struct round-trip validates the 0..255 range (struct.error otherwise).
    packed = struct.unpack('B', struct.pack('>B', value))
    bytearray_[byte_index] = packed[0]
    return bytearray_
|
53e43cd1d17d8d008d4264b5529a9002646a29a2
| 135,838 |
import torch
def compute_gaussian_kl(z_mean, z_logvar):
    """Compute KL divergence between input Gaussian and Standard Normal,
    averaged over the batch (first) dimension."""
    per_dim = z_mean ** 2 + torch.exp(z_logvar) - z_logvar - 1
    return 0.5 * torch.mean(per_dim, [0])
|
41cda8f84c7bf74a86c398ee38f5e65195e6b2fb
| 611,851 |
def pluralize(amount: int, noun: str, *, suffix: str = "s") -> str:
    """
    Get a pluralized noun with its appropriate quantifier.

    Zero renders as "no"; only an amount of exactly 1 keeps the singular.
    """
    count = "no" if not amount else amount
    word = noun if amount == 1 else noun + suffix
    return f"{count} {word}"
|
5c730672df0bd75f51e93177929bba275c35e781
| 210,144 |
import re
# Compiled once; accepts both http and https (the old pattern missed https links).
_YOUTUBE_URL_RE = re.compile(
    r"(https?://)?(www\.)?((youtu\.be/)|(youtube\.com/watch\?v=))"
    r"(?P<id>[A-Za-z0-9\-=_]{11})")

def youtube(content):
    """Replace any YouTube URL in `content` with an embedded video iframe.

    Recognizes youtu.be short links and youtube.com/watch?v= links, with or
    without scheme/www. The replacement markup is unchanged from before.
    """
    return _YOUTUBE_URL_RE.sub('''
    <iframe width="480" height="390"
    src="http://www.youtube.com/embed/\g<id>" frameborder="0"
    allowfullscreen></iframe>
    ''', content)
|
c1dd9507ffd71bf1d519f62e657cec1b1068f894
| 223,543 |
def _format_secs(secs: float):
"""Formats seconds like 123456.7 to strings like "1d10h17m"."""
s = ""
days = int(secs / (3600 * 24))
secs -= days * 3600 * 24
if days:
s += f"{days}d"
hours = int(secs / 3600)
secs -= hours * 3600
if hours:
s += f"{hours}h"
mins = int(secs / 60)
s += f"{mins}m"
return s
|
19293e8f9f54557253908c4f75bc9f4f8950231a
| 156,562 |
import pprint
def format_for_leo(obj):
    """Pretty-print `obj` to a string so it can be edited in Leo."""
    formatted = pprint.pformat(obj)
    return formatted
|
f552bbbaafa0c229bad6ef6c5a8e2fed89330f08
| 63,001 |
def grid_enc_inv(w, l, k):
    """Invert the grid encoding: map linear index k back to (i, j, c).

    i, j, and c all start at 1. Uses floor division so the coordinates
    come back as ints (the old `/` true division returned floats for j and c).
    """
    i = ((k - 1) % w) + 1
    j = ((k - i) % (w * l)) // w + 1
    c = (k - i - (j - 1) * w) // (w * l) + 1
    return (i, j, c)
|
4463ff07cb657fcc58659f4b9074baf333285ec7
| 455,639 |
def write_readmap(fh, rmap, namedic=None):
    """Write a read map to a tab-delimited file.

    Parameters
    ----------
    fh : file handle
        Output file.
    rmap : dict
        Read-to-taxon(a) map; values are either a single taxon or a
        {taxon: count} dict.
    namedic : dict, optional
        Taxon name dictionary (ids not present are written as-is).
    """
    for read, taxa in rmap.items():
        fields = [read]
        if isinstance(taxa, dict):
            # highest count first; ties broken alphabetically by taxon id
            for taxon, count in sorted(taxa.items(), key=lambda kv: (-kv[1], kv[0])):
                label = namedic[taxon] if namedic and taxon in namedic else taxon
                fields.append(f'{label}:{count}')
        else:
            fields.append(namedic[taxa] if namedic and taxa in namedic else taxa)
        print('\t'.join(fields), file=fh)
|
e7da4b9fe8ea551d8d2fd4e35b514b60abee9d84
| 589,030 |
def array_to_string(array):
    """Convert a numeric array into mujoco's space-separated string format.

    Examples:
        [0, 1, 2] => "0 1 2"

    Args:
        array (n-array): values to serialize.

    Returns:
        str: space-joined string form of @array.
    """
    return " ".join(str(element) for element in array)
|
8ae2a9e60fb250c5608621fd7fd67cdbc4936e5a
| 375,911 |
import math
def hex_shape(x, y, size):
    """Return the six (x, y) vertices of a hexagon of the given size centered at (x, y)."""
    angles = [math.pi / 6 * (i * 2) for i in range(6)]
    return [(math.cos(a) * size + x, math.sin(a) * size + y) for a in angles]
|
371262dd3156747db9598637a21f37e32d316ab7
| 71,645 |
def escape_rst(string: str) -> str:
    """Escape RST symbols and disable Sphinx smart quotes.

    Single-pass character translation; equivalent to escaping the backslash
    first and then each special character.
    """
    table = str.maketrans({
        '\\': '\\\\',
        '*': '\\*',
        '|': '\\|',
        '`': '\\`',
        "'": "\\'",
        '"': '\\"',
    })
    return string.translate(table)
|
a9f060524b900a1931229a244721e14e83ab00a8
| 137,305 |
def _norm_id(id):
"""Default id normalizer does nothing."""
return id
|
501dd5b20f893462f2d9a9b8edb0b87e0b867741
| 493,952 |
def make_xerr(star):
    """Return the error vector [parallax_error, pmra_error, pmdec_error] as float32.

    `star` is expected to be a pandas Series holding those labels.
    """
    return star.loc[['parallax_error', 'pmra_error', 'pmdec_error']].values.astype('f')
|
3e15624fc16bc7706d74dae66c5963029acfcda6
| 92,893 |
def mape(a, p):
    """Absolute percentage error |p - a| / a as a fraction.

    NOTE(review): despite the name, no mean is taken and no *100 scaling is
    applied — for array inputs this is elementwise relative error.
    """
    return abs(p - a) / a
|
86a78f15685fbb04c95b31529ec4dec0760c8d25
| 265,223 |
def _IsLower(c):
"""Returns True if c is lower case or a caseless ideograph."""
return c.isalpha() and (c.islower() or not c.isupper())
|
85594c30b650104196184436552f18fe27ec4803
| 347,782 |
def _ensure_unix_line_endings(path):
"""Replace windows line endings with Unix. Return path to modified file."""
out_path = path + "_unix"
with open(path) as inputfile:
with open(out_path, "w") as outputfile:
for line in inputfile:
outputfile.write(line.replace("\r\n", "\n"))
return out_path
|
c811eed0860f92dc58b2a184ec6d2a866ef6ee46
| 656,643 |
import re
def replace_multiple_strings_one_pass(s, repDict):
    """Replace multiple strings in **one pass** over `s`.

    Takes a dictionary of {'old_text': 'new_text'}. Because the replacement
    is made in one pass, later replacements never see text produced by
    earlier ones, avoiding problems like
    "spamham sha".replace("spam", "eggs").replace("sha", "md5")
    yielding "eggmd5m md5" instead of "eggsham md5".

    Fixes over the naive version:
    - an empty repDict returns s unchanged (an empty alternation pattern
      would match at every position and raise KeyError);
    - keys are tried longest-first, so when one key is a prefix of another
      ("ab" vs "abc") the longest match wins instead of dict order deciding.
    """
    if not repDict:
        return s
    keys = sorted(repDict, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(k) for k in keys))
    return pattern.sub(lambda match: repDict[match.group(0)], s)
|
6aba89a84e121081bbdc568f11657211c5c7f12e
| 394,797 |
import copy
def kfold_cross_validation(X, n_splits=5):
"""Split dataset into cross validation folds.
Args:
X(list of list of obj): The list of samples
The shape of X is (n_samples, n_features)
n_splits(int): Number of folds.
Returns:
X_train_folds(list of list of int): The list of training set indices for each fold
X_test_folds(list of list of int): The list of testing set indices for each fold
Notes:
The first n_samples % n_splits folds have size n_samples // n_splits + 1,
other folds have size n_samples // n_splits, where n_samples is the number of samples
(e.g. 11 samples and 4 splits, the sizes of the 4 folds are 3, 3, 3, 2 samples)
Loosely based on sklearn's KFold split(): https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html
"""
X_train_folds = []
X_test_folds = []
# We're going to make a list of the split-up folds to start.
n = 0
folds = []
for i in range(n_splits):
folds.append([])
# Just get the indices
while n < len(X):
folds[n % n_splits].append(n)
n += 1
# Now we need to use these folds to generate n_splits sets for each
for i in range(n_splits):
new_train_fold = []
new_test_fold = []
for j in range(n_splits):
if i == j:
for instance in folds[j]:
new_test_fold.append(copy.deepcopy(instance))
else:
for instance in folds[j]:
new_train_fold.append(copy.deepcopy(instance))
X_train_folds.append(copy.deepcopy(new_train_fold))
X_test_folds.append(copy.deepcopy(new_test_fold))
return X_train_folds, X_test_folds
|
c4e0dbf4c10e5cb36022e66bcc30a2def15bee77
| 584,436 |
import collections
def list_compare(a: list, b: list) -> bool:
    """True when a and b hold the same elements with the same multiplicities, ignoring order."""
    counts_a = collections.Counter(a)
    counts_b = collections.Counter(b)
    return counts_a == counts_b
|
05a15f46b5e00f6e17e81f4b67332a196154f4e7
| 31,848 |
def plus1(N):
    """Return a number one larger than its input."""
    return 1 + N
|
4641f4b5819660093abb02ac36f9ce1da83cdc1d
| 378,014 |
import json
def flat_unique(path='data/arxiv_metadata.json'):
    """Flatten a metadata json file and remove duplicate papers.

    Papers are deduplicated by their Arxiv 'id'; the first occurrence (in
    file order) is kept.

    Parameters:
        path: str
    Returns:
        all_unique_papers: list of dicts
    """
    with open(path) as f:
        paper_meta = json.load(f)
    seen_ids = set()
    all_unique_papers = []
    # Single pass: track ids in a set since the paper dicts themselves
    # are unhashable.
    for papers in paper_meta.values():
        for paper in papers:
            if paper['id'] not in seen_ids:
                seen_ids.add(paper['id'])
                all_unique_papers.append(paper)
    return all_unique_papers
|
f044336e0a77d3348ee546eece74ddda17a1db9a
| 485,585 |
from typing import TextIO
import json
def load_mb_tags(fhandle: TextIO) -> dict:
    """Load track metadata.

    Args:
        fhandle (str or file-like): path or file-like object pointing to
            musicbrainz metadata file

    Returns:
        Dict: metadata of the track
    """
    metadata = json.load(fhandle)
    return metadata
|
2e53983bf743fd284095a017b1d6e277db80e69c
| 76,469 |
def is_collections_type(type_):
    """Checks if the given type is a ``collections`` module type.

    :param type_: The type to check
    :return: True if the type is part of the ``collections`` module, otherwise False
    :rtype: bool
    """
    if not isinstance(type_, type):
        return False
    return getattr(type_, "__module__", None) == "collections"
|
9adafde3ea822d7c2f1c3211e60cdf8d65bed162
| 532,618 |
import time
def datetime_to_seconds_since_epoch(dt):
    """Convert a (local-time) datetime to seconds since the epoch.

    Sub-second precision is dropped because the conversion goes through
    the struct_time tuple.
    """
    time_tuple = dt.timetuple()
    return time.mktime(time_tuple)
|
7f9b48591c0199aa1c1882fe84ecea3a7bd9526f
| 35,138 |
def read_file(filename: str, offset: int, size: int) -> str:
    """Read `size` characters starting at position `offset` of a text file.

    NOTE(review): the file is opened in text mode, so `offset` is a text-mode
    seek position and `size` counts characters, not bytes — fine for ASCII /
    single-byte encodings, undefined for arbitrary offsets in multibyte
    text. Confirm callers only pass byte==char offsets.
    """
    with open(filename, 'r') as fp:
        fp.seek(offset)
        return fp.read(size)
|
7df16f51af937967861b9bbf315f15a7181809bf
| 548,484 |
def reverse(list_a: list):
    """Problem 5: Reverse a List.

    Reverses the list **in place** and returns the same list object.

    Parameters
    ----------
    list_a : list
        The input list

    Returns
    -------
    list_a : list
        The input list reversed

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    """
    if isinstance(list_a, list):
        list_a.reverse()
        return list_a
    raise TypeError('The argument given is not of `list` type.')
|
ae7d40fc6e520367d62b2dcf4ceeca9597a7c770
| 608,429 |
import ast
def _to_ast_call(module_name, func_name, args=[], kwds=[]):
"""Creates an AST equivalent sympy call."""
# if module_name not in globals():
# globals()[module_name] = importlib.import_module(module_name)
return ast.Call(
func=ast.Attribute(
value=ast.Name(id=module_name, ctx=ast.Load()),
attr=func_name,
ctx=ast.Load(),
),
args=args,
keywords=kwds,
)
|
59182d9417dea444a5a513df7960152d39c2daf7
| 360,606 |
def readSinks(sinkFile):
    """Get the set of compounds to consider as sink.

    Reads the first tab-separated column of each line of `sinkFile`.
    Uses a context manager so the file handle is closed deterministically
    (the previous version leaked it).
    """
    sinks = set()
    with open(sinkFile) as fh:
        for line in fh:
            sinks.add(line.rstrip().split('\t')[0])
    return sinks
|
82e29af81834898aed4924fba697f0c7dd82e85d
| 597,214 |
def all_prod_replicates_done(job):
    """Check if all prod replicate simulations completed.

    Returns False (instead of raising) when the counters are missing from
    job.doc: `.get()` returns None for absent keys, and comparing Nones
    raises TypeError, which the previous version did not catch.
    """
    try:
        done = job.doc.get("prod_replicates_done")
        total = job.doc.get("num_prod_replicates")
        if done >= total:
            print("simulation complete for {} job".format(job))
        return done >= total
    except (AttributeError, KeyError, TypeError):
        return False
|
24182bf5ddeb68284dd4a0abda4cf19f6088062f
| 202,212 |
def get_vpc_routers(cloud_driver, vpc_id):
    """Return the list of VPC Router objects, or None when the VPC has none."""
    routers = cloud_driver.ex_list_routers(vpc_id=vpc_id)
    return routers if routers else None
|
21990f178b821ee14394188c6831657a67609911
| 298,191 |
def get_stat(typ_in, typ_out):
    """Return (recovery_rate, false_rate) for 'point' sources.

    typ_in / typ_out are array-likes of type labels supporting boolean
    masking (e.g. numpy arrays). recovery_rate is the fraction of input
    stars recovered; false_rate is the fraction of recovered objects that
    are not stars.
    """
    n_stars_in = len(typ_in[typ_in == 'point'])
    n_stars_out = len(typ_out[typ_out == 'point'])
    recovery_rate = n_stars_out / n_stars_in
    false_rate = 1 - (n_stars_out / len(typ_out))
    return recovery_rate, false_rate
|
532aa038ab9ba81bb72aa0c7a65b41ba8d4336b6
| 512,626 |
def remove_underscore_from_end_prefix(es_index_prefix: str) -> str:
    """
    Remove a single trailing underscore from the index prefix, if present.

    :param es_index_prefix: the index prefix which may end with an underscore
    :return: the 'cleaned' index prefix
    """
    if es_index_prefix.endswith("_"):
        return es_index_prefix[:-1]
    return es_index_prefix
|
7c0619a9f50d6379f4b024a5a44eceb6cc15765c
| 338,996 |
import six
def _FormatForCommand(token):
"""Replaces underscores with hyphens, unless the token starts with a token.
This is because we typically prefer hyphens to underscores at the command
line, but we reserve hyphens at the start of a token for flags. This becomes
relevant when --verbose is activated, so that things like __str__ don't get
transformed into --str--, which would get confused for a flag.
Args:
token: The token to transform.
Returns:
The transformed token.
"""
if not isinstance(token, six.string_types):
token = str(token)
if token.startswith('_'):
return token
else:
return token.replace('_', '-')
|
023b0ead0d22bdb3574dfad11d69f22f2ec5a507
| 346,231 |
def str2int(s):
    """Return the int value of a string; handles forms like '1e6' via float."""
    try:
        return int(s)
    except ValueError:
        # Not a plain integer literal — go through float ('1e6', '3.7', ...)
        return int(float(s))
1234c7eb4523a8d65f8e933c117611e333c174f7
| 348,003 |
def dedup_list(list):
    """Return a new list with duplicates removed, keeping first occurrences.

    Membership is tested with `in` on a list (equality-based), so the items
    need not be hashable — at the cost of O(n^2) time.
    """
    deduped = []
    for item in list:
        if item not in deduped:
            deduped.append(item)
    return deduped
|
712e236576d1dbfde1a56914aefcd78dcfbd6acc
| 67,275 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.