content | sha1 | id
---|---|---|
def find_pivot(input_arr, min_idx, max_idx):
    """
    Find the pivot index of a rotated sorted array
    Time complexity: O(log2(n))
    Space complexity: O(1)
    Args:
        input_arr(array): rotated sorted array
        min_idx(int): lower bound of the search interval
        max_idx(int): upper bound of the search interval
    Returns:
        pivot_idx(int)
    """
    # base case: empty interval, e.g. when the array is not rotated at all
    if min_idx > max_idx:
        return -1
    mid = (min_idx + max_idx) // 2
    # if the mid element is higher than the next one, we found the pivot
    if mid < max_idx and input_arr[mid] > input_arr[mid + 1]:
        return mid
    # if the mid element is lower than the previous one, the pivot is at mid - 1
    if mid > min_idx and input_arr[mid] < input_arr[mid - 1]:
        return mid - 1
    # if the first element is higher than or equal to the mid element,
    # recurse on the lower interval
    if input_arr[min_idx] >= input_arr[mid]:
        return find_pivot(input_arr, min_idx, mid - 1)
    # otherwise the first element is lower than the mid element,
    # so recurse on the higher interval
    else:
        return find_pivot(input_arr, mid + 1, max_idx)
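# Illustrative usage with assumed example input (a list rotated around its minimum):
rotated = [4, 5, 6, 7, 0, 1, 2]
print(find_pivot(rotated, 0, len(rotated) - 1))  # 3 (index of the largest element, 7)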
|
888170f099f78fda36b4832757f11fe9d0d66d83
| 44,512 |
def opv(d, func, *args):
"""
Apply func to all values of a dictionary.
:param d: A dictionary.
:param func: Callable accepting a value of `d` as first parameter.
:param args: Additional positional arguments to be passed to `func`.
:return: `dict` mapping the keys of `d` to the return value of the function call.
"""
return {i: func(v, *args) for i, v in d.items()}
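# Illustrative usage with assumed example input: square every value with the built-in pow.
print(opv({'a': 1, 'b': 2}, pow, 2))  # {'a': 1, 'b': 4}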
|
418abaf1e7a843a1c280d2c55fa7ac22f9496d2d
| 44,513 |
from typing import List
from typing import Any
def _list_split(
lst: List[Any],
split_point: Any
) -> List[List[Any]]:
"""
Splits a given lists into multiple lists based on the provided split points.
:param lst: Given list that needs to be split.
:param split_point: Element in the list that is used as a delimiter to split the list into different lists.
:return: A list of lists containing the separated lists.
"""
temp = []
final = []
for element in lst:
if split_point in element:
final.append(temp)
temp = [element]
else:
temp.append(element)
final.append(temp)
final.remove([])
return final
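# Illustrative usage with assumed example input (string elements, '#' as the split marker):
sections = _list_split(["# intro", "line 1", "# body", "line 2"], "#")
print(sections)  # [['# intro', 'line 1'], ['# body', 'line 2']]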
|
e3eb6613e56757f11a755bd38e3d5acbba8eec75
| 44,514 |
def pk_encode_public_key(key):
"""Creates an ASN1 representation of a public key for external storage."""
return key.encode_key(1)
|
c3b769d96a990346f2445b0e373363244202838f
| 44,515 |
def path2handle(path):
""" Translates a full path to a handle. """
return path.split('/')[-1]
|
644328243d494707a02f1746d8d8e2987342761b
| 44,517 |
import csv
from typing import Optional
def _read_csv(
    filepath: str, delimiter: str = ",", quotechar: str = '"', escapechar: Optional[str] = None
) -> dict:
    """Process a given csv into a python dictionary
    Arguments:
        filepath: string pointing to csv file
        delimiter: string that denotes what char separates values in csv, default is ','
        quotechar: string that denotes what char surrounds fields containing the delimiter char, default is '"'
        escapechar: string that denotes what char escapes the delimiter char, default is no escape char (None)
    Returns:
        csv_dict: dictionary whose keys are column numbers and values are column lists
    """
    csv_dict = {}
    with open(filepath, "r") as csv_file:
        csv_reader = csv.reader(
            csv_file, delimiter=delimiter, quotechar=quotechar, escapechar=escapechar
        )
        header = next(csv_reader)
        num_cols = len(header)
        for num in range(num_cols):
            csv_dict[num] = [header[num]]
        for row in csv_reader:
            for num in range(num_cols):
                csv_dict[num].append(row[num])
    return csv_dict
|
4a57d54bb569f85ba6721682c9edb501a70b95cb
| 44,520 |
def sort_dict_by_value(m_dict):
    """Sort the dict by value in descending order and return a list of (key, value) tuples"""
    list_tuple = [(x, m_dict[x]) for x in m_dict]
    list_tuple.sort(key=lambda x: -x[1])
    return list_tuple
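# Illustrative usage with assumed example input:
print(sort_dict_by_value({'a': 1, 'b': 3, 'c': 2}))  # [('b', 3), ('c', 2), ('a', 1)]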
|
f4e84eed9e4353ddc67822ecccef5ed0aa2a7dc2
| 44,521 |
def timTrangSinh(cucSo):
"""Tìm vị trí của Tràng sinh
Theo thứ tự cục số
vị trí Tràng sinh sẽ là Dần, Tỵ, Thân hoặc Hợi
*LƯU Ý* Theo cụ Thiên Lương: Nam -> Thuận, Nữ -> Nghịch
Args:
cucSo (int): số cục (2, 3, 4, 5, 6)
Returns:
int: Vị trí sao Tràng sinh
Raises:
Exception: Description
"""
if cucSo == 6: # Hỏa lục cục
return 3 # Tràng sinh ở Dần
elif cucSo == 4: # Kim tứ cục
return 6 # Tràng sinh ở Tỵ
elif cucSo == 2 or cucSo == 5: # Thủy nhị cục, Thổ ngũ cục
return 9 # Tràng sinh ở Thân
elif cucSo == 3: # Mộc tam cục
return 12 # Tràng sinh ở Hợi
else:
# print cucSo
raise Exception("Không tìm được cung an sao Trường sinh")
|
fe187369531f864fe5bc8a9a76dc5e5641e9d891
| 44,522 |
def ct_neq_u32(val_a, val_b):
"""
Return 1 if val_a != val_b, 0 otherwise. Constant time.
:type val_a: int
:type val_b: int
:param val_a: an unsigned integer representable as a 32 bit value
:param val_b: an unsigned integer representable as a 32 bit value
:rtype: int
"""
val_a &= 0xffffffff
val_b &= 0xffffffff
return (((val_a-val_b)&0xffffffff) | ((val_b-val_a)&0xffffffff)) >> 31
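# Illustrative usage with assumed example inputs:
print(ct_neq_u32(1, 2))  # 1 (values differ)
print(ct_neq_u32(7, 7))  # 0 (values equal)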
|
57f9b86232c45d2d271f9f7800519494f3802c2f
| 44,527 |
def file_system_arn(arn):
"""
Converts an ARN to a file-system friendly string, so that it can be used for directory &
file names
"""
for source, dest in {":": "#", "/": "_", " ": "_"}.items():
arn = arn.replace(source, dest)
return arn
|
2c355a91e48a5ad87682e945d37f3b9c61311e46
| 44,528 |
def getIfromRGB(rgb):
"""
Converts rgb tuple to integer
    :param rgb: the rgb tuple in 0-255 scale
:return: the integer
"""
red = rgb[0]
green = rgb[1]
blue = rgb[2]
RGBint = (red << 16) + (green << 8) + blue
return RGBint
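# Illustrative usage with assumed example input:
print(getIfromRGB((255, 0, 0)))  # 16711680 (0xFF0000)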
|
b5135d62f9c602997bed5c8feabf4224d41e85ee
| 44,532 |
def fix_count(count):
"""Adds commas to a number representing a count"""
return '{:,}'.format(int(count))
|
c0a85f118447a0643952ae0ebc02a0fe117de102
| 44,538 |
import math
def rotate(x, y, degree):
"""Rotate a coordinate around point (0,0).
- x and y specify the coordinate.
- degree is a number from 0 to 360.
Returns a new coordinate.
"""
radian = float(degree) * 2 * math.pi / 360.0
newx = math.cos(radian) * x - math.sin(radian) * y
newy = math.sin(radian) * x + math.cos(radian) * y
return (newx, newy)
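# Illustrative usage with assumed example input: rotate the point (1, 0) by 90 degrees counter-clockwise.
print(rotate(1, 0, 90))  # approximately (0.0, 1.0)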
|
6dd39e71d5fece2de6a829c5f62975e4f271cc30
| 44,549 |
import random
import string
def random_string(strlen=10):
"""Generating a random string of a certain length"""
return "".join([random.choice(string.ascii_letters) for _ in range(strlen)])
|
87b9ed7dae2a1ae341b64a764b1e467287e0e194
| 44,560 |
from typing import Union
from typing import SupportsInt
def int_or_default(value: Union[str, bytes, SupportsInt], default: int) -> int:
"""
    Transforms the given value into an int, if possible. Otherwise, uses the default value.
:param value: the value to transform into an int
:type value: object
:param default: the default value to use, if the conversion fails.
:type default: int
:return: the converted value, or the default one.
:rtype: int
"""
try:
value = int(value)
except ValueError:
value = default
return value
|
a9628080194b62e5cfbecca717f2763ef8c6254b
| 44,577 |
def get_node_instance(node_instance_id, rest_client):
""" Get a node instance object.
:param node_instance_id: The ID of the node instance.
:type node_instance_id: str
:param rest_client: A Cloudify REST client.
:type rest_client: cloudify_rest_client.client.CloudifyClient
:return: request's JSON response
:rtype: dict
"""
return rest_client.node_instance.get(node_instance_id=node_instance_id)
|
1c9554307d5a4552d7233d3a48025e624efa3c29
| 44,582 |
import binascii
def encode_domain(domain):
"""Given a domain with possible Unicode chars, encode it to hex."""
try:
return binascii.hexlify(domain.encode('idna'))
except UnicodeError:
# Some strange invalid Unicode domains
return None
|
ae2d761adcf5956b9657ea8d60d3ea202f19f241
| 44,583 |
from datetime import date
def votar(ano=2000):
"""
    -> Checks a person's voting status based on their year of birth
    :param ano: the person's year of birth (default: year 2000)
    :return: the person's voting situation
"""
idade = date.today().year - ano
print(f'Com {idade} anos, sua situação se voto é ', end='')
if idade < 16:
return 'NEGADO!'
elif 18 > idade or idade > 65:
return 'OPCIONAL'
else:
return 'OBRIGATÓRIO!'
|
5de81e2473a1c8037a9cfe62f54db6ce1e1a14c7
| 44,584 |
def join_population_data(daily_data, population_data):
"""
Merges daily_data and population_data dataframes
Parameters
----------
daily_data : df
dataframe of daily observation
population_data : df
dataframe of population
Returns
-------
merged df
merged dataframe from daily_data and population_data
"""
return daily_data.merge(population_data, how = 'left', on = 'Country/Region')
|
56086e59a60342b1c994bba09ccf66d6fa02f379
| 44,585 |
import re
def is_allowed_anonymous_path(path, method):
"""Checks if a given path and method is allowed for accessing without being authenticated"""
allowed_regex_paths = [['/assignments/.*', ['POST']]]
for item in allowed_regex_paths:
regex_path, allowed_methods = item[0], item[1]
pattern = re.compile(regex_path)
if pattern.match(path) and method in allowed_methods:
return True
return False
|
8a8566c321b8657d345ada8910793b2f515f81a3
| 44,588 |
def join(*parts):
"""
Join path name components, inserting ``/`` as needed.
If any component looks like an absolute path (i.e., it starts with
``hdfs:`` or ``file:``), all previous components will be discarded.
Note that this is *not* the reverse of :func:`split`, but rather a
specialized version of os.path.join. No check is made to determine
whether the returned string is a valid HDFS path.
"""
try:
path = [parts[0].rstrip("/")]
except IndexError:
raise TypeError("need at least one argument")
for p in parts[1:]:
p = p.strip("/")
if p.startswith('hdfs:') or p.startswith('file:'):
path = [p]
else:
path.append(p)
return "/".join(path)
|
e1d478740417df0b30dcda33e8893f7cb37a0159
| 44,590 |
from typing import Dict
def _create_ensg_pkg_map() -> Dict:
"""Reads the text file that was generated when installing ensg R
packages, and returns a map whose keys are chip names and values are
the corresponding BrainArray ensg package name.
"""
ensg_pkg_filename = "/home/user/r_ensg_probe_pkgs.txt"
chip2pkg = dict()
with open(ensg_pkg_filename) as file_handler:
for line in file_handler:
tokens = line.strip("\n").split("\t")
# tokens[0] is (normalized) chip name,
# tokens[1] is the package's URL in this format:
# http://mbni.org/customcdf/<version>/ensg.download/<pkg>_22.0.0.tar.gz
pkg_name = tokens[1].split("/")[-1].split("_")[0]
chip2pkg[tokens[0]] = pkg_name
return chip2pkg
|
01c542e025e558cf319228a84cbfeb89d2786ac1
| 44,591 |
def attr(name):
"""Produces a function that accesses an attribute
:param name: Name of an attribute
:returns: A function that when applied to an instance returns the
value for the attribute 'name'
:rtype: function
"""
def _attr(obj):
"""Wrapped function for accessing an attribute
The attribute 'name' is defined in the enclosing closure.
        :param obj: an object
:returns: Value of the 'name' attribute or ''
"""
return getattr(obj, name) if hasattr(obj, name) else ''
return _attr
|
5f6a1e1eb3a789a4828aaf54b25c246b8bfee4f8
| 44,593 |
def is_record_package(data):
"""
Returns whether the data is a record package.
A record package has a required ``records`` field. Its other required fields are shared with release packages.
"""
return 'records' in data
|
c55734ef447f74c9f36d8c18d0cfe642aaf37178
| 44,595 |
from typing import Tuple
from typing import Union
import requests
def check_url(url: str) -> Tuple[bool, Union[str, requests.Response]]:
"""Returns information on the availability of the url
Parameters
----------
url : str
The url to test
Returns
-------
Tuple[bool, Union[str, Response]]
        Whether the url is available, and either the Response (on success) or an error string
"""
try:
response = requests.head(url, allow_redirects=False)
return True, response
except requests.exceptions.SSLError:
return False, "SSL error"
except requests.exceptions.ConnectionError:
return False, "Connection error"
except requests.exceptions.InvalidSchema:
return False, "Invalid schema"
except requests.exceptions.MissingSchema:
return check_url("https://" + url)
|
09ed074bd8f71288788a4265e98f23aa953a6969
| 44,599 |
def getAtList(l, idx, default=None):
"""
Safe .get for lists
"""
try:
return l[idx]
except IndexError:
return default
|
06a168f4fec0573355a93500f6591491438e6452
| 44,601 |
def _annuity_pv_factor(r,n):
"""Present value factor for an annuity. Formula equivalent to C/r + r + C/(1+r)**2 + ... + C/(1+r)**n
Parameters
----------.
r: float
Interest rate
n: int
Number of payments
"""
return (1 - (1/(1+r)**n)) / r
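# Illustrative usage with assumed example inputs: 10 annual payments at a 5% rate.
print(_annuity_pv_factor(0.05, 10))  # approximately 7.7217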
|
4216ba927975ef2313a41ef135957119e1930e8d
| 44,604 |
from typing import List
def read_list_from_file(fpath: str, skip_header: bool=False) -> List[str]:
"""Parse a file into an array of strings, splitting on newline, and optionally skipping the first row.
Args:
fpath (str): File to read.
skip_header (bool, optional): If True, the first line is skipped as a header. Defaults to False.
Returns:
List[str]: Lines of the file, one list entry per line.
"""
with open(fpath, 'r') as fp:
lines = fp.read().splitlines()
if skip_header:
return lines[1:]
else:
return lines
|
1283fe1c8ea6847516153271c4dd0695ab5b60da
| 44,605 |
import re
def camel_to_snake(text_string):
"""Transform a CamelCase string into snake_case
"""
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
s1 = FIRST_CAP_RE.sub(r'\1_\2', text_string)
return ALL_CAP_RE.sub(r'\1_\2', s1).lower()
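# Illustrative usage with assumed example inputs:
print(camel_to_snake("CamelCaseString"))  # 'camel_case_string'
print(camel_to_snake("parseHTMLPage"))    # 'parse_html_page'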
|
d3c06d6a380546e0fcea606690338dae9212e168
| 44,606 |
def select_daily_mean(daily_mean, gw_data):
"""
Select the lakes in the daily_mean file that are retained in the final growth window.
Input:
daily_mean: dataframe with all compiled daily mean water quality data
gw_data: growth window dataframe (output from the growth_window_means function)
Output:
selected_daily_mean: Dataframe of daily mean data for all lakes within the growth window dataset
"""
final_lakes_list = gw_data.lake.unique()
boolean_series = daily_mean.lake.isin(final_lakes_list)
selected_daily_mean = daily_mean[boolean_series]
return selected_daily_mean
|
331bead7dcbe17086f52b247f807be87d5fe0e43
| 44,607 |
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
"""
Computes readability score from summary statistics
:param total_syllables: number of syllables in input text
:param total_words: number of words in input text
:param total_sentences: number of sentences in input text
:return: A readability score: the lower the score, the more complex the text is deemed to be
"""
    return (
        206.835  # standard constant of the Flesch reading-ease formula
        - 1.015 * (total_words / total_sentences)
        - 84.6 * (total_syllables / total_words)
    )
|
8b0bc43274766dd0f2e3f7b585f79bf1ccd427dc
| 44,609 |
import yaml
def load_from_yml(filename):
"""Load an experiment configuration from a yaml file. For examples,
take a look at `data` folder
Args:
filename (str): The name of the data file (use full or relative path)
Returns:
tuple: The first value in the tuple is an iterable of cars in a
sequence. The second returned value is a mapping with ensemble and
number of black cars as keys and values, respectively.
"""
with open(filename, 'r') as file_handle:
data = next(iter(yaml.safe_load_all(file_handle)))
sequence = data['sequence']
k = data['counts']
return sequence, k
|
281f2440e1d9b2e513aa38faf732ca3ec1c478ea
| 44,610 |
def solicitar_entero_valido(mensaje):
"""
    Prompts for an integer and keeps prompting
    until a valid integer is entered
"""
while True:
try:
posible_entero = int(input(mensaje))
return posible_entero
except ValueError:
continue
|
c576e418e559d7c6f50a03d767e001b8613ea89c
| 44,612 |
def make_param_name_multiple_index(param_parts):
"""
Make the key name from param parts.
For example, ("param", "tag", "2", "1") -> ("param2", "1").
"""
return (param_parts[0] + param_parts[-2], param_parts[-1])
|
cc3cbad59bc89273bc35ba8811f1f5d202bc8c77
| 44,619 |
def unique_species(ini0):
"""
Return the list of different chemical elements there are in the
current structure.
"""
natom = ini0["natom"]
elmlist = []
for ii in range(0,natom):
symbol = ini0["symbol"][ii]
if not symbol in elmlist:
elmlist.append(symbol)
return elmlist
|
3c4da68118cf057ec33e46cbe204a8706e75938d
| 44,620 |
from typing import Any
def get_cls_name(obj: Any, package_name: bool = True) -> str:
"""
Get name of class from object
Args:
obj (Any): any object
package_name (bool): append package origin at the beginning
Returns:
str: name of class
"""
cls_name = str(obj.__class__)
# remove class prefix
cls_name = cls_name.split('\'')[1]
# split modules
cls_split = cls_name.split('.')
if len(cls_split) > 1:
cls_name = cls_split[0] + '.' + cls_split[-1] if package_name else cls_split[-1]
else:
cls_name = cls_split[0]
return cls_name
|
6eb9a5b8b2ac4b33b988a90ba5f1988633179295
| 44,624 |
def spacydoc2tokens(doc):
"""
Transform spaCy doc to tokens list.
:param doc: spaCy doc
:return: list of token texts
"""
return [token.text for token in doc]
|
23ca1cdf9395cac883719dedcf34748701484f3c
| 44,625 |
def in_inner_list(item, item_list):
"""
判断 item 是否在列表内的列表里
:param item: 需要判断的对象
:param item_list: <list of list of item>
:return:
"""
for item_ in item_list:
# 若 item 在其中一个列表 item_ 中
# 则返回 item_
if item in item_:
return item_
# 不存在则返回 False
return False
|
bc4ad9ea415f76c22630b20ca8b5eed0428f3a18
| 44,627 |
def base26(w):
"""Convert string into base26 representation where a=0 and z=25."""
val = 0
for ch in w.lower():
next_digit = ord(ch) - ord('a')
val = 26*val + next_digit
return val
|
b929ec5c9dcfbc4254fc0f4d646ac24537c22b72
| 44,636 |
def grade_distribution(parsed_list, max_grade, bin_number=10):
"""
    This function calculates the distribution of the given grades by splitting
them into 'n' equal bins (intervals) and finding the number of grades
corresponding to each bin. The bins are left-closed, right-open:
[a, b) = x | a <= x < b, except from the last one that is closed:
[c, d] = x | c <= x <= d.
:param parsed_list: the parsed list of the grades
:param max_grade: the maximum grade that you can score
:param bin_number: the number of bins that is calculated in the
distribution, default is 10
:return: a list of the number of grades in each bin
"""
bin_length = max_grade / bin_number
grade_distribution_list = [0] * bin_number
for item in parsed_list:
index = int(item / bin_length)
if index == bin_number:
grade_distribution_list[index-1] = (
grade_distribution_list[index-1] + 1
)
else:
grade_distribution_list[index] = grade_distribution_list[index] + 1
return grade_distribution_list
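# Illustrative usage with assumed example inputs: grades out of 100 split into 10 bins.
print(grade_distribution([5, 55, 95, 100], 100))  # [1, 0, 0, 0, 0, 1, 0, 0, 0, 2]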
|
73fbd33cd1c7f9fb043f62d949749a39c2db33d1
| 44,637 |
import warnings
def get_weight(target_easy_negative, easy_negative, hard_negative):
"""
Args:
target_easy_negative ([type]): [description]
easy_negative ([type]): [description]
hard_negative ([type]): [description]
Returns:
        w_h, w_e: scaling factors for the hard and easy negatives needed to achieve the
            target_easy_negative
"""
w_e = target_easy_negative / easy_negative
transfer_weight = easy_negative - target_easy_negative
if transfer_weight < 0:
warnings.warn(
"Transfering weight from hard negative to easy negative")
w_h = 1 + transfer_weight / hard_negative
return w_h, w_e
|
56d8bcbcfc21145a164ca1882d245225e13cf5c7
| 44,647 |
def _parse_single_key(keystr: str) -> str:
"""Get a keystring for QKeySequence for a single key."""
return 'Shift+' + keystr if keystr.isupper() else keystr
|
2e9d54622871b6c347d7569bdc67d63754381b4f
| 44,650 |
def normalize_empty_to_none(text):
"""Return verbatim a given string if it is not empty or None otherwise."""
return text if text != "" else None
|
4d15f1101efe87e58e3855cef5d131b1c5eacef6
| 44,651 |
def run_if_true(function, boolean, data):
"""
Run the given function if the boolean is true.
Else return the data.
"""
if boolean:
return function(data)
return data
|
ea9facfed120a8fdd88717375c76c93b77edbfcf
| 44,652 |
def get_synplas_morph_args(config, precell=False):
"""Get morphology arguments for Synplas from the configuration object.
Args:
config (configparser.ConfigParser): configuration
precell (bool): True to load precell morph. False to load usual morph.
Returns:
dict: dictionary containing morphology arguments.
"""
# load morphology path
if precell:
morph_path = config.get("Paths", "precell_morph_path")
else:
morph_path = config.get("Paths", "morph_path")
return {
"morph_path": morph_path,
"do_replace_axon": config.getboolean("Morphology", "do_replace_axon"),
}
|
3410bf9aa3987dc626310affb8ebbd13b382c5d9
| 44,659 |
def checkPointsButtonClicked(game, clickX, clickY):
"""
Returns true if points button was clicked, else false
"""
return (game.pointsButtonObj is not None and
game.pointsButtonObj.collidepoint(clickX, clickY))
|
f95c7190db951b072d7517a3ec16641358a8a027
| 44,663 |
def seq_to_list(seq):
""" Convert non-sequence to 1 element sequence, tuples to lists
"""
if not isinstance(seq, (list, tuple)):
return [seq]
return list(seq)
|
460275a31e023f47dd0b0fba0289e0c3804c6f34
| 44,664 |
def mock_purge_success(url, request):
"""
Mock a success Purge request
"""
return {'status_code': 201,
'content-type': 'application/json',
'server': 'Apache',
'content-location': '/ccu/v2/purges/1234-456-7890',
'content': {
"estimatedSeconds": 420,
"progressUri": "/ccu/v2/purges/1234-456-7890",
"purgeId": "1234-456-7890",
"supportId": "123456789",
"httpStatus": 201,
"detail": "Request accepted.",
"pingAfterSeconds": 420}
}
|
eef7a0325bf81a98339fa82ba022bd994675de8e
| 44,665 |
import re
def should_filter(line, num):
"""Test the given line to see if it should be included. Excludes shebang
lines, for now.
"""
# Filter shebang comments.
if num == 0 and line.startswith('#!'):
return True
# Filter encoding specification comments.
if num < 2 and line.startswith('#') and re.search('coding[:=]', line):
return True
return False
|
3c82c67ce9b569d99c26fea37b37a4e1dbc51cd4
| 44,666 |
from typing import Tuple
def decode_seat_of(pass_: list, n_rows: int = 128, n_cols: int = 8) -> Tuple[int, int]:
"""Return row and column number from boarding pass information."""
row_range = [1, n_rows]
col_range = [1, n_cols]
for char in pass_[:7]: # decode row
if char == "F":
row_range[1] -= (row_range[1] - row_range[0] + 1) // 2
elif char == "B":
row_range[0] += (row_range[1] - row_range[0] + 1) // 2
for char in pass_[7:]: # decode column
if char == "L":
col_range[1] -= (col_range[1] - col_range[0] + 1) // 2
elif char == "R":
col_range[0] += (col_range[1] - col_range[0] + 1) // 2
return row_range[0] - 1, col_range[0] - 1
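# Illustrative usage with an assumed example boarding pass: 'FBFBBFFRLR' decodes to row 44, column 5.
print(decode_seat_of(list("FBFBBFFRLR")))  # (44, 5)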
|
65a5502ad4eea05db08b4127bb6af6db1ad41db2
| 44,667 |
import re
def is_valid_zcs_container_id(zcs_container_id):
"""
Validates Zadara Container Services (ZCS) container IDs, also known as the
ZCS container "name". A valid ZCS container name should look like:
container-00000001 - It should always start with "container-" and end with
8 hexadecimal characters in lower case.
:type zcs_container_id: str
:param zcs_container_id: The ZCS container name to be validated.
:rtype: bool
:return: True or False depending on whether zcs_container_id passes
validation.
"""
if zcs_container_id is None:
return False
match = re.match(r'^container-[0-9a-f]{8}$', zcs_container_id)
if not match:
return False
return True
|
9b8ef0e6281def4d09e787123e9582c9c4be9c84
| 44,675 |
def get_hmt_balance(wallet_addr, token_addr, w3):
""" Get hmt balance
Args:
wallet_addr: wallet address
token_addr: ERC-20 contract
w3: Web3 instance
Return:
Decimal with HMT balance
"""
abi = [
{
"constant": True,
"inputs": [{"name": "_owner", "type": "address"}],
"name": "balanceOf",
"outputs": [{"name": "balance", "type": "uint256"}],
"type": "function",
}
]
contract = w3.eth.contract(abi=abi, address=token_addr)
return contract.functions.balanceOf(wallet_addr).call()
|
4f968d48908ed0739aa3ab5cc7316e756d51bdb2
| 44,680 |
def recursive_merge_dicts(a, b):
"""Recursively merge two dictionaries.
Entries in b override entries in a. The built-in update function cannot be
used for hierarchical dicts, see:
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth/3233356#3233356
Parameters
----------
a : dict
dictionary to be merged
b : dict
dictionary to be merged
Returns
-------
c : dict
merged dict
Examples
--------
>>> from gammapy.utils.scripts import recursive_merge_dicts
>>> a = dict(a=42, b=dict(c=43, e=44))
>>> b = dict(d=99, b=dict(c=50, g=98))
>>> c = recursive_merge_dicts(a, b)
>>> print(c)
{'a': 42, 'b': {'c': 50, 'e': 44, 'g': 98}, 'd': 99}
"""
c = a.copy()
for k, v in b.items():
if k in c and isinstance(c[k], dict):
c[k] = recursive_merge_dicts(c[k], v)
else:
c[k] = v
return c
|
c4ed405ae2de4b7c7567a68cb25fb2d62cc5a196
| 44,682 |
def double_char(s):
"""Return word with double characters."""
word = ''
for i in s:
word += i * 2
return word
|
7460bf28267dafa68b776c4b24d32c9084272731
| 44,684 |
def _is_git_index_exists(repo_ctx, repo_local_cache_path):
""" Checks that a local git index exists at the repository cache path
Args:
repo_ctx: Repository context of the calling rule
repo_local_cache_path: Path of the repository cache folder
Returns:
Boolean indicating if local git index exists for the repository
"""
return repo_ctx.path(repo_local_cache_path + "/.git").exists
|
8064cd6afedd429d6b8ff83bea7c0cdd6a906437
| 44,686 |
def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')):
"""test if object is pathlib.Path like"""
for a in attr:
if not hasattr(obj, a):
return False
return True
|
858cfc52144e19d539f4074d50776cf03281fcc4
| 44,687 |
def read_label_map(path_to_labels):
"""Read from the label map file and return a class dictionary which
maps class id (int) to the corresponding display name (string).
This functions expects a txt, not pbtxt file
"""
with open(path_to_labels,'r') as f:
return {i: c for i,c in enumerate(f.read().split('\n'))}
|
9e7d1e3445cbc21b48389f0d0666a281e2b0eee2
| 44,688 |
from typing import List
def config_noisy(shots: int, optimization_level: int, transpiler_seed: int,
initial_layout: List[int], seed_simulator: int, private_config: dict
) -> dict:
"""The configuration loading for the noisy simulation.
Args:
shots: Number of shots to be used in the noisy simulation of the qcircuit.
optimization_level: Level of optimisation for the qiskit transpiler.
transpiler_seed: Seed for the transpilation process of the quantum circuit.
initial_layout: Initial layout used by a quantum computer, in case the noisy
simulation tries to mimic a certain quantum computer (e.g., IBM Cairo).
seed_simulator: The seed for the overarching simulation.
private_config: Dictionary specifying mainly the ibmq token to access ibm
real quantum computers, in case the simulation should mimic a certain
real quantum computer.
Returns:
Dictionary of the configuration for the noisy simulation of the quantum circuit.
"""
config_noisy = {
"backend_config": {"shots": shots,
"optimization_level": optimization_level,
"transpiler_seed": transpiler_seed,
"initial_layout": initial_layout,
"seed_simulator": seed_simulator
},
"ibmq_api": private_config["IBMQ"]
}
return config_noisy
|
2937ee790e7c9f91874e3b6581b1345cfce52c7f
| 44,692 |
import torch
def sig(gain: float, offset: float, x: torch.Tensor) -> torch.Tensor:
"""Computes element-wise sigmoid function.
Args:
gain: The sigmoid function gain.
        offset: The sigmoid function offset.
        x: The input tensor.
Returns:
The sigmoid function evaluated for each element in the tensor.
"""
return 1 / (1 + (offset * (1 - x) / x)**gain)
|
2fb626d5a75b5859003bf0960afbce4b6a9b5f52
| 44,694 |
def default_not_in_transformer(row, lhs, rhs):
"""
Performs the in check of the lhs is not in the rhs. If the lhs has an
`is_not_in` method this is used, if not the `not in` operator is used.
:param row: The row being checked (not used)
:param lhs: The left hand side of the operator
:param rhs: The right hand side of the operator
:return: True if lhs is not in right, False otherwise
"""
if hasattr(lhs, "is_not_in"):
return lhs.is_not_in(rhs)
else:
return lhs not in rhs
|
d0a1974a9f127a014acbd92ed64cbf8aea8181cb
| 44,697 |
def get_list(env, name, default=None):
"""Get a list from the environment
The input is assumed to be a comma-separated list of strings.
If ``name`` is not found in ``env``, return ``default``. Note
that ``default`` is returned as-is, so you should usually specify it as a
list::
ALLOWED_HOSTS = get_list(os.environ, 'ALLOWED_HOSTS', ['localhost', ])
"""
if default is None:
default = []
if name not in env:
return default
return env[name].split(',')
|
efbab4f49b065293101b1f42aecac002a53ebf98
| 44,698 |
def filter_title(job):
"""Filter out job title and link to listing."""
job_title = job.find('a', attrs={'class': 'job-link'}).text
link = job.find('a', href=True).attrs['href']
job_link = 'https://stackoverflow.com{}'.format(link)
return(job_link, job_title)
|
cdda84b4e8dd35ba233b60bd76d2a190c583a5a8
| 44,704 |
def bytes_to_int(bs):
""" converts a big-endian byte array into a single integer """
v = 0
p = 0
for b in reversed(bs):
v += b * (2**p)
p += 8
return v
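# Illustrative usage with an assumed example input; equivalent to int.from_bytes(bs, "big") in Python 3.
print(bytes_to_int(b"\x01\x00"))  # 256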
|
275801405b629a68c77bd07e5ddd10db376c2963
| 44,706 |
def zipl(*lists):
"""
Equivalent to python2 zip, return a list instead
    of a generator in python3
"""
return list(zip(*lists))
|
1e2d7fb73782b678e172fda6cb1ac5e9fce625d0
| 44,709 |
from datetime import datetime
def get_FY_short(the_date=None):
    """Return 2-digit current fiscal year as integer"""
    # datetime.now() as a default argument would be evaluated only once, at function
    # definition time, so resolve the default at call time instead
    if the_date is None:
        the_date = datetime.now()
    if the_date.month > 6:
        return the_date.year - 2000
    else:
        return the_date.year - 2001
|
8bbd206f8e5ef0dfc10ea2c416c4e41a285ee9ab
| 44,711 |
def to_download_url(path):
"""Returns an url to filebrowser views.py download function """
return '/filebrowser/option?name=download_resource&path=' + path
|
3b7a8e88ec476906fec1294f690a4267f80da32c
| 44,712 |
def coll_types(coll):
"""
Return iterator over types in collection
"""
return coll.types()
|
eb66707a0c3397973aff16e4893c6d33b3b6c333
| 44,722 |
def get_words_from_dictionary(lemmas):
"""
Get original set of words used for analysis.
:param lemmas: A dictionary where keys are lemmas and values are sets
or lists of words corresponding to that lemma.
:type lemmas: dict(str): list(str)
:return: Set of words that exist as values in the dictionary
:rtype: set(str)
"""
words = set()
for lemma in lemmas:
words.update(set(lemmas[lemma]))
return words
|
2ddf017811cbe91a5cfcf4fbc37730554ec77d2a
| 44,725 |
import hashlib
def hash_list(argv, size=8):
"""
Proposes a hash for the list of arguments.
@param argv list of arguments on the command line.
@param size size of the hash
@return string
"""
st = "--".join(map(str, argv))
hash = hashlib.md5()
hash.update(st.encode("utf-8"))
res = hash.hexdigest()
    if len(res) > size:
        return res[:size]
return res
|
b9371185889fd37408ff9f46b41e874f8dd9a029
| 44,727 |
import json
def _get_example(line):
"""Extract relevant fields from json.
Args:
line: String for json line.
Returns:
example_id: integer.
question: string.
contexts: List of strings.
context_indices: List of (int32, int32) tuples.
"""
json_example = json.loads(line)
example_id = json_example["example_id"]
long_answer_candidates = json_example["long_answer_candidates"]
contexts = []
context_indices = []
for candidate in long_answer_candidates:
if candidate["top_level"]:
# Get candidate start and end indices.
start_index = candidate["start_token"]
end_index = candidate["end_token"]
context_indices.append((start_index, end_index))
# Get candidate contexts.
candidate_tokens = json_example["document_tokens"][start_index:end_index]
candidate_tokens = [t["token"] for t in candidate_tokens]
candidate_str = u" ".join(candidate_tokens)
candidate_str = candidate_str.lower()
contexts.append(candidate_str)
# Get question.
question = u" ".join(json_example["question_tokens"])
question = question.lower()
return example_id, question, contexts, context_indices
|
f57cf475c55566bf5a4f19699aab63d54839535d
| 44,735 |
from typing import List
from typing import Union
def make_it_fit(cell_list: List[str], limit: int) -> Union[None, List[str]]:
"""
    This function attempts to shorten a list of strings by finding and
    eliminating empty string elements from left to right. If successful
    it will return the modified list. Otherwise it will return None.
    """
    if len(cell_list) <= limit:
        return cell_list
    else:
        # drop empty strings until the list fits or no empty strings remain
        while "" in cell_list and len(cell_list) > limit:
            cell_list.remove("")
        if len(cell_list) <= limit:
            return cell_list
        else:
            return None
|
48884f977074b82f391c7fb989e08dab1d285bf7
| 44,736 |
import time
def _generate_container_name() -> str:
"""Generate a Docker container name for use in DockerPolicy."""
return 'ros2launch-sandboxed-node-{}'.format(time.strftime('%H%M%S'))
|
28eb30dd5c4a49e4342feeec94d09dda9f3f4c53
| 44,737 |
def getSigGenIDN(sg):
"""
This function returns the Sig Gen IDN
Parameters:
sg : socket connection
Returns:
IDN : Sig gen IDN response as string
"""
sg.sendall(b'*IDN?\r\n') # Get system identification
response = sg.recv(1024)
return response.decode('utf8')
|
a9478286a2273a6be1c17a5b06d3d83641d80f8f
| 44,738 |
def get_ds(dc, name):
"""
Pick a datastore by its name.
"""
for ds in dc.datastore:
try:
if ds.name == name:
return ds
        except Exception:  # Ignore datastores that have issues
pass
raise Exception("Failed to find %s on datacenter %s" % (name, dc.name))
|
964a4c997e46d2d655659962479baab53cbb765d
| 44,739 |
from typing import Dict
def dauphin_info(dauphin_county_dict) -> Dict[str, str]:
"""
Returns a dict with basic info for Dauphin County, including its name and mailing list ID.
"""
return dauphin_county_dict["42043"]
|
26b35449a0177171a5ce499dd7af340278a91209
| 44,744 |
from typing import Callable
from typing import Type
from typing import Optional
from typing import Tuple
def make_metaclass(metaclass_callback: Callable[[Type], None], exclude: Optional[Tuple[str, ...]] = None) -> Type:
"""
make a metaclass
:param metaclass_callback: called when a new class is made using the metaclass
:param exclude: names of inheriting classes to not trigger the callback on
:return: the metaclass
"""
exclude = exclude or ()
class Metaclass(type):
def __new__(mcs, name, bases, kwargs):
klass = super().__new__(mcs, name, bases, kwargs)
if name not in exclude:
metaclass_callback(klass)
return klass
return Metaclass
|
18193fb296215c77a8f385ba1bb3c4ce888eb866
| 44,746 |
def layer_rows(layer):
"""
Given a layer return each row as a list.
"""
    els = list(range((2*(layer-1)-1)*(2*(layer-1)-1) + 1, (2*layer-1)*(2*layer-1) + 1))
    side_length = len(els) // 4  # integer division; '/' yields a float and breaks slicing in Python 3
    return [els[:side_length], els[side_length:2*side_length], els[2*side_length:3*side_length], els[3*side_length:]]
|
ecab272deb2f7d990b094c2b9a2036c0f8990855
| 44,748 |
def sample_mocked_geolocation(**params):
"""Create and return a sample mocked geolocation object"""
mock_data = {
'ip': '10.0.0.1',
'country_name':'Mars',
'region_code':'SOL512',
'city': 'RedSand',
'latitude':49.02342,
'longitude':40.34342,
'zip':'1052'
}
mock_data.update(params)
return mock_data
|
8506bfd2f9b602e027660401b4fdeb2cea9e92b8
| 44,751 |
def community2name(community):
"""
Given a list of concepts (words), return a string as its name.
"""
return " ".join(community)
|
c2454a26704bd6930c3c7fdf3bf17619ede918c6
| 44,754 |
def readFile(filepath):
"""Gets string representing the path of the file to be processed
and returns it as a list. It will omit blank lines.
Parameters
----------
filepath : str
The path of the filename to be processed
Returns
-------
lines
a list that contains the lines of the file
"""
try:
with open(filepath) as f_in:
lines = list(line for line in (l.strip() for l in f_in) if line)
except IOError as err:
print(err)
return None
if not lines:
print("Empty file")
exit(1)
return lines
|
d1dc54b48f7012cbf0f253154af96d673cd00259
| 44,755 |
def drop_users_to_ignore(ignore, license_lists):
"""This function drops the users to ignore during the comparison from each license type list.
Parameters
----------
ignore (DataFrame) : Users to ignore during comparison
license_lists (dict) : dictionary of DataFrames, one for each license type
Returns
-------
license_lists (dict) : same as input minus DataFrame records whose email matched an email in the *ignore* DataFrame
"""
for license in license_lists.keys():
license_lists[license] = license_lists[license][ ~license_lists[license]['User principal name'].isin(ignore['email']) ]
return license_lists
|
3aaee5d9c49ee776f6fc5dbd2a8062e104c78845
| 44,756 |
def delete_cookie(cookieName: str, url: str) -> dict:
"""Deletes browser cookie with given name, domain and path.
Parameters
----------
cookieName: str
Name of the cookie to remove.
url: str
        URL to match cookie domain and path.
**Experimental**
"""
return {
"method": "Page.deleteCookie",
"params": {"cookieName": cookieName, "url": url},
}
|
ff233a7e1e4048a35b6ad13f804cbfe088b6a66c
| 44,760 |
def find_range(index, window, max):
""" find the left and right endpoints of a window in an array
:param index: index window is to be centered at
:param window: the length of the window
:param max: the size of the array
:return: left and right endpoints of the window
"""
half_window = int(window / 2)
return (
(index - half_window, index + half_window) # in range
if max - half_window >= index >= half_window else
(max - window, max) # too far on right
if max - half_window < index else
(0, window) # to far on left
)
|
f4bc4cd41b0c3d30c906bbf8f2e7449b0ca118de
| 44,763 |
def MSD(Z, Y):
"""Compute the mean square distortion (MSD) between Z and Y
Parameters
----------
Z: torch.Tensor of shape (n_samples, n_features)
Tensor to be compared
Y: torch.Tensor of shape (n_samples, n_features)
Tensor to be compared
Returns
-------
msd: float
Mean square distance between Z and Y
"""
msd = ((Z - Y) ** 2).sum()
msd /= Z.shape[0]
return msd
|
7b13824a4898cbc20609d1a67600298a44cff7c3
| 44,767 |
import torch
def avg_disp(y_pred, y_true):
""" Average displacement error. """
y_true, masks = y_true
seq_lengths = masks.sum(1)
batch_size = len(seq_lengths)
squared_dist = (y_true - y_pred)**2
l2_dist = masks * torch.sqrt(squared_dist.sum(2))
avg_l2_dist = (1./batch_size) * ((1./seq_lengths) * l2_dist.sum(1)).sum()
return avg_l2_dist.item()
|
a97ca860dbc13d857ebfd46a5f831e6f07dc8c5f
| 44,770 |
def count(items: list) -> list:
"""
Generate a list of counts for each item in the input list. Outputs with highest counted item at index 0
Args:
items (list): A list of objects to be sorted
Returns:
        list: A list of unique items in the input and the number of occurrences for each item
"""
def row2(k):
return k[1]
if isinstance(items[0], list):
items = [tuple(x) for x in items]
uniqs = [[x, items.count(x)] for x in set(items)]
uniqs.sort(key=row2, reverse=True)
return uniqs
|
a13697f72c67cffb2c59d30565924bab99b746de
| 44,774 |
import json
def to_json(dict_):
"""
serialize dict to json
    :param dict_: the dictionary to serialize
:returns: JSON string representation
"""
return json.dumps(dict_)
|
a0ee283fbcb5d56d8032a25259a01a13a0782858
| 44,775 |
def null_count(df):
"""
    Returns the number of null values in each column of the DataFrame.
"""
nulls = df.isnull().sum()
return nulls
|
2e923c56b9048781aec39e68758bbdeebb39362e
| 44,781 |
def remove_blank_lines(lines):
"""Get all non-blank lines out of a list of lines"""
return [line_out for line_out in (line_in.strip() for line_in in lines) if len(line_out) > 0]
|
6725f3f602da4ca0dc5e4d250cfd0d8b95181c15
| 44,783 |
def perovskite_order_param(Rs_order_param=None):
""" Define an order parameter for peroskite.
"""
if Rs_order_param is None:
Rs_order_param = [(1, 0, 1), (1, 0, -1), (1, 1, 1), (1, 1, -1), (1, 2, 1), (1, 2, -1)]
Rs_in = [(3, 0, 1)] + Rs_order_param
return Rs_in
|
0525349d0e0bc3cb6068d38c3e1e71732d3c8a44
| 44,792 |
def format_time(time_units):
""" Returns a formated time value (hh:mm:ss:ms). """
return ":".join(["{0}".format(time_units[inx]) for inx in range(4, len(time_units))])
|
28c622043a9b39ae1fe1e1c61ee441a5884a03a3
| 44,798 |
def first(xs, fn=lambda _: True, default=None):
"""Return the first element from iterable that satisfies predicate `fn`,
or `default` if no such element exists.
Args:
xs (Iterable[Any]): collection
fn (Callable[[Any],bool]): predicate
default (Any): default
Returns:
Any
"""
return next((i for i in xs if fn(i)), default)
|
f4931e9d6b9044f4cffdb23961c6e33dea627083
| 44,799 |
from typing import Sequence
from typing import List
def bubble_sort_rec(seq: Sequence) -> List:
"""
Sort a sequence with the recursive bubble sort algorithm.
Parameters
----------
seq : Sequence
Returns
-------
List
"""
def rbubblesort(seq: List, length: int):
"""Recursive bubble sort algorithm."""
if length in (0, 1):
return
for j in range(length - 1):
cur, nxt = seq[j], seq[j + 1]
if cur > nxt:
seq[j], seq[j + 1] = nxt, cur
rbubblesort(seq, length - 1)
lst: List = list(seq)
rbubblesort(lst, len(lst))
return lst
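# Illustrative usage with an assumed example input (any sequence works, a list is returned):
print(bubble_sort_rec((3, 1, 2)))  # [1, 2, 3]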
|
e9ba43daa21cf58165efce7a60dc21f0ce455076
| 44,803 |
def PerpProduct2D(a,b):
"""Computes the the 2D perpendicular product of sequences a and b.
The convention is a perp b.
The product is:
positive if b is to the left of a
negative if b is to the right of a
zero if b is colinear with a
left right defined as shortest angle (< 180)
"""
return (a[0] * b[1] - a[1] * b[0])
|
94f6184c8c5bfb83f29d4adc40ca030931522e66
| 44,804 |
def try_key(d, key, val):
"""
d: dict
key: str
val: object
the default value if the key does not exist in d
"""
if key in d:
return d[key]
return val
|
7a28f1aaf0c989675bab32f9deacc39e1f6ca9d0
| 44,809 |
def is_subset(l1: list, l2: list) -> bool:
"""
Test if l2 is a subset of l1, i.e. all elements of l2 are contained in l1 and return True if it the case, False
otherwise.
:param l1: main list
:param l2: list whose elements are to be checked (if they're in l1 or not)
:return: True if l2 is a subset of l1, False otherwise.
"""
    set1 = set(l1)
    set2 = set(l2)
    # l2 is a subset of l1 when every element of l2 is contained in l1
    return set2.issubset(set1)
|
09831684f55af2670ac4fd7e930c574f0265483c
| 44,813 |
def _looks_like_numpy_function(func_name, numpy_module_name, node):
"""
Return True if the current node correspond to the function inside
the numpy module in parameters
:param node: the current node
:type node: FunctionDef
:param func_name: name of the function
:type func_name: str
:param numpy_module_name: name of the numpy module
:type numpy_module_name: str
:return: True if the current node correspond to the function looked for
:rtype: bool
"""
return node.name == func_name and node.parent.name == numpy_module_name
|
e343f39d2388ce31f2d749d50b1c8f4e47475780
| 44,816 |
from typing import Callable
import functools
def to(data_type) -> Callable:
"""
Apply a data type to returned data from a function.
Args:
data_type: The data type to apply. Eg: list, int etc.
Returns:
Decorator that applies the data type on returned data
"""
def decorator(func) -> Callable:
@functools.wraps(func)
def inner(*args, **kwargs):
return data_type(func(*args, **kwargs))
return inner
return decorator
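# Illustrative usage with an assumed example function: materialize a generator's output as a list.
@to(list)
def squares(n):
    return (i * i for i in range(n))
print(squares(4))  # [0, 1, 4, 9]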
|
b0b5c43c96ea6081888ed8e212d43c8206966ddb
| 44,817 |
def list_nodes(base):
"""Utility function that lists Senlin nodes."""
res = base.client.list_objs('nodes')
return res['body']
|
09c42f361be4e2abcb5685b63ce202cc71822e16
| 44,822 |
def GetCost(term):
"""Calculate the cost of a term.
Quota is charged based on how complex the rules are rather than simply
limiting the number of rules.
A firewall rule tuple is the unique combination of IP range, protocol, and
port defined as a matching condition in a firewall rule. And the cost of a
firewall rule tuple is the total number of elements within it.
Args:
term: A Term object.
Returns:
int: The cost of the term.
"""
protocols = len(term.protocol) or 1
ports = len(term.destination_port) or 1
if term.destination_address:
addresses = len(term.destination_address) or 1
else:
addresses = len(term.source_address) or 1
return addresses * protocols * ports
|
b21755d3bb6a32dc7b1b23718b940d3169d050fc
| 44,825 |