content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def boundaries_intersection(boundaries):
    """
    Find the vertices shared between every pair of boundary paths.

    :param boundaries: list of lists of vertex indices, one per boundary path
    :return: list of ``[index_a, index_b, shared_vertices]`` entries, one for
        each pair of boundaries that have at least one vertex in common
    """
    vertex_sets = [set(path) for path in boundaries]
    connections = []
    for idx_a in range(len(vertex_sets) - 1):
        for idx_b in range(idx_a + 1, len(vertex_sets)):
            shared = vertex_sets[idx_a] & vertex_sets[idx_b]
            if shared:
                connections.append([idx_a, idx_b, list(shared)])
    return connections
|
7419d388f917650abd0717d67db39b1379f2f765
| 601,292 |
def atbash_slow(sequence: str) -> str:
    """
    Apply the Atbash substitution cipher to ASCII letters, leaving every
    other character untouched.

    >>> atbash_slow("ABCDEFG")
    'ZYXWVUT'
    >>> atbash_slow("aW;;123BX")
    'zD;;123YC'
    """
    pieces = []
    for character in sequence:
        code = ord(character)
        if 65 <= code <= 90:
            # Uppercase A-Z mirrors around 'A'+'Z' == 155.
            pieces.append(chr(155 - code))
        elif 97 <= code <= 122:
            # Lowercase a-z mirrors around 'a'+'z' == 219.
            pieces.append(chr(219 - code))
        else:
            pieces.append(character)
    return "".join(pieces)
|
1f38ab498cd808df9c313608d694363dba439d19
| 569,333 |
import click
def prompt(string, default=None):
    """
    Ask the user for input using a magenta-styled prompt string and
    return the response (or ``default`` when accepted).
    """
    styled = click.style(string, fg='magenta')
    return click.prompt(styled, default=default)
|
6400eb70bdf93be44e6355940fdbca10bd741fd8
| 206,447 |
def CStringIo_to_String(string_io_object):
    """Return the accumulated contents of a StringIO object as a string.

    Inverse of String_to_CStringIo."""
    contents = string_io_object.getvalue()
    return contents
|
21f2b027f1eb43063bc24df25db2c2098d894d46
| 695,488 |
from typing import List
from datetime import datetime
def times_are_valid(times: List[str]) -> bool:
    """
    Check that every entry in ``times`` is a valid 24-hour 'HH:MM' time.

    Returns False as soon as one entry fails to parse; an empty list is
    also considered invalid.

    :param times: list with start time and end time, e.g. ['10:15', '20:02']
    :return: True when all entries parse, False otherwise
    """
    if not times:
        return False
    for candidate in times:
        try:
            datetime.strptime(candidate, '%H:%M')
        except ValueError:
            return False
    return True
|
30a0d9d4b8815a8e272723c1b581099933e26156
| 269,264 |
def process_nomis(
    df, indicator_name, value_column, source, indicator_column="MEASURES_NAME"
):
    """Extract one indicator from a nomis table and tidy it.

    Args:
        df (df): nomis table
        indicator_name (str): name of the indicator to keep
        value_column (str): column holding the variable name
        source (str): data source label added to every row
        indicator_column (str): column that contains the indicator
    Returns:
        A clean table with secondary data (lower-cased column names,
        ``value``/``variable``/``source`` columns).
    """
    selected = df.query(f"{indicator_column}=='{indicator_name}'")
    keep = ["DATE", "GEOGRAPHY_NAME", "GEOGRAPHY_CODE", value_column, "OBS_VALUE"]
    tidy = selected[keep].reset_index(drop=True)
    tidy = tidy.rename(columns={"OBS_VALUE": "VALUE", value_column: "VARIABLE"})
    tidy = tidy.assign(source=source)
    return tidy.rename(columns=str.lower)
|
f934e66069b36be831318850c394ca2f6b333b4e
| 166,230 |
def RecursiveDictionaryExtraction(Dictionary):
    """
    WARNING: This function is for internal use.

    Walk a tree of nested dictionaries and return the sum of all leaf
    values; a non-dict argument is itself a leaf and returned unchanged.
    """
    if not isinstance(Dictionary, dict):
        return Dictionary
    return sum(RecursiveDictionaryExtraction(value)
               for value in Dictionary.values())
|
1337cff22cd06c5efa564a5716c964ca4396103c
| 213,750 |
def voter_notification_settings_update_doc_template_values(url_root):
    """
    Show documentation about voterNotificationSettingsUpdate

    Builds the template-context dictionary consumed by the API docs page:
    required/optional parameter tables, possible status codes, an example
    JSON response, and the data backing the "try it now" link.

    :param url_root: root URL of the API server, passed through to the template
    :return: dict of template values for the documentation view
    """
    # Parameters every caller must supply.
    required_query_parameter_list = [
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    # Parameters that may be supplied to select the voter and the flag bits to change.
    optional_query_parameter_list = [
        {
            'name': 'email_subscription_secret_key',
            'value': 'string', # boolean, integer, long, string
            'description': 'A long string which tells us which email we want the notification options updated for. ',
        },
        {
            'name': 'interface_status_flags',
            'value': 'integer', # boolean, integer, long, string
            'description': 'An integer whose bits represent several flags for the user, such as the ',
        },
        {
            'name': 'flag_integer_to_set',
            'value': 'integer', # boolean, integer, long, string
            'description': 'Sets the corresponding bit represented by this integer\'s bit, '
                           'in interface_status_flags bit',
        },
        {
            'name': 'flag_integer_to_unset',
            'value': 'integer', # boolean, integer, long, string
            'description': 'Unsets the corresponding bit represented by this integer\'s bit, '
                           'in interface_status_flags bit',
        },
        {
            'name': 'notification_settings_flags',
            'value': 'integer', # boolean, integer, long, string
            'description': 'An integer whose bits represent several flags for the user, such as the ',
        },
        {
            'name': 'notification_flag_integer_to_set',
            'value': 'integer', # boolean, integer, long, string
            'description': 'Sets the corresponding bit represented by this integer\'s bit, '
                           'in notification_settings_flags bit',
        },
        {
            'name': 'notification_flag_integer_to_unset',
            'value': 'integer', # boolean, integer, long, string
            'description': 'Unsets the corresponding bit represented by this integer\'s bit, '
                           'in notification_settings_flags bit',
        },
    ]
    # Status codes the endpoint can return, shown in the docs table.
    potential_status_codes_list = [
        {
            'code': 'UPDATED_VOTER',
            'description': 'Successfully saved',
        },
    ]
    # Seed values for the interactive "try it now" form; the string 'False'
    # means "do not update this field" (see api_introduction below).
    try_now_link_variables_dict = {
        'email_subscription_secret_key': '',
        'interface_status_flags': 'False',
        'flag_integer_to_set': 'False',
        'flag_integer_to_unset': 'False',
        'notification_settings_flags': 'False',
        'notification_flag_integer_to_set': 'False',
        'notification_flag_integer_to_unset': 'False',
    }
    # Example JSON response rendered verbatim on the docs page.
    api_response = '{\n' \
                   ' "status": string (description of what happened),\n' \
                   ' "success": boolean (True as long as no db errors),\n' \
                   ' "voter_found": boolean (True if voter found from secret key),\n' \
                   ' "voter_updated": boolean (True if save executed successfully),\n' \
                   ' "email_subscription_secret_key": string (88 characters long),\n' \
                   ' "sms_subscription_secret_key": string (88 characters long),\n' \
                   ' "interface_status_flags": integer,\n' \
                   ' "flag_integer_to_set": integer,\n' \
                   ' "flag_integer_to_unset": integer,\n' \
                   ' "notification_settings_flags": integer,\n' \
                   ' "notification_flag_integer_to_set": integer,\n' \
                   ' "notification_flag_integer_to_unset": integer,\n' \
                   '}'
    # Assemble the full context handed to the documentation template.
    template_values = {
        'api_name': 'voterNotificationSettingsUpdate',
        'api_slug': 'voterNotificationSettingsUpdate',
        'api_introduction':
            "Update profile-related information for a voter based on secret key. If the string 'False' is passed "
            "(or the boolean value), do not update the field.",
        'try_now_link': 'apis_v1:voterNotificationSettingsUpdateView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
|
ad2fda6cfb0d2d228b6fc6ae6b18d71026728157
| 655,203 |
def uniquifier(seq, key=None):
    """
    Return a list of the items of ``seq`` with duplicates removed, keeping
    the first occurrence of each. Optional ``key`` is a callable that maps
    an item to the value used for duplicate detection.

    Borrowed in part from http://www.peterbe.com/plog/uniqifiers-benchmark
    """
    keyfn = key if key is not None else (lambda item: item)
    seen = set()
    unique = []
    for item in seq:
        marker = keyfn(item)
        if marker not in seen:
            seen.add(marker)
            unique.append(item)
    return unique
|
ab834043538b4207a4b124977c97ba89fa9adcb7
| 111,547 |
def is_authoring_source(view):
    """
    Given a view object, tells you if that view represents an editable help
    source file (matches the help scope and is not read-only).
    """
    if not view.match_selector(0, "text.hyperhelp.help"):
        return False
    return not view.is_read_only()
|
82570676060ffdd4649bd847be065576ab583cad
| 234,336 |
def parse_csv(columns, line):
    """
    Parse a CSV line that uses ',' as the separator.

    ``columns`` is the list of column names; the line must contain at least
    as many comma-separated values as there are columns (extras are ignored).
    Returns a dict mapping each column name to its value.
    """
    values = line.split(',')
    return {name: values[position] for position, name in enumerate(columns)}
|
ff42251c5be595cc749ccc91d419e2ef105b9b49
| 44,150 |
def _try_rsplit(text, delim):
"""Helper method for splitting Email Received headers.
Attempts to rsplit ``text`` with ``delim`` with at most one split.
returns a tuple of (remaining_text, last_component) if the split was
successful; otherwise, returns (text, None)
"""
if delim in text:
return [x.strip() for x in text.rsplit(delim, 1)]
else:
return (text, None)
|
ecc57abd82c8a4c63100d2305af1cd21853fa86e
| 653,134 |
def binary_search(lst: list, x) -> int:
    """Return the index of the first element of the sorted list `lst` equal to
    `x`, or -1 if no elements of `lst` are equal to `x`.

    Design idea: bisect-left search. Compare with the midpoint element; if it
    is less than `x`, the answer lies strictly to the right, otherwise the
    leftmost match (if any) lies at or left of the midpoint. This converges
    on the FIRST matching index, which the plain "return mid on equality"
    variant does not guarantee when duplicates are present.

    Complexity: O(log n) time, O(1) space.
    """
    # `lo` is the first index still under consideration, `hi` is one past
    # the last; the loop narrows to the leftmost insertion point for `x`.
    lo = 0
    hi = len(lst)
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] < x:
            lo = mid + 1
        else:
            hi = mid
    # `lo` now indexes the first element >= x (or len(lst) if none).
    if lo < len(lst) and lst[lo] == x:
        return lo
    return -1
|
da84a8c3759f4e35b1882a90f5ad4d58a59ce27f
| 134,992 |
def has_fields(passport):
    """
    Check whether all required fields are in the passport.
    """
    required = ("byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid")
    return all(field in passport for field in required)
|
d4776de4066dd220515566bbed843576927e366b
| 137,639 |
def update_note(client, note_id, title, tags, content):
    """Send an update for an existing note through the test client."""
    payload = dict(
        note_id=note_id,
        title=title,
        tags=tags,
        content=content,
    )
    return client.post("/update", data=payload, follow_redirects=True)
|
254bb8869f1ab0e0a5c42729b03c316479652e58
| 517,635 |
def _isSet(theme, keys):
"""
Given a theme dict, recursively check that all the keys are populated
and that the associated value is truthy
"""
obj = theme
for key in keys:
if not obj or key not in obj:
return False
obj = obj[key]
return bool(obj)
|
43743104c014cb09550e447a199f208055440655
| 300,381 |
import math
def floatRgb(mag, cmin, cmax):
    """
    Map a scalar magnitude onto a colour ramp and return a tuple of floats
    between 0 and 1 for the red, green and blue amplitudes.

    :param mag: the magnitude to colour
    :param cmin: lower bound of the magnitude range
    :param cmax: upper bound of the magnitude range
    :return: (red, green, blue) tuple of floats, each clamped to [0, 1]
    """
    try:
        # normalize to [0,1]
        x = float(mag - cmin) / float(cmax - cmin)
    except ZeroDivisionError:
        # cmax == cmin: degenerate range, fall back to the midpoint colour.
        # (The original bare `except:` also hid type errors; only the
        # zero-width range is an expected failure here.)
        x = 0.5
    blue = min(max(4 * (0.75 - x), 0.), 1.)
    red = min(max(4 * (x - 0.25), 0.), 1.)
    green = min(max(4 * math.fabs(x - 0.5) - 1., 0.), 1.)
    return (red, green, blue)
|
04550303688836c6b54b51a75d1ba20d665f3e96
| 475,277 |
def fix_tid(src_tid):
    """ Unroll a tid string into a sorted tuple of indices. Four notations
    are supported:

        'x:y'   -> [x, x + 1, .. y],          e.g. '2:5'   -> [2,3,4,5]
        'x_y_z' -> [min, min + 1, .. , max],  e.g. '6_7_9' -> [6,7,8,9]
        'x,y,z' -> [x,y,z],                   e.g. '1,2,3' -> [1,2,3]
        'x'     -> [x],                       e.g. '42'    -> [42]

    Args:
        src_tid (str): tid to fix
    Returns:
        tuple: The unrolled, sorted tid
    """
    if ':' in src_tid:
        first, _, last = src_tid.partition(':')
        indices = list(range(int(first), int(last) + 1))
    elif '_' in src_tid:
        values = [int(piece) for piece in src_tid.split('_')]
        indices = list(range(min(values), max(values) + 1))
    elif ',' in src_tid:
        indices = [int(piece) for piece in src_tid.split(',')]
    else:
        indices = [int(src_tid)]
    return tuple(sorted(indices))
|
ee273b5762a26a4e1a9c9603c272eb3f60d313e2
| 158,949 |
def max_decimal_value_of_binary(num_of_bits):
    """
    get max decimal value of a binary string with a fixed length

    :param num_of_bits: # of bits
    :type num_of_bits: int
    :return: max decimal value (2**num_of_bits - 1)
    :rtype: int
    """
    # Bit-shift form: equivalent to int('1' * n, 2) for n >= 1, but also
    # handles num_of_bits == 0 (the string form raises on int('', 2)).
    return (1 << num_of_bits) - 1
|
c1dc4a3b9a1af511821ef432467b8b275084e055
| 98,107 |
import base64
def base64_encode(value):
    """Return the base64 encoding of ``value`` (bytes-like in, bytes out)."""
    encoded = base64.b64encode(value)
    return encoded
|
e8073be8e7b02f36937258f8cbfd2e6eb635a72b
| 615,267 |
def compute_polynomial(coefficients, value):
    """
    Horner's method of computation for polynomial functions.

    Returns the result of a polynomial function given as coefficients vector
    'coefficients' for value 'value'.

    :param coefficients: vector of coefficients, highest degree first.
        ex: [3, 2, 1, 0] for f(x) = 3*(x**3) + 2*(x**2) + 1*(x**1) + 0*(x**0)
    :param value: number
    :return result: f(value), or None for an empty coefficient vector
    """
    if not coefficients:
        # Preserve the original contract: no coefficients -> None.
        return None
    # Plain Horner accumulation. (The original used `if not result` to
    # detect the first iteration, relying on 0 * value + c == c whenever
    # the accumulator happened to be zero - correct by accident, fragile
    # to read. Starting from 0 gives the same arithmetic explicitly.)
    result = 0
    for coefficient in coefficients:
        result = result * value + coefficient
    return result
|
98740f0cecdad972adb726a2c8d8a90484a33880
| 129,195 |
import collections
def duplicate_cards(tuple_of_cards):
    """Return a list of the cards that are duplicated in the tuple.

    If a card is duplicated N times, the card appears in the result N times
    so that the caller knows how many times it's been duplicated. If
    is_standard_deck() returns false for a list, this function may help the
    caller determine what's wrong with their deck of cards."""
    occurrences = collections.Counter(tuple_of_cards)
    duplicated = []
    for card in tuple_of_cards:
        if occurrences[card] > 1:
            duplicated.append(card)
    return duplicated
|
6446452bc5340c9aa509cd0ea2268992ffcd553d
| 564,507 |
def active_users(account, days_back):
    """ Returns query for finding active users (since days_back value).

    :param account: AWS account id, interpolated into the Athena SQL below.
    :param days_back: look-back window in days, interpolated into the SQL.
    :return: tuple of (query string, S3 results prefix for this account).
    """
    # NOTE(review): `account` and `days_back` are interpolated directly into
    # the SQL text. If either can come from untrusted input this is a SQL
    # injection risk - confirm callers only pass trusted values.
    query_string = f"""SELECT DISTINCT useridentity.arn
    FROM behold
    WHERE account = '{account}'
    AND useridentity.type = 'IAMUser'
    AND useridentity.arn IS NOT NULL
    AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now());"""
    return (query_string, f"athena_results/active_users/{account}")
|
6935a5ebc24682c8d7932bae8e755659f4615bd7
| 447,332 |
from typing import Union
def squared_number(n: Union[int, float]) -> int:
    """
    Return the integer square root of ``n`` when ``n`` is a perfect square,
    using the Babylonian (Heron's) iteration; return 0 otherwise.

    ``n`` must be a positive integer, or a positive float with zero decimal
    places; any other float yields 0, and ``n < 1`` raises ValueError.
    """
    if n < 1:
        raise ValueError("n must be a positive integer.")
    if isinstance(n, float) and not n.is_integer():  # type: ignore
        return 0
    guess = n // 2
    if guess == 0:
        # Only n == 1 reaches here among valid inputs.
        return 1 if n == 1 else 0
    seen = {guess}
    while guess * guess != n:
        guess = (guess + (n // guess)) // 2
        if guess in seen:
            # The iteration entered a cycle: n is not a perfect square.
            return 0
        seen.add(guess)
    return int(guess)
|
1aa15667ba6540db4b78e995903e2ea978ec7a16
| 394,703 |
def es_cif(doi):
    """
    Validate a Spanish CIF.

    Each company in Spain prior to 2008 had a distinct CIF and has been
    discontinued. The new replacement is to use NIF for absolutely
    everything, with "types" of NIF distinguished by the first character of
    the DOI; we continue to call CIF the NIFs used for companies. See
    https://es.wikipedia.org/wiki/C%C3%B3digo_de_identificaci%C3%B3n_fiscal
    and https://generadordni.es/ (on which this validator is based).

    Returns True when ``doi`` is a valid CIF, False otherwise.

    .. versionadded:: 0.13.0

    :param doi: DOI to validate
    """
    doi = doi.upper()
    if len(doi) != 9:
        return False
    leading, body, control = doi[0], doi[1:8], doi[8]
    if not body.isdigit():
        return False
    # Luhn-like checksum: even positions (0-based) are doubled and
    # digit-summed, odd positions are taken as-is.
    checksum = 0
    for position, digit in enumerate(body):
        number = int(digit)
        if position % 2 == 0:
            checksum += sum(divmod(number * 2, 10))
        else:
            checksum += number
    expected = (10 - checksum % 10) % 10
    table = 'JABCDEFGHI'
    if leading in 'ABEH':  # Number type
        return str(expected) == control
    if leading in 'PSQW':  # Letter type
        return table[expected] == control
    if leading not in 'CDFGJNRUV':
        return False
    # Remaining valid prefixes accept either representation.
    return control == str(expected) or control == table[expected]
|
63f4c456ed96410cedb94edad303d1d62164d318
| 441,594 |
def _import_symbol(symbol_path):
"""Imports the symbol defined by 'symbol_path'.
'symbol_path' is a string in the form 'foo.bar.baz' which is turned
into an import statement 'from foo.bar import baz' (ie. the last
component of the name is the symbol name, the rest is the package/
module path to load it from).
"""
components = symbol_path.split(".")
module_name = ".".join(components[:-1])
symbol_name = components[-1]
module = __import__(module_name, globals(), locals(), [symbol_name])
symbol = getattr(module, symbol_name)
return symbol
|
0b465c87650e80b76d60f47abf6186984399dbd1
| 551,625 |
def get_from_dom(dom, name):
    """
    Safely extract the text of the first ``name`` element from a DOM.

    Returns an empty string when the tag is absent, empty, or any lookup
    step fails.
    """
    try:
        first_child = dom.getElementsByTagName(name)[0].firstChild
        return '' if first_child is None else first_child.nodeValue
    except Exception:
        # Best-effort accessor by design: any failure maps to ''.
        return ''
|
cecd859fee83db50ecbf627306fa2f554daebf7b
| 550,341 |
def DictToGypDefines(def_dict):
    """Convert a dict to GYP_DEFINES format: space-separated k='v' pairs.

    :param def_dict: mapping of define names to values
    :return: single string such as "a='1' b='2'"
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both the formatting is unchanged.
    return ' '.join("%s='%s'" % (k, v) for k, v in def_dict.items())
|
3c3a333f67fe910169d02d6f0d4fe7e480907a3a
| 135,669 |
from pathlib import Path
import json
from typing import OrderedDict
def read_json(fileName):
    """
    Read a JSON file and return its content.

    Parameters
    ----------
    fileName : str
        Name of the JSON file

    Returns
    -------
    content : OrderedDict
        JSON file content
    """
    path = Path(fileName)
    with path.open('r') as handle:
        return json.load(handle, object_hook=OrderedDict)
|
0d972fcceb1f48c61c85e892809ae06b422d6831
| 527,866 |
def hex_to_rgb(hexx):
    """
    Utility function to convert hex to (r,g,b) triples.
    http://ageo.co/1CFxXpO

    Args:
        hexx (str): A hexadecimal colour, starting with '#'.

    Returns:
        tuple: The equivalent RGB triple, in the range 0 to 255.
    """
    digits = hexx.strip('#')
    # One third of the string per channel: 2 chars for '#rrggbb',
    # 1 char for the short '#rgb' form.
    step = len(digits) // 3
    return tuple(int(digits[pos:pos + step], 16)
                 for pos in range(0, len(digits), step))
|
1135f6dab743abeb60e3971bbb47b59087a8a59f
| 137,940 |
def va_from_string(value):
    """
    Parse a hexadecimal string (optionally 'L'-suffixed) into an int.

    :param str value: int as string
    :return int: virtual address, or 0 when the string is not valid hex
    """
    cleaned = value.rstrip("L")
    try:
        return int(cleaned, 16)
    except ValueError:
        return 0
|
ef0e502aa3d1b815554981c62fcaf7c41b724cf0
| 497,106 |
def cmpTup2(tupA, tupB):
    """A comparator function that orders two tuples by their second
    elements, returning -1, 0, or 1."""
    left, right = tupA[1], tupB[1]
    if left < right:
        return -1
    if left > right:
        return 1
    return 0
|
b41dcdd85711027116b503d2a68b2a97c16013b2
| 43,677 |
import math
def format_float_with_standard_uncertainty(value, standard_uncertainty, minimum=1e-12):
    """
    Formats a float, including the uncertainty in its value.

    The uncertainty is rendered in parentheses after the value, scaled to
    the displayed precision (crystallographic "5.0(2)" convention).

    Parameters
    ----------
    value : float
    standard_uncertainty : float
    minimum : float
        Uncertainties at or below this are treated as negligible and no
        parenthesised uncertainty is shown.

    Returns
    -------
    str

    Examples
    --------
    >>> format_float_with_standard_uncertainty(5e-3, 1e-3)
    '0.0050(10)'
    >>> format_float_with_standard_uncertainty(5e-3, 1e-6)
    '0.0050000(10)'
    """
    if standard_uncertainty <= minimum:
        # Negligible uncertainty: round to the precision implied by
        # `minimum` and omit the parenthesised part entirely.
        dp = -int(math.log10(minimum))
        return str(round(value, dp))
    # Number of decimal places needed so the uncertainty's leading digit
    # lands in the last displayed place.
    precision = -int(round(math.log10(standard_uncertainty)))
    if precision > -1:
        # Fractional uncertainty: scale it into the displayed digits.
        su = standard_uncertainty * math.pow(10, precision)
        if round(su, 1) < 2:
            # Keep two significant digits of uncertainty when it would
            # otherwise display as a bare 1 (e.g. '(10)' not '(1)').
            su *= 10
            precision += 1
        return "{value:.{precision}f}({irsu})".format(
            value=value, precision=precision, irsu=int(round(su))
        )
    else:
        # Uncertainty >= 10: show the value with no decimals and the
        # uncertainty as an integer.
        precision += 1
        su = int(round(standard_uncertainty, precision))
        fmt_str = "%.0f(%i)"
        return fmt_str % (round(value, precision), su)
|
23540d178c6dfbe564dd8d63cdee259c2487e445
| 152,176 |
import re
def _filter_name(stack_name,regex_str):
"""
filter a list of string by regex
:param stack_name:
:param regex_str:
:return: if stack name is in regex
"""
return bool(re.search(regex_str, stack_name))
|
051737b20e2f5fe9ca8696942bb4ea80198b15fd
| 475,982 |
def getitem(base, index):
    """Return ``base[index]`` - subscript lookup on the base value."""
    item = base[index]
    return item
|
8ad3cf9923ab49b1277143f1ba8eb17d9b5fd50e
| 622,186 |
def count_digits_recursion(number: int) -> int:
    """
    Recursively count the decimal digits of ``number`` (sign ignored).

    >>> count_digits_recursion(-123)
    3
    >>> count_digits_recursion(-1)
    1
    >>> count_digits_recursion(0)
    1
    >>> count_digits_recursion(123)
    3
    >>> count_digits_recursion(123456)
    6
    """
    number = abs(number)
    if number < 10:
        return 1
    return 1 + count_digits_recursion(number // 10)
|
1f00e5d9ff8a8f4402369477507a192aeb392d79
| 322,524 |
def job_tasks(conf):
    # type: (dict) -> list
    """Get all tasks for job
    :param dict conf: configuration object
    :rtype: list
    :return: list of tasks (the 'tasks' entry; raises KeyError if absent)
    """
    return conf['tasks']
|
60a9cf40228bc2a54c7039fd8b1c5d8161179240
| 104,059 |
def extract_categories_from_categories_column(categories_column):
    """Extract the sorted set of all categories appearing in the specified
    categories column (pandas Series of '|'-delimited strings).

    :param categories_column: Series whose values are e.g. "Drama|Comedy"
    :return: sorted list of distinct category names
    """
    # Iterate values directly: Series.iteritems() was removed in pandas 2.0
    # (the index was unused anyway). A set also replaces the original
    # O(n^2) `category not in result` list-membership test.
    categories = set()
    for categories_text in categories_column:
        categories.update(categories_text.split("|"))
    return sorted(categories)
|
746d0d7876961f1ddb35a0213acab32c4c971194
| 460,439 |
def get_headers(context_data) -> list:
    """
    Arrange the headers by importance - 'name' and 'id' will appear first.

    Args:
        context_data: list or dict containing the context data
    Returns: headers arranged by importance
    """
    if isinstance(context_data, dict):
        records = [context_data]
    else:
        records = context_data
    headers = list(records[0].keys())
    # Promote the special columns, preserving the relative order of the rest.
    for position, special in ((0, 'name'), (1, 'id')):
        if special in headers:
            headers.remove(special)
            headers.insert(position, special)
    return headers
|
a466d6c10070c4d2e7296a79f1e514d45d37901d
| 580,162 |
def increment(x):
    """Return ``x`` plus one."""
    return x + 1
|
416451d93765ee148de9b69bd3e1af0e846d6fb5
| 699,575 |
def _all_dependencies(node, dg):
"""Gets all the dependencies for the passed in node.
:param str node: The node to lookup dependencies for.
:param dict dg: The graph to lookup.
:return: Tuple of a list of pairs from parent to child dependencies and
the list of direct_dependencies.
:rtype: list[tuple[str, str]], list
"""
visited = set()
queue = [node]
edges = []
direct_dependencies = []
while queue:
current_node = queue.pop(0)
if current_node in visited:
continue
visited.add(current_node)
for child in dg[current_node]:
if child not in visited:
assert child != current_node
edges.append((current_node, child))
if current_node == node:
direct_dependencies.append(child)
queue.append(child)
return edges, direct_dependencies
|
509da298ad0e1356fb4932a06bdbd02965aa2b45
| 589,031 |
def get_fromtos_line(linestring_coords):
    """
    Convert a list of linestring coordinates into a list of 'from-to'
    dictionaries, one per consecutive coordinate pair.
    """
    consecutive_pairs = zip(linestring_coords[:-1], linestring_coords[1:])
    return [{'from': start, 'to': end} for start, end in consecutive_pairs]
|
2a05362815a71a718c300b4c05e2a3d4ef8796d0
| 634,258 |
def find_median(arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/find-the-median/problem

    Return the (lower) median of the array: the middle element of the
    sorted values, taking the left of the two middles for even lengths.

    Args:
        arr: list of integers
    Returns:
        int: the value of the median in the array
    """
    ordered = sorted(arr)
    middle = (len(ordered) - 1) // 2
    return ordered[middle]
|
2ff7486ee0844d315e2688df1e4eaec60a30b5a1
| 96,519 |
def normal_response(response):
    """
    Construct a non-error API response from the given response mapping:
    a shallow copy with ``error`` set to None. The input is not mutated.
    Returns a dictionary.
    """
    payload = dict(response)
    payload["error"] = None
    return payload
|
6462efba6c2900107b468760b15904750f7f4a7c
| 350,810 |
def ranks_dict(poset):
    """
    Return a dictionary `r_dict` with rank keys (in descending order, from
    supremum, i.e. `h.vertices()[0]`, to infimum, `h.vertices()[-1]`) and
    lists of indices of the vertices at that level, to be displayed from
    left to right, suitable to be passed as the `heights` argument to
    `poset.plot`.
    """
    h = poset.hasse_diagram()
    ranks = [poset.rank_function()(z) for z in h.vertices()]
    # Sort explicitly: the original relied on `set` iteration happening to
    # be ascending for small ints, which is not a guaranteed behaviour.
    distinct_ranks = sorted(set(ranks))
    r_indices = [[i for i, r in enumerate(ranks) if r == s]
                 for s in distinct_ranks]
    r_dict = dict(reversed(list(zip(distinct_ranks, r_indices))))
    return r_dict
|
2b4a5d46a526a53d0a40203392aab2b48359204a
| 133,442 |
import json
def _parse_tag_field(row):
"""Reading in a tag field and converting to a list of strings."""
if isinstance(row, (list, tuple)):
return row
if not isinstance(row, str):
row = str(row)
if row.startswith('[') and row.endswith(']'):
return json.loads(row)
if row == '-':
return []
if ',' in row:
return row.split(',')
return [row]
|
3033adc0840058d22647403a57f70406ca44cf22
| 116,637 |
import hashlib
def get_md5_sums(file_bytes):
    """ Return the hexadecimal MD5 digest of ``file_bytes`` """
    digest = hashlib.md5(file_bytes)
    return digest.hexdigest()
|
0dbdebac3f0042d8174c90b9f4054b7c39a96877
| 595,267 |
def pack_str(string):
    """Pack a string into a byte sequence (default UTF-8 encoding)."""
    encoded = string.encode()
    return encoded
|
5ef0e1f41db1a242c8a67a90d32397f270e2ce4e
| 51,230 |
def read_numbers(filename):
    """ Read one number per line from the file and return them as a list.

    :param filename: path to the file to read
    :return: list of ints, one per line
    """
    # `with` guarantees the file handle is closed; the original opened the
    # file without ever closing it.
    with open(filename) as f:
        return [int(line) for line in f]
|
a53ff8cab390653d65772645c7ddcab8bcdfeeed
| 387,737 |
def ext_gcd(a, b):
    """
    Extended Euclidean Algorithm: find (x, y) with a*x + b*y = gcd(a, b).

    Parameters
    ----------
    a: int
    b: int
    """
    if b == 0:
        return 1, 0
    if a % b == 0:
        return 0, 1
    x, y = ext_gcd(b, a % b)
    return y, x - y * (a // b)
|
18471d3b9a4e373956245c3b74246cc8a989650e
| 217,841 |
import torch
def MSELoss(outputs, targets):
    """Half mean-squared error loss: 0.5 * mean((outputs - targets)**2).

    Parameters
    ----------
    outputs : tensor
        Outputs of the model.
    targets : tensor
        Expected value of outputs.

    Returns
    -------
    loss : tensor
        The value of the loss function.
    """
    return 0.5 * torch.nn.MSELoss()(outputs, targets)
|
9567b5f00f0d2b3fd3ac106b97e56c01678f9d1c
| 384,779 |
def ode45_step(f, x, t, dt, *args):
    """
    One step of the classical 4th-order Runge-Kutta method.

    :param f: derivative function f(t, x, *args)
    :param x: current state
    :param t: current time
    :param dt: step size
    :return: state advanced by one step of size dt
    """
    k1 = dt * f(t, x, *args)
    k2 = dt * f(t + 0.5 * dt, x + 0.5 * k1, *args)
    k3 = dt * f(t + 0.5 * dt, x + 0.5 * k2, *args)
    k4 = dt * f(t + dt, x + k3, *args)
    # Classical RK4 weighting: midpoint slopes count double. The original
    # summed the four slopes unweighted, which is not 4th-order accurate.
    return x + (k1 + 2 * k2 + 2 * k3 + k4) / 6.
|
84cb95edd2e94cd442f7b767e5a33252b42bfb36
| 551,173 |
import yaml
def read_config_file(config_file):
    """
    Reads configuration information from a YAML file.

    :param config_file: path to the YAML configuration file
    :return: parsed configuration object
    :raises Exception: wrapping any read/parse failure
    """
    # Read the configuration file in YAML format
    try:
        print(f"Using configuration file {config_file}")
        # `with` closes the handle; the original passed a bare open() to
        # yaml.safe_load and leaked the file descriptor.
        with open(config_file) as stream:
            config = yaml.safe_load(stream)
    except Exception as err:
        msg = f"Error reading configuration in {config_file}: {err}"
        # Chain the original exception so the root cause is preserved.
        raise Exception(msg) from err
    return config
|
0687183337fa3a12c8b9516a84a57cc7ff773f92
| 565,032 |
def imag(x):
    """Returns the imaginary part of a complex tensor.

    Assumes the real/imaginary parts are stacked along dim 0 (real at
    index 0, imaginary at index 1 - the pre-``torch.complex`` convention).

    :param x: The complex tensor
    :type x: torch.Tensor
    :returns: The imaginary part of `x`; will have one less dimension than `x`.
    :rtype: torch.Tensor
    """
    imaginary_part = x[1, ...]
    return imaginary_part
|
e2a9a3ba22a4ec896a60b3991618f32014d088fd
| 701,153 |
import collections
def fold_headers(headers):
    """Turn a list of (name, value) header pairs into a folded dict.

    Dict-like inputs (e.g. webob header objects) are returned as a plain
    dict with lower-cased keys. List inputs have values stripped and
    repeated headers joined with commas.
    """
    # Dict-like inputs: lower-case the keys and return directly.
    try:
        return {name.lower(): value for name, value in headers.items()}
    except AttributeError:
        pass
    grouped = collections.defaultdict(list)
    for name, value in headers:
        grouped[name.lower()].append(value.strip())
    return {name: ','.join(values) for name, values in grouped.items()}
|
0c4017c24cafea402fdb9d3a1353a745e13dddec
| 151,551 |
def date_format(date):
    """Render a datetime as a '%d.%m.%Y-%H:%M:%S' formatted string."""
    formatted = date.strftime("%d.%m.%Y-%H:%M:%S")
    return formatted
|
cf67b5ae9b98692f3dbd2dbbf793db3b673b54b2
| 119,548 |
def sprt(likelihood_ratio, alpha, beta, x, random_order = True):
    """
    Performs sequential probability ratio test with desired likelihood ratio.

    Parameters
    ----------
    likelihood_ratio : function
        likelihood ratio function with one parameter, x, the sample values
    alpha : float
        Type I Error
    beta : float
        Type II Error
    x : list
        list of sample values
    random_order : boolean (default : True)
        True: sample values in random order (test sequentially, growing the
        prefix of x until a boundary is crossed)
        False: sample values not in random order (evaluate the whole sample
        at once)

    Returns
    -------
    list
        [conclusion, ts] where conclusion is the ordered pair of booleans
        (reject ho, reject ha) and ts is the final likelihood ratio.
    """
    # calculate stopping rule values (Wald's boundaries: continue while
    # A < ts < B, reject H0 at B, reject Ha at A)
    A, B = beta / (1 - alpha), (1 - beta) / alpha
    ts = 1
    index = 0
    if random_order:
        # NOTE(review): the first loop iteration evaluates the likelihood
        # ratio on the empty prefix x[0:0], and the last element of x is
        # only reached if no boundary was crossed earlier - this looks
        # like an off-by-one (x[0:index] vs x[0:index + 1]); confirm the
        # intended indexing before relying on edge behaviour.
        while (ts > A and ts < B and index < len(x)):
            ts = likelihood_ratio(x[0:index])
            index += 1
    else:
        ts = likelihood_ratio(x)
    # get conclusion of test: crossing the upper boundary rejects H0,
    # crossing the lower rejects Ha, otherwise the test is inconclusive.
    if ts >= B:
        conclusion = [True, False]
    elif ts <= A:
        conclusion = [False, True]
    else:
        conclusion = [False, False]
    return [conclusion, ts]
|
9b63f9b5ebda1d200f0f2a9a8e77438ef756635a
| 207,206 |
import hashlib
def get_agent_id(name, email):
    """Return a suitable '@id' for committers/authors.

    In most cases we will not have a URL for people/software agents, so
    derive a stable ID from the combination of name and email. An MD5 hash
    is returned instead of the plain-text string to discourage direct
    interpretation by humans.
    """
    fingerprint = u'{}<{}>'.format(name.replace(' ', '_'), email)
    return hashlib.md5(fingerprint.encode('utf-8')).hexdigest()
|
be016e3ae828c1c21cda3ad9a308bca9f18807dc
| 215,207 |
def rotate_left(num: int, num_size: int, shift_bits: int) -> int:
    """
    Rotate ``num`` (a ``num_size``-bit value) left by ``shift_bits`` bits.
    See https://en.wikipedia.org/wiki/Bitwise_operation#Rotate_no_carry.

    :param num: the number to rotate
    :param num_size: the size of the number in bits
    :param shift_bits: the number of bits the number is rotated by
    :return: the rotated number
    """
    # Bits that fall off the top, masked to exactly shift_bits bits.
    wrapped = (num >> (num_size - shift_bits)) & ((1 << shift_bits) - 1)
    # Shift left and truncate back to num_size bits, then splice the
    # wrapped bits into the low end.
    return ((num << shift_bits) & ((1 << num_size) - 1)) | wrapped
|
f8c92557bca072b00bde71dd89bb97c6bb2cd169
| 216,145 |
import inspect
def convert_to_dict(val, args, kwargs):
    """
    Convert ``val`` via its ``as_dict`` method when available, otherwise
    return it unchanged.

    An ``is_dict`` attribute set to a falsy value opts the object out of
    conversion. When ``as_dict`` accepts arguments, ``args``/``kwargs``
    are forwarded to it.
    """
    if not hasattr(val, "as_dict"):
        return val
    if hasattr(val, "is_dict") and not val.is_dict:
        return val
    as_dict = val.as_dict
    if inspect.signature(as_dict).parameters:
        return as_dict(*args, **kwargs)
    return as_dict()
|
f7e002dccfe6b3783448167c3057f310365f9d9c
| 304,731 |
def _list_new_metadata(repository_path):
    """
    List the filenames of repository metadata files.

    :param FilePath repository_path: Location of repository to list repository
        metadata from.
    :return: ``set`` of '/'-joined paths, relative to ``repository_path``,
        for every file found under its ``repodata`` directory.
    """
    return {"/".join(path.segmentsFrom(repository_path))
            for path in repository_path.child('repodata').walk()}
|
3a62655e2554cf8fe12a0e2070b4a17201c73b4a
| 135,016 |
import textwrap
def dedent_docstring(text):
    """Dedent typical python doc string.

    Parameters
    ----------
    text : str
        string, typically something like ``func.__doc__``.

    Returns
    -------
    str
        string with the leading common whitespace removed from each line

    See Also
    --------
    textwrap.dedent

    .. versionadded:: 0.19.0
    """
    lines = text.splitlines()
    if len(lines) < 2:
        return text.lstrip()
    # The first docstring line typically has no leading whitespace, which
    # would defeat textwrap.dedent - handle it separately.
    first, rest = lines[0], lines[1:]
    return first.lstrip() + "\n" + textwrap.dedent("\n".join(rest))
|
ba5fcd68165261e6f06fc7fb3cc3f1b61ca85734
| 309,798 |
def get_job_state_str(job):
    """Get the string state of an apscheduler job: 'pending' when the job
    has no ``next_run_time`` attribute yet, 'paused' when it is None, and
    'active' otherwise."""
    if not hasattr(job, 'next_run_time'):
        # based on apscheduler sources
        return 'pending'
    if job.next_run_time is None:
        return 'paused'
    return 'active'
|
fc44db5560bc94a8d48988b2bf36656037bafc72
| 652,734 |
from functools import reduce
def horners_evaluation(f, u):
    """
    Fast evaluation of f(u) using Horner's rule over the dense coefficient
    list (highest degree folded in first).
    """
    if f.is_zero():
        return f.parent().base().zero()
    accumulator = None
    for coefficient in reversed(f.coefficients(sparse=False)):
        if accumulator is None:
            accumulator = coefficient
        else:
            accumulator = accumulator * u + coefficient
    return accumulator
|
59eb16cd1f55c3122ff30f32d7d9245385cae4d3
| 519,929 |
from typing import Any
def select_all(dummy: Any) -> bool:
    """
    Predicate that accepts every value: always returns True.
    """
    return True
|
86fa134791603024a3fb01ebe20ed5730c5edd65
| 583,926 |
def numbers_from_file(path):
    """ Read numbers (one per line) from a file into a list.

    :param path: [string] - path to file with test numbers
    :return: [list] numbers from file or [None] if failed to read file
    """
    result = list()
    try:
        # `with` ensures the handle is closed; the original iterated a
        # bare open() and never closed it.
        with open(path) as handle:
            for line in handle:
                result.append(int(line))
    except OSError:
        print("Failed to create array")
        return None
    return result
|
4615cc5b1d482b1f80bca46811b0092fb8ee0f3d
| 355,651 |
def read_file(filepath: str) -> str:
    """
    Reads a file and returns its contents.

    Args:
        filepath (str): location of file

    Returns:
        str: file contents
    """
    with open(filepath, "r") as handle:
        return handle.read()
|
a3d30c7b9f6fb27406230b011509102bfbf9eac7
| 246,808 |
import socket
def UsesIPv6Connection(host, port):
    """Returns True if the connection to a given host/port could go through IPv6.
    """
    # getaddrinfo yields (family, type, proto, canonname, sockaddr) tuples.
    for family, *_ in socket.getaddrinfo(host, port):
        if family == socket.AF_INET6:
            return True
    return False
|
e053f05338d3d812f6e14b1bb50a4c682000009c
| 521,472 |
import math
def hypotenuse_length(leg_a, leg_b):
    """Find the length of a right triangle's hypotenuse

    :param leg_a: length of one leg of triangle
    :param leg_b: length of other leg of triangle
    :return: length of hypotenuse as a float

    >>> hypotenuse_length(3, 4)
    5.0
    """
    # math.hypot is the dedicated stdlib routine and avoids intermediate
    # overflow for very large legs; the original doctest also claimed the
    # result was ``5`` when the function actually returns ``5.0``.
    return math.hypot(leg_a, leg_b)
|
7a59ede73301f86a8b6ea1ad28490b151ffaa08b
| 13,710 |
def lower(s):
    """lower(s) -> string

    Return a copy of the string s converted to lowercase.
    """
    # Thin wrapper kept for API compatibility with the old string module.
    return str.lower(s)
|
b08d5adcd02f46675b3cde40c14930fae3107e1e
| 486,640 |
def edit_check(user, petition):
    """
    Logic to determine if the user should be able to edit the petition.

    The user may edit only when all of the following hold:
    - the user is logged in and the account is active
    - the user authored the petition
    - the petition has not been published yet (status == 0)

    :param user: The user object
    :param petition: The petition object
    :return: True / False, if the user can edit the current petition.
    """
    # Guard clauses: anonymous or disabled accounts can never edit.
    if not user.is_authenticated:
        return False
    if not user.is_active:
        return False
    # Only the author may edit, and only before publication.
    return user.id == petition.author.id and petition.status == 0
|
f76e9968d41a766eb8a925abd6bcce20c35a736f
| 176,280 |
def is_fitted(estimator) -> bool:
    """
    Checks if an estimator is fitted.

    An estimator counts as fitted once it carries at least one public
    attribute ending in a single underscore (scikit-learn convention).

    Loosely taken from
    https://github.com/scikit-learn/scikit-learn/blob/2beed5584/sklearn/utils/validation.py#L1034
    """  # noqa
    if not hasattr(estimator, "fit"):
        raise TypeError("%s is not an estimator instance." % (estimator))
    return any(
        name.endswith("_") and not name.startswith("__")
        for name in vars(estimator)
    )
|
08f7552bf6bba6918cfcf3f651b106b351abc2aa
| 383,831 |
def find_endpoint(catalog, service_type, region):
    """Locate an endpoint in a service catalog, as returned by IdentityV2.

    Please note that both :param:`service_type` and :param:`region` are
    case sensitive.

    :param dict catalog: The Identity service catalog.
    :param str service_type: The type of service to look for.
    :param str region: The service region the desired endpoint must service.

    :return: The endpoint offering the desired type of service for the
        desired region, if available. None otherwise.
    """
    matching_entries = (
        entry for entry in catalog["access"]["serviceCatalog"]
        if entry["type"] == service_type
    )
    for entry in matching_entries:
        for endpoint in entry["endpoints"]:
            if endpoint["region"] == region:
                return endpoint["publicURL"]
    return None
|
68158eb013106777da4107adafc315498e141683
| 276,929 |
def squareDegFromArcSecSquared(arcsecsq):
    """
    Converts arc second squared to square degrees.

    :param arcsecsq: arc seconds squared
    :type arcsecsq: float or ndarray

    :return: square degrees
    :rtype: float or ndarray
    """
    # 1 deg = 3600 arcsec, so 1 arcsec^2 = (1/3600)^2 ~= 7.716e-8 deg^2.
    conversion_factor = 7.71604938e-8
    return arcsecsq * conversion_factor
|
18ae275b7105b21ae931e9e9c36bc6e8b3278385
| 248,238 |
import json
def parseLatestCurrencies(jsonDump):
    """Parse the latest currency list from an API call.

    Keeps only the 'symbol', 'name', and 'algo' fields of each entry.
    """
    wanted_keys = ('symbol', 'name', 'algo')
    return [
        {key: raw[key] for key in wanted_keys}
        for raw in json.loads(jsonDump)
    ]
|
54c626ae9c2079b7d762f8f41908ccc7a8b13db6
| 515,985 |
import ast
from typing import Any
def get_keyword_arg_value(
    node: ast.Call, keyword_name: str, default: Any = None
) -> Any:
    """Get a keyword arg value from a Call AST node

    Args:
        node: AST Call node, like a function call
        keyword_name: Name of the keyword argument
        default: Default value to return if the keyword was not found

    Returns:
        keyword argument value or default or None
    """
    # Sentinel distinguishes "not found" from a (theoretical) None value.
    _missing = object()
    found = next(
        (kw.value for kw in node.keywords if kw.arg == keyword_name),
        _missing,
    )
    return default if found is _missing else found
|
3745efeebf053c1ff90b3344611a5a4cfc37f2e1
| 437,628 |
def either_connected_or_not_connected(v, vertices_in_module, graph):
    """
    Check whether ``v`` is connected or disconnected to all vertices in the
    module.

    INPUT:

    - ``v`` -- vertex tested

    - ``vertices_in_module`` -- list containing vertices in the module

    - ``graph`` -- graph to which the vertices belong

    EXAMPLES::

        sage: from sage.graphs.graph_decompositions.modular_decomposition import *
        sage: g = graphs.OctahedralGraph()
        sage: print_md_tree(modular_decomposition(g))
        SERIES
         PARALLEL
          2
          3
         PARALLEL
          1
          4
         PARALLEL
          0
          5
        sage: either_connected_or_not_connected(2, [1, 4], g)
        True
        sage: either_connected_or_not_connected(2, [3, 4], g)
        False
    """
    # The first module vertex sets the expectation; every other vertex
    # must agree (all connected to v, or all disconnected from v).
    reference = graph.has_edge(vertices_in_module[0], v)
    return all(graph.has_edge(w, v) == reference for w in vertices_in_module)
|
e506914c6d2c6277e66b0458bf601a20be8fcc48
| 613,193 |
def reverse_list(lst):
    """
    Returns the reversed form of a given list.

    Parameters
    ----------
    lst : list
        Input list.

    Returns
    -------
    reversed_list : list
        Reversed input list.

    Examples
    --------
    >>> lst = [5, 4, 7, 2]
    >>> reverse_list(lst)
    [2, 7, 4, 5]
    """
    return list(reversed(lst))
|
fcb667f1129e35c529037816f0c9778482f0f538
| 173,388 |
from typing import Iterable
def ids2strids(ids: Iterable[int]) -> str:
    """
    Returns a space-separated string representation of a sequence of integers.

    :param ids: Sequence of integers.
    :return: String sequence
    """
    return " ".join(str(i) for i in ids)
|
b2e62cc88ae69fb312d3f102f9dc373b26be1091
| 492,723 |
import socket
import pickle
def send_packet(sock, pack):
    """
    Send a packet to remote socket. We first send
    the size of packet in bytes (4-byte big-endian) followed by the
    actual packet. Packet is serialized using
    cPickle module.

    Arguments
    ---------
    sock : Destination socket
    pack : Instance of class Packet.

    Returns
    -------
    True on success; None when there was nothing valid to send.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # socket.socket subclasses, which the original rejected.
    if pack is None or not isinstance(sock, socket.socket):
        return  # Nothing to send
    pack_raw_bytes = pickle.dumps(pack)
    dsize = len(pack_raw_bytes)
    sock.sendall(dsize.to_bytes(4, byteorder="big"))
    sock.sendall(pack_raw_bytes)
    return True
|
6056663868b7dbc6ad1aa7408ad7044819339308
| 690,899 |
def ordinal(num: int) -> str:
    """
    Returns the ordinal representation of a number

    Examples:
        11: 11th
        13: 13th
        14: 14th
        3: 3rd
        5: 5th

    :param num:
    :return:
    """
    # 11, 12, 13 (and 111, 112, ...) are special-cased: always "th".
    if 11 <= num % 100 <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(num % 10, "th")
    return f"{num}{suffix}"
|
d001961ea66712ed587ec52cbe84b48ca7d0431d
| 340,048 |
def one_cycle_schedule_inv(step, total_steps, warmup_steps=None, hold_min_steps=0, m_max=.95, m_min=.85):
    """ Create a schedule with a momentum that increases linearly after
    linearly decreasing during a warmup period.
    """
    if warmup_steps is None:
        # Default: warm up over half of the non-hold portion of training.
        warmup_steps = (total_steps - hold_min_steps) // 2
    if step < warmup_steps:
        # Linear ramp from m_max down to m_min.
        return m_max + step * ((m_min - m_max) / warmup_steps)
    if step < warmup_steps + hold_min_steps:
        # Hold at the minimum.
        return m_min
    if step / total_steps <= .9:
        # Linear ramp back up to m_max over the first 90% of total steps.
        return m_min + (step - warmup_steps - hold_min_steps) * ((m_max - m_min) / ((total_steps * .9) - warmup_steps - hold_min_steps))
    # Final 10% of training: stay at the maximum.
    return m_max
|
240ed9733c6415bf5be8a8622417ef976a291669
| 532,897 |
def insert_doc(doc, new_items):
    """Insert ``new_items`` into the beginning of the ``doc``

    Docstrings in ``new_items`` will be inserted right after the
    *Parameters* header but before the existing docs.

    Parameters
    ----------
    doc : str
        The existing docstring we're inserting docmentation into.
    new_items : list
        List of strings to be inserted in the ``doc``.

    Examples
    --------
    >>> from nipype.utils.docparse import insert_doc
    >>> doc = '''Parameters
    ... ----------
    ... outline :
    ...     something about an outline'''

    >>> new_items = ['infile : str', '    The name of the input file']
    >>> new_items.extend(['outfile : str', '    The name of the output file'])
    >>> newdoc = insert_doc(doc, new_items)
    >>> print(newdoc)
    Parameters
    ----------
    infile : str
        The name of the input file
    outfile : str
        The name of the output file
    outline :
        something about an outline
    """
    # The first two lines are the "Parameters" header and its underline;
    # splice the new entries directly after them.
    existing = doc.split('\n')
    merged = existing[:2] + list(new_items) + existing[2:]
    return '\n'.join(merged)
|
6b729e9066c2690801d7a749fd366e828bc8cd18
| 39,012 |
def _mock_authenticate_user(_, client=None):
"""Mock Pycognito authenticate user method. This code is from Pycognito's test suite."""
return {
"AuthenticationResult": {
"TokenType": "admin",
"IdToken": "dummy_token",
"AccessToken": "dummy_token",
"RefreshToken": "dummy_token",
}
}
|
dccbdf5138eea63c543a824de3c003efb5af6210
| 696,317 |
def zero_to_dcf(zero_rates):
    """
    Helper function transforms sorted zero rates to discount factors.

    Maturity of the rate at index ``i`` is taken to be ``i + 1`` periods.

    :param zero_rates: zero rates
    :return: discount factors
    """
    return [
        1 / (1 + rate) ** (index + 1)
        for index, rate in enumerate(zero_rates)
    ]
|
f488da70231719183344d2cbe27bb8dfb33b1c85
| 273,116 |
import typing
import types
def flatten(input) -> typing.List:
    """Return the input flattened to an array.

    input: any variable composed of lists/generators/tuples, strings, lambda functions or other
    objects, nested arbitrarily.
    Empty strings and None items are removed.

    Returns a list of strings or other objects depth 1.

    >>> flatten('hi')
    ['hi']
    >>> flatten(['hi', 'ho', 'hum'])
    ['hi', 'ho', 'hum']
    >>> flatten(['hi', ['ho', ['hum'] ] ])
    ['hi', 'ho', 'hum']
    >>> flatten(['hi', ('ho', ('hum') ) ])
    ['hi', 'ho', 'hum']
    >>> flatten(3)
    [3]
    >>> flatten( (x + 1) for x in [1,2,3])
    [2, 3, 4]
    >>> flatten(lambda: '3')
    ['3']
    >>> flatten(['hi', lambda: 'ho', 'hum'])
    ['hi', 'ho', 'hum']
    >>> flatten(None)
    []
    """
    if not input:
        return []
    if isinstance(input, (list, tuple, set, types.GeneratorType)):
        result = []
        for element in input:
            result.extend(flatten(element))
        return result
    if hasattr(input, 'resolveToString'):  # Composeable, etc - delayed
        return [input]
    if hasattr(input, '__call__'):  # a zero-arg lambda
        return flatten(input())
    return [input]
|
5f6790c2a8bb9d549d3e9541c79649ccc06db0f6
| 205,288 |
import functools
import time
def timer(f):
    """ Add this @decorator to a function to print its runtime after completion """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        started = time.perf_counter()
        result = f(*args, **kwargs)
        # Elapsed wall-clock time, rounded to whole minutes.
        elapsed_minutes = round((time.perf_counter() - started) / 60)
        print(f"{f.__name__} completed in {elapsed_minutes} minutes.")
        return result
    return wrapper
|
4c2d91bd492caa5ec78ce855d979a4e63f75f199
| 65,431 |
from pathlib import Path
import hashlib
def sha256_checksum(file_path: Path, block_size: int = 65536) -> str:
    """
    Compute sha256 checksum of file.

    Note: the file's *name* (not its full path) is folded into the digest
    after the contents, so identical files with different names produce
    different checksums.

    Args:
        file_path: path to the file
        block_size: amount of bytes read per cycle

    Returns:
        sha256 hash of the file contents plus its name
    """
    digest = hashlib.sha256()
    with open(file_path, "rb") as handle:
        while True:
            chunk = handle.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    digest.update(file_path.name.encode('utf-8'))
    return digest.hexdigest()
|
00048bfb7e6ec266ce0bcf2d17cef20fd328a201
| 125,129 |
def _make_clean_col_info(col_info, col_id=None):
"""
Fills in missing fields in a col_info object of AddColumn or AddTable user actions.
"""
is_formula = col_info.get('isFormula', True)
ret = {
'isFormula': is_formula,
# A formula column should default to type 'Any'.
'type': col_info.get('type', 'Any' if is_formula else 'Text'),
'formula': col_info.get('formula', '')
}
if col_id:
ret['id'] = col_id
return ret
|
a87c7e4055979cb9d4b5eb26eece29a796f11263
| 569,307 |
def _GetSuspectedCLFoundByHeuristicForCompile(analysis):
"""For compile failure, gets the suspected revision found by heuristic."""
if not analysis or not analysis.result:
return None
for failure in analysis.result.get('failures', []):
if (failure['step_name'].lower() == 'compile' and
len(failure['suspected_cls']) == 1):
# Based on confidence calculation, suspected_cl found by heuristic for
# compile is very likely to be the culprit.
# Since the current confidence calculation is for results with single
# suspected_cl, we might need to have the same regulation here.
return failure['suspected_cls'][0]
return None
|
43117676bd1ac0bd91528291bed46278882a31f4
| 670,279 |
def output_group_name(group, pnr, settings):
    """
    Output <num><group_name> if it's a group PNR and has no paxes.
    Output <`unnamed seats number`><group_name> if free seats exists.
    Group contains total seats and some named seats.

    Example:
    `0.22SOTSZSHITA/GRP NM3 TE252`
    `1.CHURBANOV/STEPAN MR 2.DARENKOVA/MARINA MRS 3.EGOROV/NIKITA MR`
    """
    paxes = pnr['name']
    if not paxes:
        # Pure group PNR: every seat is unnamed.
        return group.total + group.name
    # Seats already assigned to named passengers.
    named_seats = sum(pax.nseats for pax in paxes)
    free_seats = int(group.total) - named_seats
    if free_seats > 0:
        return str(free_seats) + group.name
    return None
|
519688e18569ebd8bd9bf793e5daafc3fd743a62
| 139,699 |
def divide_dict(a_dict, divide_func):
    """Divide a dict like object into two parts.

    - a_dict: dict like object
    - divide_func: predicate ``(key, value) -> bool``

    Return ``(matching, non_matching)`` dicts.

    Example:
        divide({'a': 1, 'b': 2}, lambda k, v: v > 1) -> {'b': 2}, {'a': 1}
    """
    matching, non_matching = {}, {}
    for key, value in a_dict.items():
        if divide_func(key, value):
            matching[key] = value
        else:
            non_matching[key] = value
    return matching, non_matching
|
a86c9ba77d6cb69d5a5b49f874444c0c8ecfc94e
| 158,802 |
def unlist(l):
    """Return the first element of each inner sequence in ``l``."""
    return [inner[0] for inner in l]
|
a6c72850b7af6e26067260f592792ba45389df74
| 166,430 |
def iter_reduce_ufunc(ufunc, arr_iter, out=None):
    """
    constant memory iteration and reduction

    applys ufunc from left to right over the input arrays

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool_ibeis.other import *  # NOQA
        >>> arr_list = [
        ...     np.array([0, 1, 2, 3, 8, 9]),
        ...     np.array([4, 1, 2, 3, 4, 5]),
        ...     np.array([0, 5, 2, 3, 4, 5]),
        ...     np.array([1, 1, 6, 3, 4, 5]),
        ...     np.array([0, 1, 2, 7, 4, 5])
        ... ]
        >>> memory = np.array([9, 9, 9, 9, 9, 9])
        >>> gen_memory = memory.copy()
        >>> def arr_gen(arr_list, gen_memory):
        ...     for arr in arr_list:
        ...         gen_memory[:] = arr
        ...         yield gen_memory
        >>> print('memory = %r' % (memory,))
        >>> print('gen_memory = %r' % (gen_memory,))
        >>> ufunc = np.maximum
        >>> res1 = iter_reduce_ufunc(ufunc, iter(arr_list), out=None)
        >>> res2 = iter_reduce_ufunc(ufunc, iter(arr_list), out=memory)
        >>> res3 = iter_reduce_ufunc(ufunc, arr_gen(arr_list, gen_memory), out=memory)
        >>> print('res1 = %r' % (res1,))
        >>> print('res2 = %r' % (res2,))
        >>> print('res3 = %r' % (res3,))
        >>> print('memory = %r' % (memory,))
        >>> print('gen_memory = %r' % (gen_memory,))
        >>> assert np.all(res1 == res2)
        >>> assert np.all(res2 == res3)
    """
    # An empty iterator reduces to None by convention.
    try:
        first = next(arr_iter)
    except StopIteration:
        return None
    # Seed the accumulator: reuse caller-provided memory when given,
    # otherwise copy the first array so it is not mutated.
    if out is None:
        out = first.copy()
    else:
        out[:] = first
    for arr in arr_iter:
        ufunc(out, arr, out=out)
    return out
|
f73c4e556763852450443825bc12224f791f7583
| 690,479 |
def blackbody_temperature(bmag, vmag):
    """
    calculate blackbody temperature using the Ballesteros formula; Eq. 14 in
    https://arxiv.org/pdf/1201.1809.pdf
    """
    # Scaled B-V color index used in both terms of the formula.
    color = 0.92 * (bmag - vmag)
    return 4600 * ((1 / (color + 1.7)) + (1 / (color + 0.62)))
|
2a62dc39eb9ecc974b942b45df68d266373fb1ef
| 472,346 |
def toc2plaintext(toc: list) -> str:
    """
    :param toc: table of content <- DOCUMENT.get_toc()
    :return: plaintext
    """
    # One line per entry: stars for the level, then title and page number.
    lines = [
        f'{int(entry[0]) * "*"}-->{entry[1]}-->{entry[2]}'
        for entry in toc
    ]
    return '\n'.join(lines)
|
870a50c68fcf120809528bf8f846ec9f0fbb94c4
| 274,355 |
def mapattr(value, arg):
    """
    Maps an attribute from a list of objects into a new list.

    e.g. value = [obj1, obj2, obj3]  (each with attribute ``a``)
         arg = 'a'
         result = [obj1.a, obj2.a, obj3.a]

    Note: uses ``getattr``, so items must be objects with the attribute,
    not dicts (the original docstring's dict example was wrong).
    """
    # A comprehension over an empty iterable already yields [], so the old
    # len() check was redundant; this also accepts generators, which have
    # no len().
    return [getattr(o, arg) for o in value]
|
34e45bcf804d37feb5995b88534cca78679d8cfb
| 701,904 |
import math
def sawtooth(v):
    """
    Return a value corresponding to a sawtooth wave
    """
    # Period 1, range (-1, 1], with the discontinuity at half-integers.
    return 2 * (v - math.floor(v + 0.5))
|
3ebac2805d570099e3b36112d0f4d57daffb58ef
| 268,050 |
def pos_to_label_format(text):
    """Returns valid Bootstrap classes to label a ballot position."""
    discuss_classes = 'bg-discuss text-light'
    mapping = {
        'Yes': 'bg-yes text-light',
        'No Objection': 'bg-noobj text-dark',
        'Abstain': 'bg-abstain text-light',
        'Discuss': discuss_classes,
        'Block': discuss_classes,
        'Recuse': 'bg-recuse text-light',
        'Not Ready': discuss_classes,
        'Need More Time': discuss_classes,
    }
    return mapping.get(str(text), 'bg-norecord text-dark')
|
82734c483e6ab940ad2a0f72bfc74c6aa8beba08
| 422,777 |
def binarylist_to_integer(lst):
    """
    Convert a binary number in a form of a list to an integer.

    Parameters
    ----------
    lst : list
        List containing digits of a binary number.

    Returns
    -------
    int
        A decimal integer.
    """
    bits = ''.join(str(digit) for digit in lst)
    return int(bits, 2)
|
e7803b0325252f89409843fdc8a38f30d72c4b96
| 490,043 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.