content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---|
def resolve_translation(instance, info, language_code):
"""Gets translation object from instance based on language code."""
return instance.translations.filter(language_code=language_code).first()
|
79737c123e09760fb1514bbfad7d73c385a4309a
| 16,676 |
import random
def rand_range_float(_range):
""" Generate a random float between a tuple range """
return round(random.uniform(_range[0], _range[1]), 2)
|
ba2273114a1f2d9fe1df4f30518372e591d9fe2b
| 381,048 |
def _not_cal(not_sign, right):
"""
    Negate a boolean operand if a NOT sign is present
    Args:
        not_sign (bool): whether a NOT sign is present
        right (bool): the boolean operand
Returns:
bool
"""
if not_sign:
right = not right
return right
|
4bc0b13d474dd1a80c439c693872b1b5e9e91bfb
| 87,778 |
def replaceAll(text, replace_dict):
"""
Replace all the ``replace_dict`` keys by their associated item in ``text``.
"""
for i, j in replace_dict.items():
text = text.replace(i, j)
return text
|
3cba9a96e70049d169c5a7d0eeedc2dca777e8cd
| 120,256 |
def has_decreased(scores, in_last):
"""Return True iff the score in the last `in_last` descended."""
if in_last >= len(scores):
return True
last = scores[-(in_last + 1)]
for score in scores[-in_last:]:
if score < last:
return True
last = score
return False
|
472f01b5573ecf965ab5b7c239904ac113cc0b67
| 30,962 |
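A quick usage sketch for has_decreased above, with made-up score lists (not part of the dataset row):
scores = [0.50, 0.48, 0.47, 0.47]
print(has_decreased(scores, in_last=2))                    # True: 0.47 < 0.48 within the last 2 entries
print(has_decreased([0.40, 0.45, 0.47, 0.50], in_last=2))  # False: scores only rose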
def load_file_list(filename):
"""
Load a text file containing a list of filenames (or other strings) as a
Python list.
To obtain a list of DataFile objects, the result can easily be converted
as in the example:
>>> raw_biases = DataFileList(load_file_list('list_of_biases.txt'))
If the listed files need downloading first, the usage would be similar to:
>>> bias_list = load_file_list('list_of_biases.txt')
>>> download_files(bias_list, server='gemini', dirname='raw')
>>> raw_biases = DataFileList(bias_list, dirname='raw')
(or it may be preferable to produce the initial list by other means, such
as command-line arguments or a list definition in the user script).
The DataFileList object can subsequently be used in place of the initial
plain Python list.
Parameters
----------
filename : str
Name of a plain-text file, containing one entry per line. Although
the intention is mainly to work with filenames, any non-comment strings
are valid. Lines whose first non-whitespace character is '#' are
treated as comments.
Returns
-------
list of str
A list of filenames (or other strings), one per input line with any
leading or trailing whitespace removed.
"""
    flist = []
    with open(filename, 'r') as f:
        # Append one filename per line to the list:
        for line in f:
            line = line.strip()          # remove new lines & trailing/leading space
            if line and line[0] != '#':  # ignore empty lines & comments
                flist.append(line)
    return flist
|
bbb49943b9d5ea6869be35d343ce0f7452d1f6d5
| 646,821 |
def _get_cols_length(table, cols_num):
"""Return the max length of every columns.
"""
cols_length = [0] * cols_num
for row in table:
for (i, col) in enumerate(row):
col_len = len(col)
if col_len > cols_length[i]:
cols_length[i] = col_len
return cols_length
|
02c852944d3d5efc50bec79adfe32f4519803c7a
| 583,492 |
def get_snapshots_from_text(query_output):
""" Translate expanded snapshots from `cali-query -e` output into list of dicts
Takes input in the form
attr1=x,attr2=y
attr1=z
...
and converts it to `[ { 'attr1' : 'x', 'attr2' : 'y' }, { 'attr1' : 'z' } ]`
"""
snapshots = []
for line in query_output.decode().splitlines():
snapshots.append( { kv.partition('=')[0] : kv.partition('=')[2] for kv in line.split(',') } )
return snapshots
|
f6bfb56329c04ee8e325b5bd82ea417901420a58
| 649,406 |
def get_band_index(band_name):
"""Get the write-index value for a Sentinel-2 image band
For bands 1 through 8, we return the band number. For 8A,
we return 9. For bands above 8A, we add one to the band
number.
Args:
band_name (str): the name of the band, e.g. "nir - 8A"
Return:
int
"""
name, num = band_name.split(' - ')
if num.lower() == '8a':
return 9
elif int(num) > 8:
return int(num) + 1
else:
return int(num)
|
16197e5303d259b3502cf20255cd68c514215e3c
| 50,224 |
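A brief sanity check of the 8A offset logic in get_band_index; the band names are hypothetical but follow the "name - number" format the docstring assumes:
for name in ("red - 4", "nir - 8A", "swir - 11"):
    print(name, "->", get_band_index(name))
# red - 4 -> 4, nir - 8A -> 9, swir - 11 -> 12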
def normalized_age(age):
"""
    Normalize age to [-1, 1]
    """
    # assuming ages in [0, 120]; the original `age / 120 - 1` maps that
    # range onto [-1, 0] rather than [-1, 1]
    return age / 60 - 1
|
608b3aae05397210f50a28ee5697b2acd91cde4d
| 271,038 |
def format_port(port):
"""Render port option."""
return '-p {}'.format(port) if port else ''
|
bb7dcdb474d11d2e7c9cf6f404b9b4db87321e36
| 157,052 |
def parse_bsub(output):
"""Parse bsub output and return job id.
:param output: stdout of bsub command
:type output: str
:returns: job id
:rtype: str
"""
for line in output.split("\n"):
if line.startswith("Job"):
return line.split()[1][1:-1]
|
557377cbc28ba9e1bd516a3ce68132ced5b05b7b
| 65,320 |
def sim_to_rgba(similarity, edge_color):
"""Convert similarity to RGBA.
Parameters
----------
similarity : float
Similarity between two answers.
edge_color : tuple
When the graph is plotted, this is the RGB color of the edge
i.e (0, 0, 1).
Returns
-------
tuple i.e (0, 0, 1, 0.78)
RGBA values i.e (R, G, B, A).
"""
RGBA = (
*edge_color, similarity
)
return RGBA
|
3b609dae4a8bb096333fbed76e9ea3d0ece92952
| 573,948 |
def fib(N, memo=None):
    """
    Avoid redoing the same work over and over by storing every computed
    Fibonacci number in a dictionary; the memo is threaded through the
    recursive calls so results are actually reused (a memo created fresh
    inside each call would never be hit).
    Time: O(n)
    """
    if memo is None:
        memo = {}
    if N in memo:
        return memo[N]
    if N <= 2:
        f = 1
    else:
        f = fib(N - 1, memo) + fib(N - 2, memo)
    memo[N] = f
    return f
|
c7461f0861844080c6a7bd2371a3644e4d48e441
| 401,632 |
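A minimal check of the memoized fib above; without threading the memo through the recursive calls, the call tree would be exponential:
print([fib(n) for n in range(1, 11)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
print(fib(100))                        # 354224848179261915075, returned near-instantly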
def get_file_contents(filename):
"""
Return contents of a file
"""
with open(filename) as fd:
return fd.read()
|
b967f4676141afe877e5c33f315cc013b1cc49f6
| 264,445 |
def show_world(world):
"""
    Render the world parameter as a single string instead of a list,
    with rows separated by line breaks (\n)
    """
    rendered_world = "\n".join(''.join(row) for row in world)
    return rendered_world
|
3b3fcc22e0626e08bdd16a392f3de572bbd3b412
| 324,677 |
import time
def elim_code_36(main_df):
"""
Remove undefined violations (code 36)
"""
start = time.time()
main_df = main_df[main_df['Violation Code']!=36].sort_values('Summons Number',ascending=False)
end = time.time()
print("Eliminate undefined violations time: ", end - start)
print(main_df.head())
return main_df
|
533697c50f716ebd2a156d02f12e6e234f193e14
| 162,594 |
import torch
def sort_batch(seq_len):
"""Sorts torch tensor of integer indices by decreasing order."""
with torch.no_grad():
slens, sidxs = torch.sort(seq_len, descending=True)
oidxs = torch.sort(sidxs)[1]
return oidxs, sidxs, slens.tolist()
|
bb8861731974a5e561322d7f2cb73b1a0837664f
| 119,705 |
def cat_arrays(vector_arrays):
"""Return a new |VectorArray| which a concatenation of the arrays in `vector_arrays`."""
vector_arrays = list(vector_arrays)
total_length = sum(map(len, vector_arrays))
cated_arrays = vector_arrays[0].empty(reserve=total_length)
for a in vector_arrays:
cated_arrays.append(a)
return cated_arrays
|
eb7793aa45e031c374957803e135f5e605f25aa9
| 389,097 |
def unit_temp(ua):
"""
Computes the unit temperature in the given units
"""
return ua.MeV
|
65d40ce710195f6111dedf0779199f2932ee3a7d
| 543,558 |
def normalize_image(image):
"""Normalizes an image by dividing it by 255
Parameters
----------
image -- a numpy array representing an image
Returns
-------
Normalized image
"""
return image / 255
|
b6927305c69c9da8e92503c8e29d7212804adfe3
| 52,917 |
def is_unsupported_size_mechanism_type(size_mechanism: str) -> bool:
"""Whether the given size mechanism is unknown/unsupported."""
return size_mechanism not in {
"fixed",
"len",
"len-in-bytes",
"ivi-dance",
"passed-in",
"passed-in-by-ptr",
"ivi-dance-with-a-twist",
"two-dimension",
"custom-code",
}
|
dd069872d58b9ead8e460d6802a3dc5b37331a6b
| 151,910 |
import requests
def registry(url, resource='/registry'):
"""Retrieve the chip spec registry
Args:
url (str): protocol://host:port/path
resource (str): /registry/resource/path (default: /registry)
Returns:
list
Example:
>>> chipmunk.registry(url='http://host:port/path')
[{'data_fill': '-9999',
'data_mask': {},
'data_range': [],
'data_scale': None,
'data_shape': [100, 100],
'data_type': 'INT16',
'data_units': None,
'info': 'band 5 top-of-atmosphere reflectance',
'tags': ['swir1', 'b5', 'tab5', 'lt05', 'lt05_tab5', 'ta'],
'ubid': 'LT05_TAB5'},
{'data_fill': '-9999',
'data_mask': {},
'data_range': [],
'data_scale': None,
'data_shape': [100, 100],
'data_type': 'INT16',
'data_units': None,
'info': 'band 7 top-of-atmosphere reflectance',
'tags': ['lt05_tab7', 'b7', 'lt05', 'swir2', 'tab7', 'ta'],
'ubid': 'LT05_TAB7'}, ...]
"""
return requests.get(url="{}{}".format(url, resource)).json()
|
5be4970ddeba9c1a087569a6601596c90e9cede3
| 321,748 |
import re
def regex_replace(value, regex, replace):
"""Replace every string matching the given regex with the replacement"""
return re.sub(regex, replace, value, flags=re.IGNORECASE)
|
b9b892041fe92399a5ff67c605451b1d406235d9
| 89,975 |
def lookupName(n, names):
"""Check if name is in list of names
Parameters
----------
n : str
Name to check
names : list
List of names to check in
Returns
-------
bool
Flag denoting if name has been found in list (True) or not (False)
"""
    return n in names
|
0fbb97e252f5daf9de52a946c206fa74395b01c6
| 4,924 |
def species_block(spc_dct):
""" Writes the species block of the mechanism file
:param spc_dct: dct containing the species data
:type spc_dct: dct {spc_name:data}
:return spc_str: str containing the species block
:rtype: str
"""
# Get the max species name length
max_len = 0
for spc_name in spc_dct.keys():
if len(spc_name) > max_len:
max_len = len(spc_name)
buffer = 5
# Write the spc_str
spc_str = 'SPECIES \n\n'
for spc_name, spc_data in spc_dct.items():
spc_str += (
'{0:<' + str(max_len+buffer) + 's}{1:>9s}{2:>9s}\n').format(
spc_name, '! InChi: ', spc_data['inchi'])
spc_str += '\nEND \n\n\n'
return spc_str
|
3ee94eeeb0d23531009a8f218dccbaf8553ae98f
| 472,301 |
def column_lettering(colnum: int) -> str:
"""
Converts a zero-based column index into a spreadsheet-style column name
(A[0] to Z[25], then AA[26] to AZ[51], etc). Basically, it's almost base
26, but without a proper sense of zero (in that A is zero, but AA is 26).
"""
assert colnum >= 0
base = 26
zero_char = ord("A")
reversed_chars = ""
while True:
big, small = divmod(colnum, base)
reversed_chars += chr(zero_char + small)
if big == 0:
break
colnum = big - 1
return reversed_chars[::-1]
|
ef92fae57f32cca77e2dfad3183dfcd1f002a002
| 146,009 |
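Spot checks for column_lettering at the base-26 rollover points described in its docstring:
for i in (0, 25, 26, 51, 52, 701, 702):
    print(i, column_lettering(i))
# 0 A, 25 Z, 26 AA, 51 AZ, 52 BA, 701 ZZ, 702 AAA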
def get_appendix_data(df, table):
"""
Given Appendix A DataFrame df and table name,
return Sequence, Start, and End numbers as lists.
"""
df = df[df['name'] == table]
return df['seq'].tolist(), df['start'].tolist(), df['end'].tolist()
|
46ec1a9b5c0c065d9a261981efb5d7bb625971a9
| 648,951 |
def check_password(password, guess):
"""Takes two string variables: the password and the guess. Returns true if
they match, or false if they do not."""
    return password == guess
|
dc49a94229ddf8db5c2594db735008cfee145b75
| 372,412 |
def get_times(ts_full, ts_system, len_state, sys_position, sys_length):
"""
This is a function specifically designed for TEDOPA systems. It calculates
the proper 'ts' and 'subsystems' input lists for :func:`tmps.evolve` from a
list of times where the full state shall be returned and a list of times
where only the reduced state of the system in question shall be returned.
ts then basically is a concatenation of ts_full and ts_system,
while subsystems will indicate that at the respective time in ts either
the full state or only a reduced density matrix should be returned.
Args:
ts_full (list[float]):
List of times where the full state including environment chain
should be returned
ts_system (list[float]):
List of times where only the reduced density matrix of the system
should be returned
len_state (int):
The length of the state
sys_position (int):
The position of the system (first site would be 0)
sys_length (int):
Length of the system, i.e. number of sites the system is
comprised of
Returns:
tuple(list[float], list[list[int]]):
Times and subsystems in the form that has to be provided to
:func:`tmps.evolve`
"""
ts = list(ts_full) + list(ts_system)
subsystems = [[0, len_state]] * len(ts_full) + \
[[sys_position, sys_position + sys_length]] * len(ts_system)
return ts, subsystems
|
26495149a867ed9b42da4c0db288d81c4db350dc
| 33,877 |
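A small illustration of get_times with made-up TEDOPA parameters (all values hypothetical): a 5-site state whose system sits at site 2.
ts, subsystems = get_times(ts_full=[0.0, 1.0], ts_system=[0.5],
                           len_state=5, sys_position=2, sys_length=1)
print(ts)          # [0.0, 1.0, 0.5]
print(subsystems)  # [[0, 5], [0, 5], [2, 3]]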
from typing import Tuple
import asyncio
import threading
def setup_loop_in_thread() -> Tuple[asyncio.AbstractEventLoop, threading.Thread]:
"""Sets up a new asyncio event loop in a new thread, and runs it forever.
Returns:
A tuple containing the event loop and the thread.
"""
loop = asyncio.new_event_loop()
thread = threading.Thread(target=loop.run_forever, daemon=True)
thread.start()
return loop, thread
|
cd8e3c882cd50ddb7fbee4b0653c981c1c3bdb8a
| 84,491 |
def secondary_sort(attr: str) -> tuple:
"""
Determine the secondary sort attribute and order when the
primary sort attribute has the same value.
Args:
attr (str): The primary sort attribute
Returns:
tuple: Secondary sort attribute and sort order
"""
if attr in ['wins', 'losses', 'ties']:
secondary_attr = 'win_pct'
elif attr == 'win_pct':
secondary_attr = 'wins'
    elif attr in ['conference_wins', 'conference_losses', 'conference_ties']:
secondary_attr = 'conference_win_pct'
elif attr == 'conference_win_pct':
secondary_attr = 'conference_wins'
else:
secondary_attr = attr
return secondary_attr, True
|
54c86b1856baca7f5f1748108bf974e91018a581
| 273,809 |
def colored_pixels(img, thresh):
"""
Find the positions of all pixels in the image that are considered dark
enough for text.
The image is assumed to be grayscale.
Args:
img (numpy.ndarray): Image to check.
thresh (int): Background threshold up to which a pixel is considered
colored.
Returns:
list[tuple[int, int]]: List of coordinates for all colored pixels.
"""
assert len(img.shape) == 2 # 2D matrix
return [(x, y) for y, row in enumerate(img) for x, pixel in enumerate(row)
if pixel <= thresh]
|
c8a045245972e574df27f15219d5642b3e5cd02b
| 74,612 |
def _trim(strings):
"""Remove leading and trailing whitespace from each string."""
return [x.strip() for x in strings]
|
a0c3c6352a27791f3477dae5f807e4d6727b3e6e
| 236,837 |
def replace_repetitive_column_names(column_name, buildings):
"""
Returns column_name _unless_ it's one of a few special cases (building names, PIPE names, NODE names, srf names)
:param str column_name: the name of the column
:return: column_name or similar (for repetitive column names)
"""
if column_name.startswith('srf'):
column_name = "srf0"
if column_name.startswith('PIPE'):
column_name = "PIPE0"
if column_name.startswith('NODE'):
column_name = "NODE0"
if column_name in buildings:
column_name = buildings[0]
return column_name
|
26a6deccee32d87ef99da90f37b3fb929475b0f2
| 443,576 |
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
"""Returns the sum of the arithmetic sequence with parameters a, n, d.
a: The first term in the sequence
n: The total number of terms in the sequence
d: The difference between any two terms in the sequence
"""
return n * (2 * a + (n - 1) * d) // 2
|
168f0b07cbe6275ddb54c1a1390b41a0f340b0a6
| 706,463 |
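A worked check of the closed form n*(2a + (n-1)d)/2, using Gauss's classic 1 + 2 + ... + 100:
print(arithmetic_series(a=1, n=100))     # 5050
print(arithmetic_series(a=2, n=5, d=3))  # 40, i.e. 2 + 5 + 8 + 11 + 14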
def order_tweets_by_polarity(tweets, positive_highest=True):
"""Sort the tweets by polarity, receives positive_highest which determines
the order. Returns a list of ordered tweets."""
return sorted(tweets, key=lambda x: x.polarity, reverse=positive_highest)
|
1b98a8ff71e00f52465f395851c499368003afd1
| 134,623 |
import struct
def calcChecksum(data):
"""Calculate the checksum for an arbitrary block of data.
    If the data length is not a multiple of four, the data is assumed
    to be padded with null bytes.
    >>> print(calcChecksum(b"abcd"))
    1633837924
    >>> print(calcChecksum(b"abcdxyz"))
    3655064932
"""
remainder = len(data) % 4
if remainder:
data += b"\0" * (4 - remainder)
value = 0
blockSize = 4096
assert blockSize % 4 == 0
for i in range(0, len(data), blockSize):
block = data[i:i+blockSize]
longs = struct.unpack(">%dL" % (len(block) // 4), block)
value = (value + sum(longs)) & 0xffffffff
return value
|
7678edec74457c74123ac3de1d1fe83008f67f31
| 512,074 |
def checkio(expression):
"""Check expression for correctly matched brackets."""
stack = []
closers = dict(zip(')]}', '([{'))
for char in expression:
# build up the stack with opening brackets
if char in '([{':
stack.append(char)
# tear down the stack when closing brackets are encountered
if char in closers:
if stack and stack[-1] == closers[char]:
stack.pop()
else:
return False
# check for remaining un-matched opening brackets
    return not stack
|
73c2b9ec6c70735b822133dde07ffe4127ecc7e5
| 460,684 |
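Quick bracket-matching checks for checkio above:
print(checkio("({[]})"))  # True: every closer matches the most recent opener
print(checkio("(]"))      # False: ']' closes an unmatched '('
print(checkio("((("))     # False: unmatched openers remain on the stack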
import re
def extract_shortwords(text, length=3):
"""Returns a list with all short words of a particular length
params:
length: Specify the length of words you want to remove
default is 3 letter word length
"""
token_words = re.split(r"\W+", text)
short_words_list = [i for i in token_words if len(i) <= int(length)]
return short_words_list
|
8f91a457be6cfc1d8dab82f160d76934928fd93b
| 470,611 |
from datetime import datetime
def time_diff_close_enough(time1, time2, within_minutes):
"""Test whether two datetimes (TIMESTAMP's) are:
1. within a specified number of minutes of each other; and
2. within a specified number (the same number) of minutes of right now.
If both are true, then they are deemed to be "close enough", on the
assumption that they were each set to NOW() or CURRENT_TIMESTAMP(), and
the difference is because VoltDB and its comparison database (HSQL or
PostgreSQL) called that function at slightly different times.
"""
time_diff_in_minutes = (time1 - time2).total_seconds() / 60
if abs(time_diff_in_minutes) > within_minutes:
return False
time_diff_in_minutes = (time2 - datetime.now()).total_seconds() / 60
if abs(time_diff_in_minutes) > within_minutes:
return False
return True
|
347cadf36008e4f483515215e8c0a66ea23e34f6
| 134,382 |
def mcastIp2McastMac(ip):
""" Convert a dot-notated IPv4 multicast address string into an multicast MAC address"""
digits = [int(d) for d in ip.split('.')]
return '01:00:5e:%02x:%02x:%02x' % (digits[1] & 0x7f, digits[2] & 0xff, digits[3] & 0xff)
|
4713507e933e35b07fa917e04da431d71fe2b38f
| 121,805 |
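A spot check of the multicast mapping; only the low 23 bits of the IP survive, so distinct groups can share a MAC:
print(mcastIp2McastMac('224.1.2.3'))    # 01:00:5e:01:02:03
print(mcastIp2McastMac('239.129.2.3'))  # 01:00:5e:01:02:03 as well, since 129 & 0x7f == 1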
def get_openssl_url_version(version):
"""
Get the OpenSSL URL version part from the openssl version string.
"""
if version[:3] == "3.0":
return "3.0"
tokens = version.split(".")
if len(tokens) > 2:
return ".".join(tokens[:2] + [tokens[2][0],])
return version
|
24ef49860a1e658a748b8a5c6dc39f0d38959bb0
| 593,087 |
def dot_product(vector1, vector2):
"""
    Returns the dot product between two vectors. It can be
    used as a helper function when multiplying two matrices.
"""
result = 0
for i in range(len(vector1)):
result += vector1[i] * vector2[i]
return result
|
e23cb2cfc480cf1ec0eea0fe3e915ee7bd20a8d3
| 416,871 |
import functools
def optional_parameter_decorator(f):
"""
A decorator for a decorator, allowing the decorator to be used both with
and without arguments applied.
Example:
------
@optional_parameter_decorator
def decorator(func, foo='bar'):
pass
which can now be used as
@decorator
def my_method():
pass
or
@decorator(foo='bar')
def my_method():
pass
"""
@functools.wraps(f)
def wrapped_decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# Return the actual decorated function.
return f(args[0])
else:
# Wrap the function in a decorator in the case that arguments are
# applied.
return lambda realf: f(realf, *args, **kwargs)
return wrapped_decorator
|
dd92d18aa3c6159f6dfbfb31267e6a47c3dad237
| 320,595 |
def _generate_cache_key(ctx, spec_name, platform, configuration):
"""
Generate a composite key for (optional) spec_name, platform, and configuration
:param ctx: The context
:param spec_name: name of the spec (or None, use the current spec)
:param platform: the platform (or None, use the current platform)
:param configuration: the configuration (or None, use the current configuration)
:return: The composite key
"""
if hasattr(ctx, 'env'):
if not platform:
platform = ctx.env['PLATFORM']
if not configuration:
configuration = ctx.env['CONFIGURATION']
else:
if not platform:
platform = 'none'
if not configuration:
configuration = 'none'
composite_key = spec_name + '_' + platform + '_' + configuration
return composite_key
|
41cc6d6e66a4eba8cd74c81d13e8a1b0cd4623c3
| 90,989 |
from typing import List
def get_scores(true_pos: int, false_pos: int, false_neg: int) -> List[float]:
"""
    calculate performance scores from the computed confusion matrix: precision,
    recall (sensitivity), and F1-score.
    :param true_pos: number of correct detections
    :type true_pos: int
    :param false_pos: number of false detections
    :type false_pos: int
    :param false_neg: number of missed QRS complexes
    :type false_neg: int
    :return: list of calculated scores : precision, recall (sensitivity) and F1-score
:rtype: list(float)
"""
positive_predictivity = round(100 * true_pos / (true_pos + false_pos), 2)
recall = round(100 * true_pos / (true_pos + false_neg), 2)
f1_score = round(100 * 2 * true_pos / ((2 * true_pos) + false_pos + false_neg), 2)
return [positive_predictivity, recall, f1_score]
|
a917033018e551f55290f4f9ce20f90ba4c50ad1
| 580,988 |
def len_max_increasing_subsequence(n, l):
"""
l is a list containing a permutation of 1, ..., n.
Returns the maximal length of an increasing subsequence in l.
"""
    # d[x] stores the length of the maximal increasing subsequence ending in
    # value x; one extra slot is allocated because the values run from 1 to n
    # (the original [0] * n allocation overflowed at x == n), and d[0] stays 0
    # as a sentinel so max() below never sees an empty sequence
    d = [0] * (n + 1)
    for x in l:
        d[x] = 1 + max(d[j] for j in range(x))
    return max(d)
|
5fc9aca6915d3a3e78e40c8386977aa8539b5f4f
| 489,202 |
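A quick check of len_max_increasing_subsequence on a small permutation:
print(len_max_increasing_subsequence(5, [2, 5, 1, 3, 4]))  # 3, e.g. the subsequence 2, 3, 4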
def genKgris(k):
"""
    Compute a list of ``k`` shades of gray going from black to white.
    Parameter:
    k --> number of shades (>=2)
    The generated list must start with the color black (0,0,0)
    and must end with the color white (255,255,255).
    The other values must be shades of gray uniformly distributed
    between black and white.
    :: EXAMPLES::
    >>> genKgris(2)
    [(0, 0, 0), (255, 255, 255)]
    >>> genKgris(3)
    [(0, 0, 0), (127, 127, 127), (255, 255, 255)]
    >>> genKgris(4)
    [(0, 0, 0), (85, 85, 85), (170, 170, 170), (255, 255, 255)]
    """
    coef = 255//(k-2+1)  # -2 (black and white) +1 (at least 1 element)
    # teintes holds the value of each pixel level, to avoid repetition;
    # it starts with black...
    teintes = [0]
    teintes += [n*coef for n in range(1, k-1)]  # intermediate values
    # ...and ends with white.
    teintes += [255]
return [(v,v,v) for v in teintes]
|
3e480d5bba5f60e3448392da97c7d7738f5decad
| 24,531 |
def get_threshold(rows, bands):
"""Approximate threshold from bandwidth and number of rows
:param rows: rows per band
:param bands: number of bands
:return: threshold value
:rtype: float
"""
return (1. / bands) ** (1. / rows)
|
3c5a5e417e96797e18b571cb7c09597442cb5f44
| 26,886 |
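A worked example of the (1/bands)**(1/rows) approximation, as used for MinHash LSH banding:
print(get_threshold(rows=5, bands=20))  # (1/20) ** (1/5) ≈ 0.549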
def create_token_request_payload(auth_code, redirect_uri, optional_token_request_params):
"""
Construct payload for access token request
"""
token_request_payload = {
# Don't include client_id param: Verizon doesn't like it
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': redirect_uri,
**optional_token_request_params
}
return token_request_payload
|
d61708ec69913fcc52ec3538a240633716e36377
| 566,580 |
import torch
def scale_boxes(boxes, scale):
"""
Args:
boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4
coords representing the corners x0, y0, x1, y1,
scale (float, float): The box scaling factor (w, h).
Returns:
Scaled boxes.
"""
w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
w_half *= scale[0]
h_half *= scale[1]
scaled_boxes = torch.zeros_like(boxes)
scaled_boxes[:, 0] = x_c - w_half
scaled_boxes[:, 2] = x_c + w_half
scaled_boxes[:, 1] = y_c - h_half
scaled_boxes[:, 3] = y_c + h_half
return scaled_boxes
|
88e97c67d7fefa387c4794508368d3d20082a5dc
| 414,953 |
def levenshtein_distance(str_a, str_b):
"""Calculate the Levenshtein distance between string a and b.
:param str_a: String - input string a
:param str_b: String - input string b
:return: Number - Levenshtein Distance between string a and b
"""
len_a, len_b = len(str_a), len(str_b)
if len_a > len_b:
str_a, str_b = str_b, str_a
len_a, len_b = len_b, len_a
current = range(len_a + 1)
for i in range(1, len_b + 1):
previous, current = current, [i] + [0] * len_a
for j in range(1, len_a + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if str_a[j - 1] != str_b[i - 1]:
                change += 1
current[j] = min(add, delete, change)
return current[len_a]
|
89506ab9235a67cb581d703ad91d267e376ec24c
| 522,483 |
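Sanity checks for levenshtein_distance above:
print(levenshtein_distance("kitten", "sitting"))  # 3: k->s, e->i, append g
print(levenshtein_distance("flaw", "lawn"))       # 2: drop f, append n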
from datetime import datetime
def reformat_subway_dates(date):
"""
The dates in our subway data are formatted in the format month-day-year.
The dates in our weather underground data are formatted year-month-day.
In order to join these two data sets together, we'll want the dates formatted
the same way. Write a function that takes as its input a date in the MTA Subway
data format, and returns a date in the weather underground format.
Hint:
There are a couple of useful functions in the datetime library that will
help on this assignment, called strptime and strftime.
More info can be seen here and further in the documentation section:
http://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
"""
struct_time = datetime.strptime(date, "%m-%d-%y")
date_formatted = datetime.strftime(struct_time, "%Y-%m-%d")
return date_formatted
|
a40aff606bc790e41b75b4588dbb9b4442510805
| 51,274 |
def w_lin_update(u, Lin_lhs, Lin_rhs):
"""Computes the update for the auxilary tumor dose distribution w_0 based on the current u
Parameters
----------
u : np.array of shape(None,)
Beamlet radiation distribution
Lin_lhs : np.array of shape(None, u.shape[0])
Stacked lhs of the constraints
Lin_rhs : np.array of shape(None,)
Stacked rhs of the constraints
Returns
-------
np.array of shape (None,)
        update for the auxiliary fluence map w_lin
"""
w_lin_next = Lin_lhs.dot(u)
violation_indices = w_lin_next - Lin_rhs > 0
w_lin_next[violation_indices] = Lin_rhs[violation_indices]
return w_lin_next
|
9e8e5f8d1daac113a9673daaa1414f5b9b709af0
| 231,353 |
def _is_hd(release_name):
""" Determine if a release is classified as high-definition
:param release_name: release name to parse
:type release_name: basestring
:return: high-def status
:rtype: bool
"""
if "720" in release_name or "1080" in release_name:
return True
return False
|
eac085e5de4a5edd63d1960cb1b3a8ef79b7fd58
| 648,295 |
def generate_catalog_mags(instrument_mags, color, model):
"""
Generate catalog magnitudes from instrumental magnitudes
given a model that relates the two.
"""
return instrument_mags + model(color)
|
0b39a7dae5eb1f573c62b25b7053acebf28e91d2
| 20,194 |
def llist(self, nl1="", nl2="", ninc="", lab="", **kwargs):
"""Lists the defined lines.
APDL Command: LLIST
Parameters
----------
nl1, nl2, ninc
List lines from NL1 to NL2 (defaults to NL1) in steps of NINC
(defaults to 1). If NL1 = ALL (default), NL2 and NINC are ignored
and all selected lines [LSEL] are listed. If NL1 = P, graphical
picking is enabled and all remaining command fields are ignored
(valid only in the GUI). A component name may also be substituted
for NL1 (NL2 and NINC are ignored).
lab
Determines what type of listing is used (one of the following):
(blank) - Prints information about all lines in the specified range.
RADIUS - Prints the radius of certain circular arcs, along with the keypoint numbers of
each line. Straight lines, non-circular curves, and
circular arcs not internally identified as arcs (which
depends upon how each arc is created) will print a radius
value of zero.
LAYER - Prints layer-mesh control specifications.
HPT - Prints information about only those lines that contain hard points. HPT is not
supported in the GUI.
ORIENT - Prints a list of lines, and identifies any orientation keypoints and any cross
section IDs that are associated with the lines. Used for
beam meshing with defined orientation nodes and cross
sections.
Notes
-----
There are 2 listings for the number of element divisions and the
spacing ratio. The first listing shows assignments from LESIZE only,
followed by the "hard" key (KYNDIV). See LESIZE for more information.
The second listing shows NDIV and SPACE for the existing mesh, if one
exists. Whether this existing mesh and the mesh generated by LESIZE
match at any given point depends upon meshing options and the sequence
of meshing operations.
A "-1" in the "nodes" column indicates that the line has been meshed
but that there are no interior nodes.
An attribute (TYPE, MAT, REAL, or ESYS) listed as a zero is unassigned;
one listed as a positive value indicates that the attribute was
assigned with the LATT command (and will not be reset to zero if the
mesh is cleared); one listed as a negative value indicates that the
attribute was assigned using the attribute pointer [TYPE, MAT, REAL, or
ESYS] that was active during meshing (and will be reset to zero if the
mesh is cleared).
This command is valid in any processor.
"""
command = f"LLIST,{nl1},{nl2},{ninc},{lab}"
return self.run(command, **kwargs)
|
8a6b258be0c34d930d01dd27791b3c2ec2708ec5
| 127,909 |
def get_z(coeffs, x, y):
"""
Calculate and return the height z given the coefficients and ordinates of ax + by + c = z
    :param coeffs: tuple of float coefficients a, b, c, in the order (a, b, c)
    :param x: x value
    :param y: y value
    :return: the calculated z value
    """
    assert isinstance(coeffs, tuple) and 3 == len(coeffs)
    assert all([isinstance(v, float) for v in coeffs])
    assert isinstance(x, float)
    assert isinstance(y, float)
    a, b, c = coeffs
    z = a*x + b*y + c
    return z
|
e414555b052deb02928859f71ede4d1f9a2efa09
| 621,294 |
def merge(values_1, values_2, labels, join='inner'):
"""Merge two dictionaries. The resulting dictionary will map key values to
dictionaries. Each nested dictionary has two elements, representing the
values from the respective merged dictionary. The labels for these elements
are defined by the labels argument.
The join method allows for four types of merging:
- inner: Keep only those keys that are in the intersection of both
dictionaries.
- outer: Keep all keys from the union of both dictionaries.
- left-outer: Keep all keys from the first dictionary.
- right-outer: Keep all keys from the second dictionary.
Raises a ValueError if the number of given labels is not two or if an
invalid join method is specified.
Parameters
----------
    values_1: dict
        Left side of the join.
    values_2: dict
        Right side of the join.
    labels: tuple
        Labels for the two elements in each nested result dictionary.
    join: enum['inner', 'outer', 'left-outer', 'right-outer'], default='inner'
        Join method identifier.
Returns
-------
dict
Raises
------
ValueError
"""
if len(labels) != 2:
raise ValueError('invalid label list {}'.format(labels))
label_1, label_2 = labels
result = dict()
if join == 'inner':
for key, value in values_1.items():
if key in values_2:
result[key] = {label_1: value, label_2: values_2[key]}
elif join == 'outer':
for key, value in values_1.items():
result[key] = {label_1: value, label_2: values_2.get(key)}
# Add elements in the second dictionary that are not part of the
# result yet.
for key, value in values_2.items():
if key not in result:
result[key] = {label_1: None, label_2: value}
elif join == 'left-outer':
for key, value in values_1.items():
result[key] = {label_1: value, label_2: values_2.get(key)}
    elif join == 'right-outer':
for key, value in values_2.items():
result[key] = {label_1: values_1.get(key), label_2: value}
else:
raise ValueError('invalid join method {}'.format(join))
return result
|
282b358c44a53a0f88afab5af8beb639c45b90d0
| 106,174 |
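A short demo of merge on toy dictionaries, including the corrected right-outer branch:
a = {'x': 1, 'y': 2}
b = {'y': 20, 'z': 30}
print(merge(a, b, labels=('left', 'right')))
# {'y': {'left': 2, 'right': 20}}
print(merge(a, b, labels=('left', 'right'), join='right-outer'))
# {'y': {'left': 2, 'right': 20}, 'z': {'left': None, 'right': 30}}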
import re
def to_single_space(string):
"""Remove leading and trailing whitespace and replace newlines, tabs and
sequences of 2 or more space to one space.
"""
patt = re.compile(' {2,}')
return patt.sub(' ', string.strip().replace('\n', ' ').replace('\t', ' '))
|
1e4c5b18bd9c42019194e4f769888293c0bd90f9
| 559,750 |
def map_freqs(s):
"""
    s is a non-empty string of characters.
    Returns a dictionary that maps each character in s that
    is a digit (0-9) to the number of times it occurs in s.
"""
d = {}
for char in s:
if char.isdigit():
if char not in d:
d[char] = 1
else:
d[char] = d[char] + 1
return d
|
08150866dcb45e758cab9c0761061e14eba3fc76
| 75,171 |
import json
def read_proxy_config(file_name):
"""convert proxy file from json file to python data structure
Parameters
--------------
file_name : str
location of proxies json list file
Returns
-------------
object
datastructure representation of json file
"""
with open(file_name) as data_file:
data = json.load(data_file)
return data
|
15ef78c1b5a67424d5cf1b3179fad1d884b80178
| 623,883 |
import json
def getJson(fnm):
"""Parse a json file as a dict"""
with open(fnm) as f:
return json.load(f)
|
3d89138ba5615ec17e0cad025736da0a9a8838a2
| 491,600 |
def get_first_match(regex, directory):
"""
Checks if `directory` matches `regex` and returns the first match converted
to an integer. If it does not match -1 is returned.
Arguments:
- `regex`: Regular expression to be tested against
- `directory`: name of the directory under test
"""
result = regex.search(directory)
if result is None:
return -1
else:
return int(result.group(1))
|
958417a22f43045f0c2384cf2da1dec108fdc8b3
| 142,484 |
def get_ref_path(openapi_major_version):
"""Return the path for references based on the openapi version
:param int openapi_major_version: The major version of the OpenAPI standard
to use. Supported values are 2 and 3.
"""
ref_paths = {2: 'definitions',
3: 'components/schemas'}
return ref_paths[openapi_major_version]
|
f0e29f0e02cb38f503bad0e14b2516b5a2d2ac9f
| 611,531 |
def MakeRanges(codes):
"""Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]"""
ranges = []
last = -100
for c in codes:
if c == last+1:
ranges[-1][1] = c
else:
ranges.append([c, c])
last = c
return ranges
|
f43e79a4d604d8ce322fdd1fc7c22bdf57725e60
| 615,933 |
import pathlib
def get_creation_timestamp(filename):
"""Get creation timestamp for a file"""
filen = pathlib.Path(filename)
assert filen.exists()
return filen.stat().st_ctime
|
aa8b8b1efa24c0449c7d53eb4bb15de4c51cbea2
| 566,673 |
from datetime import datetime
from typing import Optional
def date_slug(timestamp: Optional[datetime] = None) -> str:
    """Format the given (or current) date stamp as a slug."""
    if timestamp is None:
        timestamp = datetime.now()  # per call; a default argument would be evaluated once at import
    return timestamp.strftime("%Y%m%d%H%M%S")
|
527dd2266d9b004f6c062437e6426ccaaac19259
| 496,760 |
def GetHeaderGuard(path):
"""
Returns the header #define guard for the given file path.
This treats everything after the last instance of "src/" as being a
relevant part of the guard. If there is no "src/", then the entire path
is used.
"""
src_index = path.rfind('src/')
if src_index != -1:
guard = path[src_index + 4:]
else:
guard = path
guard = guard.upper()
return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_'
|
950ab5729325c096780e662aa225faaaf00a66b3
| 548,578 |
def parseInt(s, ret=0):
"""Parses a value as int."""
if not isinstance(s, str):
return int(s)
elif s:
if s[0] in "+-":
ts = s[1:]
else:
ts = s
if ts and all([_ in "0123456789" for _ in ts]):
return int(s)
return ret
|
a4f3f900c8010aad4dfd9ec4265cdff292053625
| 532,624 |
def decdeg2dms(dd):
""" Tansform decimal degrees into degrees minutes seconds
Argument:
dd (float): decimal angle
Returns:
degrees, minutes, seconds"""
negative = dd < 0
dd = abs(dd)
minutes, seconds = divmod(dd * 3600, 60)
degrees, minutes = divmod(minutes, 60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
return degrees, minutes, seconds
|
7ef8af3156988fdc93e6287761dd82e428372550
| 418,797 |
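A worked conversion with decdeg2dms; note the sign lands on the first nonzero component:
print(decdeg2dms(-12.5755))  # (-12.0, 34.0, ~31.8)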
import asyncio
def make_future(result) -> asyncio.Future:
"""Make future for value."""
f = asyncio.Future() # type: ignore
f.set_result(result)
return f
|
df8eaf3e7affbf0235127e50ea79e605c93668b1
| 150,384 |
def sum_numbers(first_int, second_int):
"""Returns the sum of the two integers"""
result = first_int + second_int
return result
|
eaf7d297e4f2043124f1b9864447d0af3691d19a
| 402,646 |
def sub_field(k, v):
"""Return a nested dictionary with field keys k and value v."""
res = {}
field_d = res
fields = k.split('.')
for f in fields[:-1]:
field_d[f] = {}
field_d = field_d[f]
field_d[fields[-1]] = v
return res
|
193869fdfaca84172c71ca935f5fdb312682b19e
| 9,247 |
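sub_field in action, expanding a dotted key into nested dictionaries:
print(sub_field('a.b.c', 1))  # {'a': {'b': {'c': 1}}}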
import copy
def remove_pad_sequences(sequences, pad_id=0):
"""Remove padding.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
pad_id : int
The pad ID.
Returns
----------
list of list of int
The processed sequences.
Examples
----------
>>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]
>>> print(remove_pad_sequences(sequences, pad_id=0))
[[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]
"""
    sequences_out = copy.deepcopy(sequences)
    for i, _ in enumerate(sequences):
        for j in range(1, len(sequences[i])):
            if sequences[i][-j] != pad_id:
                # compute the slice end explicitly so that j == 1 (no trailing
                # padding) keeps the whole sequence; the original [0:-j + 1]
                # slice emptied such sequences
                sequences_out[i] = sequences_out[i][:len(sequences[i]) - j + 1]
                break
    return sequences_out
|
15657e0ce29774ec7bb84c9c16b8ffdd25a38567
| 622,337 |
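The explicit slice end above matters when a sequence has no trailing padding; a quick check:
print(remove_pad_sequences([[2, 3, 4, 0, 0], [1, 2, 3]]))
# [[2, 3, 4], [1, 2, 3]] -- the original [0:-j + 1] slice emptied [1, 2, 3]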
from typing import Tuple
def calculate_number_of_valid_lines(lines: Tuple[str, ...]) -> int:
"""
Note that this function does not count empty lines and headings.
:param lines: Result of a f.readlines() operation, i.e., a list of strings.
:return: The number of valid lines in this file
"""
line_counter = 0
for line in lines:
if line.strip("\n ") and not line.startswith("=="):
line_counter += 1
return line_counter
|
5fcd4fd514d9a095a9e9409ba512e693331dcfff
| 222,766 |
def column_id(team_name: str, session: str) -> str:
"""
Create a unique column ID based on team and session identifier.
This is needed for the dash datatable.
"""
return team_name + "_" + session
|
c53b8fac617e17b311c405c083f6a3f5ced6f732
| 239,960 |
def read_flag_file(filename):
"""
    Reads the flag file, keeping only the lines that start with "-" (comments and blanks are dropped)
"""
with open(filename, "r") as myfile:
lines = myfile.read().splitlines()
# filter the lines
lines = [l for l in lines if l.startswith("-")]
return " ".join(lines)
|
d990347b56d7f85339eb0c0dbad925680010fd6f
| 85,022 |
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
"""Splits param group into weight_decay / non-weight decay.
Tweaked from https://bit.ly/3dzyqod
:param model: the torch.nn model
:param weight_decay: weight decay term
:param skip_list: extra modules (besides BN/bias) to skip
:returns: split param group into weight_decay/not-weight decay
:rtype: list(dict)
"""
# if weight_decay == 0:
# return model.parameters()
decay, no_decay = [], []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
if len(param.shape) == 1 or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0, 'ignore': True},
{'params': decay, 'weight_decay': weight_decay, 'ignore': False}]
|
786993f755adabf09b34a36bade30cbad71d76ce
| 679,149 |
def has_valid_number_pieces(configuration, original_permutation):
"""
Checks if a certain chess configuration has the valid number of pieces
"""
pieces = set(original_permutation)
while len(pieces) > 0:
piece = pieces.pop()
if original_permutation.count(piece) != configuration.count(piece):
return False
return True
|
b5ea05de21b38b22dab87a162d87791c6b194299
| 419,205 |
def copy_digesters(digesters):
"""Returns copy of provided digesters since deepcopying doesn't work."""
result = {}
for hash_algorithm in digesters:
result[hash_algorithm] = digesters[hash_algorithm].copy()
return result
|
56cd4c26d6c8cb5c5f82ac3420f1165bb8c8e9fc
| 609,938 |
import requests
def _get_newest_entry(subreddit_name):
"""
Extracts newest subreddit entry from JSON RSS
:param str subreddit_name: Subreddit to get entry for
:return: Extracted rss data
:rtype: dict
"""
url = "https://www.reddit.com/r/{}/new/.json".format(subreddit_name)
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get(url, headers=headers)
response.raise_for_status()
rss = response.json()
return rss['data']['children'][0]['data']
|
9e43739d631d9a4c253937f09dcabb20c779aefc
| 199,201 |
import json
def parse_message(line: str) -> dict:
"""
Given a string that looks like json with
a 'msg' element in it, remove the msg element
and replace it with a set of keys and values
derived from expanding the msg field on spaces.
For example, a line string like:
{"one": "fish","two": "fish","msg": "red fish poop fish"}
would produce a return dict like:
{'one': 'fish', 'two': 'fish', 'red': 'fish', 'poop': 'fish'}
"""
data = json.loads(line)
message = data['msg']
del data['msg']
elements = message.split()
while elements:
key = elements.pop(0)
if elements:
value = elements.pop(0)
else:
value = ""
data[key] = value
return data
|
659f9666b7c135fabff59ff40770352964aad3d0
| 398,457 |
def parse_relation(fields):
"""
    Assumes all relations are binary, argument names are discarded
:param fields: correspond to one Brat line seperated by tab
:return: relation id, relation name, arg1 and arg2
"""
rel, a1, a2 = fields[1].split(" ")
rel_id = fields[0]
return rel_id, rel, a1.split(":")[1], a2.split(":")[1]
|
9e86f45d571e7b3de2e64645209a5854f145330e
| 692,858 |
def upper_chars(string, indices):
"""
Make characters uppercase in string
:param string: string to modify
    :param indices: character indices to change to uppercase
:return: uppercased string
"""
upper_string = "".join(c.upper() if i in indices else c for i, c in enumerate(string))
return upper_string
|
409a0dc99649d1ee5a3e8def0fa618f89b43b949
| 598,990 |
def output_script_error(error, cfg):
"""
Cleanly outputs an error return from an Apps Script API call
"""
cfg['logger'].error('Script error', error_message=str(error['errorMessage']))
if 'scriptStackTraceElements' in error:
        cfg['logger'].error('Script error stacktrace follows')
for trace in error['scriptStackTraceElements']:
cfg['logger'].error('\t%s: %s' % (str(trace['function']),
str(trace['lineNumber'])))
return 'Error'
|
62bbb1fc4cef44df56efbd7f0177f2a064f9c49c
| 148,078 |
def find_missing(integers_list, start=None, limit=None):
""" Given a list of integers and optionally a start and an end
finds all integers from start to end that are not in the list
"""
start = start if start is not None else integers_list[0]
limit = limit if limit is not None else integers_list[-1]
return [i for i in range(start, limit + 1) if i not in integers_list]
|
317b60d7b9dd1fb6168ce2fd2bc00f03a69772a7
| 84,304 |
def get_nim_sum(state: tuple[int, ...]) -> int:
"""
Get the nim sum of a position. See https://www.archimedes-lab.org/How_to_Solve/Win_at_Nim.html
:param state: the state of the game
:return: the nim sum of the current position
"""
cur_sum = 0
for n in state:
cur_sum ^= n
return cur_sum
|
d1b3cf67d86fce56ffd69cb6ede438b8c2cc85f6
| 688,427 |
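A quick nim-sum check; a nonzero result means the position is a win for the player to move:
print(get_nim_sum((3, 4, 5)))  # 2, since 3 ^ 4 ^ 5 == 2
print(get_nim_sum((1, 2, 3)))  # 0: a losing position for the player to move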
def flatten_spatial_dims(data):
"""Flatten all spatial dimensions of an input array.
Assumes input array has shape (time, vert, lat, lon).
"""
n_lon, n_lat, n_lev = data.shape[-1], data.shape[-2], data.shape[-3]
n_pt = n_lon*n_lat*n_lev
return data.reshape((-1, n_pt))
|
07be987d55b115c8e0e9977bc6731b50450cee1d
| 297,889 |
from datetime import datetime
def createTrunk(name):
"""
Creates a trunk name for data in data_vault corresponding
to the current date.
Arguments:
name (str) : the name of the client.
Returns:
        (list[str]) : the trunk to create in data_vault.
"""
date = datetime.now()
trunk1 = '{0:d}_{1:02d}_{2:02d}'.format(date.year, date.month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(name, date.hour, date.minute)
return ['', str(date.year), '{:02d}'.format(date.month), trunk1, trunk2]
|
c4b1967468159cc13a551afcb142b05a510174ad
| 23,153 |
def read_entries(fileobj):
"""
Reads environment variable assignments from a file-like object. Only lines that
contain an equal sign (=) and do not start with # (comments) are considered. Any
leading/trailing quotes around the value portion of the assignment are stripped.
"""
entries = {}
for line in fileobj.readlines():
line = line.strip()
if "=" in line and not line.startswith("#"):
key, value = line.split("=", 1)
entries[key.strip()] = value.strip().strip("\"'")
return entries
|
3a74fd3855536389ef06ac2317b2bb52dd75ce52
| 351,367 |
def create_searching_by_message(searching_by: str):
"""
Creates a message informing the user how something is being searched
:param searching_by: A string saying how something is searched
:return:
A title cased string in the format of 'Searching: {searching_by}' minus
any `-` characters
"""
# remove any `-` characters and title case
formatted_string = searching_by.replace('-', '').title()
return f'Searching: {formatted_string}'
|
6c695b491f3558f2bdddc8b880662aa706fb9395
| 128,771 |
def classname(class_object):
"""Returns the class name of the given object"""
return class_object.__class__.__name__
|
f08c8bf90b0e5243c83d9d213ca208f50e27ab96
| 590,412 |
import unicodedata
def strip_symbols(s, pass_symbols=(u'й', u'Й', u'\n')):
""" Strip ugly unicode symbols from a string """
result = []
for char in s:
# Pass these symbols without processing
if char in pass_symbols:
result.append(char)
continue
for c in unicodedata.normalize('NFKC', char):
if unicodedata.category(c) == 'Zs':
result.append(u' ')
continue
if unicodedata.category(c) not in ['So', 'Mn', 'Lo', 'Cn', 'Co', 'Cf', 'Cc']:
result.append(c)
return u"".join(result)
|
223e2c0bb742bfaa9b5674584f700f9fcb0ff265
| 369,901 |
def get_names(soup):
"""Return a list of store location public names"""
    storeid = soup.find(id="our-menu").find_all('option')
    names = [option.text for option in storeid]
    return names
|
5eab999c9b7cfa90dc8e62d30e7a7e1b119dd157
| 342,788 |
def _fuzz_vhost_create_dev(client, socket, is_blk, use_bogus_buffer, use_valid_buffer, test_scsi_tmf, valid_lun):
"""Create a new device in the vhost fuzzer.
Args:
socket: A valid unix domain socket for the dev to bind to.
is_blk: if set, create a virtio_blk device, otherwise use scsi.
use_bogus_buffer: if set, pass an invalid memory address as a buffer accompanying requests.
use_valid_buffer: if set, pass in a valid memory buffer with requests. Overrides use_bogus_buffer.
test_scsi_tmf: Test scsi management commands on the given device. Valid if and only if is_blk is false.
valid_lun: Supply only a valid lun number when submitting commands to the given device. Valid if and only if is_blk is false.
Returns:
True or False
"""
params = {"socket": socket,
"is_blk": is_blk,
"use_bogus_buffer": use_bogus_buffer,
"use_valid_buffer": use_valid_buffer,
"test_scsi_tmf": test_scsi_tmf,
"valid_lun": valid_lun}
return client.call("fuzz_vhost_create_dev", params)
|
1045e55eb6acc5eea690a0389475a7b0bfea9484
| 276,810 |
def suffixtonumber(suffix):
""" Given a set of ascii_lowercase values, get a base 26 number.
a = 0, ... z = 25, aa = 26, ...
"""
    # int(s, base=26) doesn't work here: this is bijective base 26, where
    # there is no zero digit, so e.g. 'aa' must come out as 26, not 0
    result = 0
    for ch in suffix:
        result = result * 26 + (ord(ch) - ord('a') + 1)
    return result - 1
|
08dcfab32f17a0fc52ccbb432513ab17cb174c3b
| 165,970 |
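Spot checks matching the docstring's bijective base-26 numbering (the original int()-based version returned 0 for 'aa'):
for s in ('a', 'z', 'aa', 'az', 'ba'):
    print(s, suffixtonumber(s))
# a 0, z 25, aa 26, az 51, ba 52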
def getDocComment(node):
"""
Returns the first doc comment of the given node.
"""
comments = getattr(node, "comments", None)
if comments:
for comment in comments:
if comment.variant == "doc":
return comment
return None
|
4cc32f84d4e127880f9a137447e2d9401a6fbd6c
| 644,668 |
import re
def get_ao3_identifiers(bookmarks):
"""
Given the output from Pinboard's /posts/all API, return all the unique AO3
identifiers. This is the numeric ID after /works.
e.g. The ID in https://archiveofourown.org/works/1160745 is 1160745
"""
saved_urls = [bk["href"] for bk in bookmarks]
AO3_LINK_RE = re.compile(
r"^https?://archiveofourown\.org/works/(?P<work_id>\d+)"
)
ao3_identifiers = set()
for saved_bookmark in bookmarks:
url = saved_bookmark["href"]
match = AO3_LINK_RE.match(url)
if match is None:
continue
ao3_identifiers.add(match.group("work_id"))
return ao3_identifiers
|
e1cfbf68b4e7296241e419794810afa61405154b
| 393,074 |