content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---|
def get_workitem_id_from_task_name(task_name: str):
"""Parse the Task Name to get the Work Item ID"""
return task_name.split(" ")[0]
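# A minimal usage sketch (hypothetical task name; assumes the Work Item ID is
# the first space-separated token):
# >>> get_workitem_id_from_task_name("1234 Fix login page")
# '1234'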
|
2f5c22b02fc132e319404fb43f444f9f2044315e
| 30,627 |
import torch
def get_model(model='PGAN', dataset='celebAHQ-512', use_gpu=True):
"""Returns a pretrained GAN from (https://github.com/facebookresearch/pytorch_GAN_zoo).
Args:
model (str): Available values are "PGAN", "DCGAN".
        dataset (str): Available values are "celebAHQ-256", "celebAHQ-512", "DTD", "celeba".
Ignored if model="DCGAN".
use_gpu (bool): Whether to use gpu.
"""
all_models = ['PGAN', 'DCGAN']
    if model not in all_models:
raise KeyError(
f"'model' should be in {all_models}."
)
pgan_datasets = ['celebAHQ-256', 'celebAHQ-512', 'DTD', 'celeba']
    if model == 'PGAN' and dataset not in pgan_datasets:
raise KeyError(
f"If model == 'PGAN', dataset should be in {pgan_datasets}"
)
model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', model,
model_name=dataset, pretrained=True, useGPU=use_gpu)
return model
|
bb8df4164d27566960acf01d3664b3802d0c4fe7
| 30,628 |
import re
def __expand_ranges(expression: str) -> str:
"""Expand ranges in a given expression.
Args:
expression: The expression to expand.
Returns:
The expression with ranges expanded.
"""
    # Find {n}..{n} in the expression.
    pattern = re.compile(r"(\d+)\.\.(\d+)")
    # Expand ranges
    pos = 0
    while True:
        match = pattern.search(expression, pos)
        if match is None:
            break
        left, right = int(match.group(1)), int(match.group(2))
        if left <= right:
            # Replace the range expression with a comma-separated list.
            numbers = [str(i) for i in range(left, right + 1)]
            expression = expression.replace(match.group(0), ",".join(numbers))
        else:
            # Skip past a descending range instead of matching it forever.
            pos = match.end()
    return expression
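# A minimal usage sketch (hypothetical expression; the double-underscore name
# is mangled when the function is defined inside a class):
# >>> __expand_ranges("1..5,10")
# '1,2,3,4,5,10'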
|
3563e4b452d49eca3c04ace0e796a14a7347700a
| 30,632 |
def fmtcols(mylist, cols):
"""Generate a string of tab and newline delimited columns from a list
"""
lines = ("\t".join(mylist[i:i + cols])
for i in range(0, len(mylist), cols))
return '\n'.join(lines)
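# A minimal usage sketch:
# >>> fmtcols(["a", "b", "c", "d"], 2)
# 'a\tb\nc\td'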
|
5f83aa16039edafa6789c8cd1580ff45ae495f67
| 30,637 |
def split_list(l: list, n: int = 1) -> list:
"""Split list into n parts"""
length = len(l)
split = []
for i in range(n):
split.append(l[i*length // n: (i+1)*length // n])
return split
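# A minimal usage sketch (the parts differ in size by at most one element):
# >>> split_list([1, 2, 3, 4, 5], 2)
# [[1, 2], [3, 4, 5]]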
|
76bfc987dca606fda945a3222a852d0c0c8489db
| 30,638 |
def get_start(maze):
"""Searches for the 1 inside the maze.
Returns:
The row and column of the found 1.
E.g. if 1 was in row 3 and column 4, this would return:
3, 4
If there is no 1 in the maze, this returns
-1, -1
"""
for y, row in enumerate(maze):
for x, col in enumerate(row):
if col == 1:
return y, x
return -1, -1
|
f2561d9af924eb28c86807e079c9515f8c395bf1
| 30,641 |
def get_nonzero_either_mask(vector_a, vector_b):
"""Returns a numpy array of boolean values indicating where values in two
vectors are both greater than zero.
Parameters
----------
vector_a : numpy.ndarray
Array of counts or RPKM
vector_b : numpy.ndarray
Array of counts or RPKM
Returns
-------
numpy.ndarray
Boolean array that is `True` where both `vector_a` and `vector_b`
have values greater than zero, and `False` elsewhere.
"""
return (vector_a > 0) & (vector_b > 0)
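# A minimal usage sketch (hypothetical count vectors):
# >>> import numpy as np
# >>> get_nonzero_either_mask(np.array([1, 0, 3]), np.array([2, 5, 0]))
# array([ True, False, False])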
|
82f7433bcbcfcfc799b46083b112a9a7abcab918
| 30,642 |
def get_setting_name_and_refid(node):
"""Extract setting name from directive index node"""
entry_type, info, refid = node['entries'][0][:3]
return info.replace('; setting', ''), refid
|
f72908c1f3adfc1d37f4760a240f68c66031dc19
| 30,643 |
import csv
def get_func_rep(thresh_results, input_comps, conf_values=True):
"""
Find the functional representation of a set of components based on the results of data mining
Parameters
----------
thresh_results : dict
The return dictionary from the "get_top_results" function
input_comps : string
The filename of a .csv file containing the components of a product
conf_values : bool
A boolean of whether or not to return the results with the frequency values, default is True
Returns
-------
learned_dict
        Returns a dictionary of function and flow combinations sorted by confidence for each component in input_comps.
The key is the component and the value is a list of function-flow combinations.
unmatched
Returns a list of components that were in the set of input components but not found in the data mining results.
"""
# Instances of each component are counted
counts = {}
# keep a list of components that were not learned in data mining
unmatched = []
with open(input_comps, encoding='utf-8-sig') as input_file:
for row in csv.reader(input_file, delimiter=','):
comp = row[0]
# Create a dictionary with each component
if comp not in counts:
counts[comp] = 1
else:
counts[comp] += 1
# Method of returning results with frequency values
    if conf_values:
res_with_conf = {}
# Inherit the values of thresh_results for each of the same component in input components
for k, v in counts.items():
if k in thresh_results:
res_with_conf[k] = thresh_results[k]
else:
if k not in unmatched:
unmatched.append(k)
return res_with_conf, unmatched
else:
# Method of returning results without frequency values
# List for keeping track of which function-flows happen for each component
keep_flows = []
# Dictionary for keeping CFF combinations from the learning set
learned_dict = {}
for k, v in counts.items():
if k in thresh_results:
for vs in thresh_results[k]:
# Append list of all of the function-flows for each component
keep_flows.append(vs[0])
# Save list of function-flows for each component
learned_dict[k] = keep_flows
# Reset list for each component
keep_flows = []
else:
if k not in unmatched:
unmatched.append(k)
return learned_dict, unmatched
|
a24d2f4833330dbcc33a422225e517e29b38f868
| 30,644 |
def version_sum(packet: dict) -> int:
"""
Recursively calculate the sum of version numbers in packet.
"""
return packet["version"] + sum(version_sum(sub) for sub in packet["subpackets"])
|
2559e2531c59d93f6bd00a625e7a1e21c6bdeaa1
| 30,645 |
import pickle
def load_clf(trained_mod):
"""Load a trained model from pickle file.
Args:
trained_mod (str): file path to pickle file.
Returns:
sklearn.classifier: A trained sklearn classifier.
"""
    # Model was saved with open(path, 'wb') + pickle.dump; load it back here.
with open(trained_mod, 'rb') as file:
model = pickle.load(file)
return model
|
21a9dbd4e5455e8909ed0f46b78cd5fb7d161b04
| 30,646 |
def r_perimeter(l, b):
"""Function for calculating Perimeter of Rectangle"""
return 2 * (l + b)
|
7860ebc843faf55a3ad893f4359e802775260a0f
| 30,648 |
def tree_pop_fields(root, fields):
"""deletes given fields (as iterable of keys) from root and all its children (recursively)
    returns the updated root """
for f in fields:
root.pop(f)
if root['is_leaf']: return root
for i in range(len(root['children'])):
root['children'][i]['child'] = tree_pop_fields(root['children'][i]['child'], fields)
return root
|
1dca88301219ad2a9c83642024ab0db08472b507
| 30,652 |
from typing import Tuple
from typing import Callable
def interpolate(p1: Tuple[int, float], p2: Tuple[int, float]) -> Callable[[int], float]:
""" Returns a function that linearly interpolates between these two points.
Implements the equation given in https://mathworld.wolfram.com/Two-PointForm.html
Args:
p1: A point (x1, y1) with the smaller x value.
p2: A point (x2, y2) with the larger x value.
Raises:
        ValueError: If x1 is greater than or equal to x2.
"""
x2, y2 = p2
x1, y1 = p1
if x1 >= x2:
raise ValueError(f"x2 ({x2}) must be greater than x1 ({x1}).")
slope = (y2 - y1) / (x2 - x1)
return lambda x: slope*(x - x2) + y2
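# A minimal usage sketch:
# >>> f = interpolate((0, 0.0), (10, 5.0))
# >>> f(4)
# 2.0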
|
2196e99e1ae22328d45047474cd5d5b092ee01ce
| 30,653 |
def count(value, node):
"""
Count number of list elements that match a value
:param value: value to search for
:param node: value of head node, start of list
:return: int: number of elements that match the value
"""
if node is not None:
if value == node.value: # basically same as length but only add if they match a value
return 1 + count(value, node.next_node)
return count(value, node.next_node)
return 0
|
05ffe8ce83e3fff981d8953090f6615463627e43
| 30,656 |
import torch
def clamp(image, min=0., max=1.):
"""Clamp values in input tensor exceeding (min, max) to (min, max)"""
return torch.clamp(image, min, max)
|
4b7fe6100d0e85a7ee1ae00a53df5a6616bd65c9
| 30,658 |
def split_out_internet_rules(rule_list):
"""Separate rules targeting the Internet versus normal rules"""
normal_rules = filter(lambda x: x.target_zone != 'internet', rule_list)
internet_rules = filter(lambda x: x.target_zone == 'internet', rule_list)
return list(normal_rules), list(internet_rules)
|
aa838ef7655658b3255c127f392c536bceb5a3bd
| 30,661 |
def _get_response_status(response) -> int:
"""Get the HTTP status code from any type of response object."""
if hasattr(response, "status"):
# aiohttp-style
return response.status
elif hasattr(response, "status_code"):
# starlette-style
return response.status_code
raise TypeError(f"Don't know how to find the path for {type(response)}")
|
1a9286db6277601240545e36c4a51536555a83d0
| 30,669 |
import click
def pywbem_error_exception(exc, intro=None):
"""
Return the standard click exception for a pywbem Error exception. These
exceptions do not cause interactive mode failure but display the exception
class and its str value and return to the repl mode.
Parameters:
exc (Exception): The pywbem Error exception.
intro (string): An additional message used as introduction for the
resulting click exception message. This message usually states what
cannot be done due to the error.
Returns:
click.ClickException: Click exception for the pywbem Error exception.
"""
if intro:
msg = "{}: {}: {}".format(intro, exc.__class__.__name__, exc)
else:
msg = "{}: {}".format(exc.__class__.__name__, exc)
return click.ClickException(msg)
|
3d99a69857d99e3e7c579a7e9be147574c9baf67
| 30,675 |
def getbinlen(value):
"""return the bit length of an integer"""
result = 0
if value == 0:
return 1
while value != 0:
value >>= 1
result += 1
return result
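# A minimal usage sketch (matches int.bit_length() except that 0 maps to 1):
# >>> getbinlen(5)  # 0b101
# 3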
|
523772f1c5eb856bff831e1565b2ff47fc19b2ff
| 30,679 |
def calculate_mean(some_list):
"""
Function to calculate the mean of a dataset.
Takes the list as an input and outputs the mean.
"""
return (1.0 * sum(some_list) / len(some_list))
|
d0374fc5321f6caa05f546e274490e906bf60106
| 30,681 |
def merge_media(forms, arg=None):
"""Merge media for a list of forms
Usage: {{ form_list|merge_media }}
* With no arg, returns all media from all forms with duplicates removed
Usage: {{ form_list|merge_media:'media_type' }}
* With an arg, returns only media of that type. Types 'css' and 'js' are common.
See Django documentation for more information about form media.
"""
if len(forms) == 0:
return ''
combined = forms[0].media
if len(forms) > 1:
for val in forms[1:]:
combined += val.media
if arg is None:
return str(combined)
return str(combined[arg])
|
e4885524e3ac6c8598f485f55fa915b6a4874001
| 30,683 |
import re
def split_filenames(text):
"""Splits comma or newline separated filenames
and returns them as a list.
"""
names = [name.strip()
for name in re.split(r'[\n,]', text)]
return list(filter(None, names))
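# A minimal usage sketch:
# >>> split_filenames("a.txt, b.txt\nc.txt")
# ['a.txt', 'b.txt', 'c.txt']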
|
85d53b77a81d6c1133068932a510ff3c9087a3cd
| 30,684 |
from typing import Dict
def _is_import_finished(log: Dict) -> bool:
"""Returns whether the import has finished (failed or succeeded)."""
return log['state'] not in ('QUEUED', 'RUNNING')
|
4dbb9ee522b210781bbc25542dd1ab86dc0cd397
| 30,690 |
from typing import Dict
def _query_dict_to_qs(dic: Dict[str, str]) -> str:
"""
{'k1': 'v1', 'k2': 'v2'} -> ?k1=v1&k2=v2
"""
if not dic:
return ''
return '?' + '&'.join(f'{k}={v}' for k, v in dic.items())
|
27e9d7de3da75a9ed589d2a40a00b6cc2461afcd
| 30,691 |
def extents_overlap(a_extent, b_extent):
"""Test if two extents overlap"""
    return not (a_extent.xmin > b_extent.xmax or
                a_extent.xmax < b_extent.xmin or
                a_extent.ymin > b_extent.ymax or
                a_extent.ymax < b_extent.ymin)
|
09f30e3982fd139b4208501236c2a0fc4a413b96
| 30,693 |
def get_high_lows_lookback(high, low, lookback_days):
"""
Get the highs and lows in a lookback window.
Parameters
----------
high : DataFrame
High price for each ticker and date
low : DataFrame
Low price for each ticker and date
lookback_days : int
The number of days to look back
Returns
-------
lookback_high : DataFrame
Lookback high price for each ticker and date
lookback_low : DataFrame
Lookback low price for each ticker and date
"""
# getting max price for high prices excluding present day
lookback_high = high.rolling(window=lookback_days).max().shift()
# getting min price for low prices excluding present day
lookback_low = low.rolling(window=lookback_days).min().shift()
return lookback_high, lookback_low
|
d24905db2ae2425f7d57e3af503802c597d0c212
| 30,698 |
def _get_weights(model, features):
"""
If the model is a linear model, parse the weights to a list of strings.
Parameters
----------
model : estimator
An sklearn linear_model object
features : list of str
The feature names, in order.
Returns
-------
list of str
The weights associated with each feature.
"""
try:
weights = model.coef_
intercept = model.intercept_
assert weights.shape[0] == 1
assert weights.shape[1] == len(features)
assert len(intercept) == 1
weights = list(weights.flatten())
except (AttributeError, AssertionError):
return None
col_width = max([len(f) for f in features]) + 2
txt_out = ["Feature" + " " * (col_width - 7) + "Weight"]
for weight, feature in zip(weights, features):
space = " " * (col_width - len(feature))
txt_out.append(feature + space + str(weight))
txt_out.append("intercept" + " " * (col_width - 9) + str(intercept[0]))
return txt_out
|
f26947922505cb3c06f1421238fdcde11064a686
| 30,705 |
import socket
def is_port_available(port: int, udp: bool = False) -> bool:
"""Checks whether a specified port is available to be attached to.
From `podman_compose <https://github.com/containers/podman-compose/blob/devel/podman_compose.py>`_.
Args:
port (int): The port to check.
udp (bool): Also check udp
Returns:
bool: True if available, False otherwise.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
in_use = s.connect_ex(('localhost', int(port))) == 0
if udp:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
in_use = s.connect_ex(('localhost', int(port))) == 0 or in_use
return not in_use
|
a963ad45477fc43bca1a356c6b76f8995f7df60b
| 30,706 |
def remove_character_at(str, idx):
"""Removes the character from str at index idx, returning the remaining string
str, int -> str
>>> remove_character_at("boats", 2)
'bots'
"""
return str[:idx] + str[idx+1:]
|
abc7bedb33c5c9e024dd8cf5830f3b3ee8b08f42
| 30,708 |
def get_scope(field):
"""For a single field get the scope variable
Return a tuple with name:scope pairs"""
name = field['name']
if 'scope' in field['field']:
scope = field['field']['scope']
else:
scope = ''
return (name, scope)
|
1b931ec1a7c5a629fe6b39034c23fd02568ed5a7
| 30,724 |
import torch
def quantile_features(density, q_vals):
"""
Input
- density: tensor of shape [n_samples]
- q_vals: list of numbers between 0 and 1 with the quantiles to use
Output
    - quantiles: tensor of shape [len(q_vals)]
"""
q_vals = torch.tensor(q_vals, dtype=density.dtype)
quantiles = torch.quantile(density, q_vals)
return quantiles
|
f720125b43250403a6164b7a94bd97f25cfea422
| 30,727 |
def And(s1, s2):
""" And(s1, s2) returns a new selector that selects a node only if BOTH
s1, and s2 select the node."""
return lambda x: s1(x) and s2(x)
|
2de67c6b7109bf6b12c187f85bf8ca4483289156
| 30,728 |
def get_cell(caves, row, col):
"""Get (row, col) cell in caves."""
return caves[row][col]
|
743068c3be8e0e60b56cc8f0c9c99a0cea07e4c2
| 30,731 |
def namebunch(abunch, aname):
"""give the bunch object a name, if it has a Name field"""
    if abunch.Name is not None:
        abunch.Name = aname
return abunch
|
d3a32d578ef604760d1f5adb009c96de519f0ec3
| 30,737 |
def convert_bytes_to_bits(byte_value):
""" Convert input bytes to bits """
return byte_value * 8
|
e6cda98e84b133dc48a19ebc3e98e79bd577bf47
| 30,740 |
import random
def _drawRandom(nbToDraw, maxValue, exclusion=None):
"""Draws random numbers from 0 to maxValue.
Args:
nbToDraw (int): number of numbers to draw
maxValue (int): max value for the numbers to draw
exclusion (set): numbers to exclude
"""
numbers = set()
while len(numbers) < nbToDraw:
choice = random.randrange(0, maxValue)
if not exclusion or choice not in exclusion:
numbers.add(choice)
return numbers
|
24e44dc52cce7722bb1074b747457fc160912664
| 30,753 |
def get_bound(atom, bound=None):
"""
Return appropriate `bound` parameter.
"""
if bound is None:
bound = atom.bound
if bound is None:
raise ValueError('either atom must be in bound '
+ 'mode or a keyword "bound" '
+ 'argument must be supplied')
return bound
|
f9223945011fbc056db170a943cf33fb09662920
| 30,761 |
import math
def solve_tangent_angle(distance, radius):
"""
Helper function to calculate the angle between the
centre of a circle and the tangent point, as seen from
a point a certain distance from the circle.
:Parameters:
distance: float
Distance of point from centre of circle.
radius: float
Radius of circle
:Returns:
tangent_angle: float
The tangent angle in radians, or None if there is
no solution.
"""
sinangle = float(radius) / float(distance)
if abs(sinangle) <= 1.0:
angle = math.asin(sinangle)
else:
angle = None
return angle
|
6db4a340f50d0fd426dbae3e2248624cc3c50563
| 30,766 |
import uuid
def _obtain_signed_blob_storage_urls(self, workspace_id, id_count=1, blob_path=None):
"""Obtain a signed blob storage url.
Returns:
[dict]: blob storage urls
[dict]: blob storage ids
"""
blob_url = f'{self.HOME}/{self.API_1}/project/{workspace_id}/signed_blob_url'
if blob_path:
id_set = {"ids": [f'{blob_path}/{str(uuid.uuid4())}' for i in range(id_count)]}
else:
id_set = {"ids": [str(uuid.uuid4()) for i in range(id_count)]}
response = self._auth_post(blob_url, body=None, json=id_set, return_response=True)
    urls = response.json()
    return urls, id_set
|
e6fa3e492930162ff7963ce0a8aedc2d91bd3583
| 30,773 |
import re
def gettime_s(text):
"""
Parse text and return a time in seconds.
    The text is of the format 0h:0min:0.0s:0ms:0us:0ns.
Spaces are not taken into account and any of the specifiers can be ignored.
"""
    pattern = r'([+-]?\d+\.?\d*) ?([mμunsih]+)'
matches = re.findall(pattern, text)
if len(matches) == 0:
return None
time = 0.
for res in matches:
tmp = float(res[0])
if res[1] == 'ns':
tmp *= 1e-9
        elif res[1] in (u'\u03BCs', 'us'):
tmp *= 1e-6
elif res[1] == 'ms':
tmp *= 1e-3
elif res[1] == 'min':
tmp *= 60
elif res[1] == 'h':
tmp *= 3600
time += tmp
return time
|
49f315b9f92dc04eea450f7d8b93a7f9bd08da14
| 30,774 |
def convert_openlayers_roi_to_numpy_image_roi(roi: list, image_height: int) -> list:
"""In both openlayers and numpy, the same roi format applies
Args:
roi (list): roi in format [x, y, width, height]
image_height (int): height of the original image from which the roi is cropped
Returns:
list: [description]
"""
[x, y, width, height] = roi
return [x, image_height - y - height, width, height]
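# A minimal worked example: a 20x10 roi at y=30 in a 100-pixel-tall image moves
# to y = 100 - 30 - 10 = 60 when the y axis is flipped:
# >>> convert_openlayers_roi_to_numpy_image_roi([5, 30, 20, 10], 100)
# [5, 60, 20, 10]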
|
6fe3247b0b1dcc7a9f9da23cbde1e42d71199d88
| 30,775 |
def _adjust_map_extent(extent, relocate=True, scale_ratio=1):
"""
    Adjust the extent (left, right, bottom, top) to a new starting point and
new unit. extent values will be divided by the scale_ratio
Example:
if scale_ratio = 1000, and the original extent unit is meter, then the
unit is converted to km, and the extent is divided by 1000
"""
if relocate:
left = 0
right = (extent[1]-extent[0])/scale_ratio
bottom = 0
top = (extent[3]-extent[2])/scale_ratio
else:
left = extent[0]/scale_ratio
right = extent[1]/scale_ratio
bottom = extent[2]/scale_ratio
top = extent[3]/scale_ratio
return (left, right, bottom, top)
|
ee1d6c4195daab7cc8473b05f334357d25b5b7b5
| 30,776 |
def fbexp(db, dp, rhog, rhos, umf, us):
"""
Bed expansion factor for calculating expanded bed height of a bubbling
fluidized bed reactor. See equations 14.7 and 14.8 in Souza-Santos [1]_.
Parameters
----------
db : float
Diameter of the bed [m]
dp : float
Diameter of the bed particle [m]
rhog : float
Density of gas [kg/m^3]
rhos : float
Density of bed particle [kg/m^3]
umf : float
Minimum fluidization velocity [m/s]
us : float
Superficial gas velocity [m/s]
Returns
-------
fbx : float
Bed expansion factor [-]
Example
-------
>>> umf = 0.1157
    >>> us = 3.0*umf
    >>> fbexp(0.05232, 0.0004, 0.4413, 2500, 0.1157, us)
1.4864
References
----------
.. [1] Marcio de Souza-Santos. Solid Fuels Combustion and Gasification:
Modeling, Simulation, and Equipment Operations. CRC Press, Taylor and
Francis Group, 2nd edition, 2010.
"""
if db < 0.0635:
# diameter of bed as db < 0.0635 m from Eq 14.7
tm1 = 1.032 * ((us - umf)**0.57) * (rhog**0.083)
tm2 = (rhos**0.166) * (umf**0.063) * (db**0.445)
fbx = 1 + (tm1 / tm2)
else:
# diameter of bed as db >= 0.0635 m from Eq 14.8
tm1 = 14.314 * ((us - umf)**0.738) * (dp**1.006) * (rhos**0.376)
tm2 = (rhog**0.126) * (umf**0.937)
fbx = 1 + (tm1 / tm2)
return fbx
|
c78b94639f1d6835ee490636e85d49f04b09ebe1
| 30,780 |
from typing import List
def write_floats_10e(vals: List[float]) -> List[str]:
"""writes a series of Nastran formatted 10.3 floats"""
vals2 = []
for v in vals:
v2 = '%10.3E' % v
if v2 in (' 0.000E+00', '-0.000E+00'):
v2 = ' 0.0'
vals2.append(v2)
return vals2
|
7e2f9b1a9e4560d3d9194c18601d22a57ed0811e
| 30,782 |
def dup_integrate(f, m, K):
"""
Computes the indefinite integral of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_integrate(x**2 + 2*x, 1)
1/3*x**3 + x**2
>>> R.dup_integrate(x**2 + 2*x, 2)
1/12*x**4 + 1/3*x**3
"""
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, K.exquo(c, K(n)))
return g
|
0f1981d699c4c80b61d4f0aececa1ccc4601712b
| 30,783 |
from typing import Dict
import torch
def inputs_to_cuda(inputs: Dict[str, torch.Tensor]):
"""
Move tensors in the inputs to cuda.
Args:
inputs (dict[str, torch.Tensor]): Inputs dict
Returns:
dict[str, torch.Tensor]: Moved inputs dict
"""
if not torch.cuda.is_available():
return inputs
for key, value in inputs.items():
if isinstance(value, torch.Tensor):
inputs[key] = value.cuda()
return inputs
|
1c67e915463ea04b2df03f3697a2eb83dedb07a2
| 30,784 |
def pruneNullRows(df):
"""
Removes rows that are all nulls.
:param pd.DataFrame df:
    :return pd.DataFrame: A new DataFrame with the all-null rows dropped.
        Note: dropna is not called with inplace=True, so the input DataFrame
        is left unchanged.
"""
return df.dropna(axis=0, how='all')
|
af0a34bed71f937d6ff970f521d5f82720fffdc9
| 30,785 |
def load_ed25519_vectors(vector_data):
"""
djb's ed25519 vectors are structured as a colon delimited array:
0: secret key (32 bytes) + public key (32 bytes)
1: public key (32 bytes)
2: message (0+ bytes)
3: signature + message (64+ bytes)
"""
data = []
for line in vector_data:
secret_key, public_key, message, signature, _ = line.split(':')
secret_key = secret_key[0:64]
signature = signature[0:128]
data.append({
"secret_key": secret_key,
"public_key": public_key,
"message": message,
"signature": signature
})
return data
|
618ea06c408d131664bbfe0b4350fee5e6a3edd0
| 30,786 |
from bs4 import BeautifulSoup
def parse_html(html: str) -> BeautifulSoup:
"""Parse the HTML with Beautiful Soup"""
return BeautifulSoup(html, features="html.parser")
|
8e10667747f24b9f9790b2b512bc9d5635ec7cd9
| 30,787 |
import hashlib
def convert_email(email):
""" MD5 hash the email address """
email = email.strip().encode('utf-8').lower()
return hashlib.md5(email).hexdigest()
|
a556147ffb9111b6001c4d76f6cd82c3442e115e
| 30,788 |
def get_n_lines(fin: str, size: int = 65536) -> int:
"""Given a filename, return how many lines (i.e. line endings) it has.
:param fin: input file
:param size: size in bytes to use as chunks
:return: number of lines (i.e. line endings) that `fin` has
"""
# borrowed from https://stackoverflow.com/a/9631635/1150683
def blocks(fh):
while True:
b = fh.read(size)
if not b:
break
yield b
with open(str(fin), encoding="utf-8") as fhin:
return sum([bl.count("\n") for bl in blocks(fhin)])
|
0259c71681a9779e3df311ff03010262ded8f058
| 30,790 |
import pytest
@pytest.fixture
def auth_token(pytestconfig):
"""Get API token from command line"""
return pytestconfig.getoption("token")
|
419a0e617f242ac9b657b7b397e8b06e447a7efe
| 30,796 |
def conv_params(sz_in: int, sz_out: int):
"""Solves for filter_size, padding and stride per the following equation,
sz_out = (sz_in - filter_size + 2*padding) / stride + 1
Attempts to find a solution by iterating over various filter_size, stride and padding
in that order. If no solution is found, raises an error
"""
filter_size, stride, padding = [3,2,4,5], [2,1,3], [1,2,3]
for f in filter_size:
for s in stride:
for p in padding:
if ((sz_in - f + 2*p) / s + 1) == sz_out:
return (f, s, p)
raise Exception("Unable to find valid parameters for {0} to {1} convolution".format(sz_in, sz_out))
|
86c1a2437231d2fb6515a6581719d3568cdee813
| 30,799 |
def get_model_io_names(model):
"""Gets names of the input and output nodes of the model
Args:
model (keras Model): model to parse
Returns:
inputs (list): names of all the input nodes
outputs (list): names of all the output nodes
"""
num_inputs = len(model.inputs)
num_outputs = len(model.outputs)
inputs = []
outputs = []
for i in range(num_inputs):
nm = model.inputs[i].name.split(':')[0].split('/')[0]
inputs.append(nm)
for i in range(num_outputs):
nm = model.outputs[i].name.split(':')[0].split('/')[0]
outputs.append(nm)
return inputs, outputs
|
b8bc93bd2bf01597b16eaee5bc0f1a210e185dbe
| 30,800 |
def get_value(input_data, field_name, required=False):
"""
Return an unencoded value from an MMTF data structure.
:param input_data:
:param field_name:
:param required:
:return:
"""
if field_name in input_data:
return input_data[field_name]
elif required:
raise Exception('ERROR: Invalid MMTF File, field: {} is missing!'.format(field_name))
else:
return None
|
3e4ec623528f279a61b5ad9897935a5fda8af2d1
| 30,806 |
def seconds_to_string(seconds):
"""
Format a time given in seconds to a string HH:MM:SS. Used for the
'leg time/cum. time' columns of the table view.
"""
hours, seconds = divmod(int(seconds), 3600)
minutes, seconds = divmod(seconds, 60)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
|
23db1370e887a9dad3d6dbd40bc2f25c244f1f77
| 30,807 |
def get_min_max(arr):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
arr(list): list of integers containing one or more integers
Returns:
(int, int): A tuple of min and max numbers.
"""
if len(arr) == 0:
return None, None
min_number = max_number = arr[0]
for number in arr:
if number < min_number:
min_number = number
if number > max_number:
max_number = number
return min_number, max_number
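# A minimal usage sketch:
# >>> get_min_max([3, 1, 4, 1, 5])
# (1, 5)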
|
d7cd2304092c766bfd0ffcb2235e7ad0c6428e61
| 30,814 |
def size_for(s):
"""
This function takes a string representing an amount of bytes and converts
it into the int corresponding to that many bytes. The string can be a plain
int which gets directly converted to that number of bytes or can end in a specifier
such as 100k. This indicates it is 100 kilobytes and so is translated into 100000
for example
size_for('1000') == 1000
size_for('10m') == 10000000
size_for('1000k') == size_for('1m')
etc.
The valid specifiers are
k = kilo (1000)
m = mega (1000000)
g = giga (1000000000)
"""
s = s.strip()
try:
return int(s)
except ValueError:
pass
d = s[-1]
v = s[:-1]
m = 1
if d == 'k':
m = 1000
elif d == 'm':
m = 1000000
elif d == 'g':
m = 1000000000
return int(v) * m
|
3e88f0555f0ab1b06432d87c5ebca7f33b24d1c7
| 30,816 |
def _is_whitenoise_installed() -> bool:
"""
Helper function to check if `whitenoise` is installed.
"""
    try:
        import whitenoise  # noqa: F401  (imported only to probe availability)
        return True
except ModuleNotFoundError:
pass
return False
|
3732d32de4fae1d9f65baeb481c9eb6a6dcdd7bd
| 30,820 |
def sanitize_version_number(version):
"""Removes common non-numerical characters from version numbers obtained from git tags, such as '_rc', etc."""
if version.startswith('.'):
version = '-1' + version
version = version.replace('_rc', '.')
return version
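# A minimal usage sketch (hypothetical git tag):
# >>> sanitize_version_number('1.2_rc3')
# '1.2.3'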
|
4627ce6ad06046b575da3a272e8d8acc41183000
| 30,822 |
def get_cells(worksheet, get_range: str):
"""
Get cells from sheet
params
------
    worksheet: openpyxl.WorkSheet
        Loaded worksheet.
get_range: str
Get cells range.
Ex: "A1:B3"
return
------
cells: Tuple[Cell]
        Tuple of the cells in the requested range
"""
cells = worksheet[get_range]
return cells
|
179c20419975daac5913b149efb60b4cc22537d9
| 30,825 |
import torch
def ent_loss(probs):
"""Entropy loss"""
ent = -probs * torch.log(probs + 1e-8)
return ent.mean()
|
4ccd777d3b434b3d1c79f36c735cf6252d749587
| 30,827 |
def IPRange(first, last):
"""
Generate a list of IP addresses
Args:
first: the first IP in the range
last: the last IP in the range
Returns:
A list of IPs from first to last, inclusive (list of str)
"""
all_ips = []
ip = first
while ip <= last:
all_ips.append(str(ip))
ip += 1
return all_ips
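# A minimal usage sketch (assumes ipaddress-style address objects, which
# support comparison, integer addition, and str()):
# >>> from ipaddress import IPv4Address
# >>> IPRange(IPv4Address('10.0.0.1'), IPv4Address('10.0.0.3'))
# ['10.0.0.1', '10.0.0.2', '10.0.0.3']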
|
16bd7302b02e0b15b85edb8a60bfc7749744b3fe
| 30,828 |
from datetime import datetime
def python_type_to_sql_type(_python_type):
"""
    Convert a Python data type to an SQL type.
:param _python_type: A Python internal type
"""
if _python_type == str:
return 'string'
elif _python_type == bytes:
return "blob"
elif _python_type == float:
return "float"
elif _python_type == int:
return "integer"
elif _python_type == datetime:
return "datetime"
elif _python_type == bool:
return "boolean"
else:
raise Exception("python_type_to_sql_type: _type_code \"" + str(_python_type) + "\"not supported")
|
d74c0a8e8b1ef2340e1fc1decddcd60aba718570
| 30,832 |
import re
def string_to_list(s):
"""Return a list of strings from s where items are separated by any of , ; |"""
try:
return [text for text in re.split(r'\s*[,;\|]\s*', s) if text]
except TypeError:
if type(s) == list:
return s
raise
|
4e679bfaf0d51120a2194a4db173d34a9eaf47d0
| 30,834 |
def standardise_name(name):
"""
    Standardise field names: Survey (Title) -> survey_title
"""
result = name.lower().replace(" ", "_").replace("(", "").replace(")", "")
# remove any starting and ending "_" that have been inserted
start_loc = 1 if result[0] == "_" else 0
loc = result.rfind("_")
end_loc = loc if loc == (len(result) - 1) else len(result)
return result[start_loc:end_loc]
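# A minimal usage sketch (the example from the docstring):
# >>> standardise_name("Survey (Title)")
# 'survey_title'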
|
af9c5b52c1c7fc86ea758cb29dabb2f6405bb16e
| 30,836 |
from typing import Iterable
def check_all_dicts(iterable_dict: Iterable[dict]):
"""Check if Iterable contains all dictionaries
Args:
iterable_dict (Iterable[dict]): Iterable of dictionaries
"""
# Check if dict
def check_dict(d):
return isinstance(d, dict)
# Check if all instances are type dict, return True or False
all_dict = all(map(check_dict, iterable_dict))
    if not all_dict:
        raise TypeError("Iterable has mixed types, expected Iterable[dictionaries]")
return True
|
0e87989d600d303e9bdadf04725c398841bcd214
| 30,839 |
def _get_timezone_name(timezone):
"""
Return the offset for fixed offset timezones, or the name of timezone if
not set.
"""
return timezone.tzname(None) or str(timezone)
|
4cb02cdf53269b328c727eaa11c3d16acd99e3bb
| 30,841 |
import torch
def trailing_silence_mask(f0):
"""
>>> f0 = torch.tensor([1.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0])
>>> trailing_silence_mask(f0)
tensor([False, False, False, False, True, True, True])
"""
assert f0.ndim == 1
mask = ((f0.flip(0) != 0.0).cumsum(0) == 0).flip(0)
return mask
|
03c76e96a94d9c80ca9ab38e5ce735bc161d1929
| 30,847 |
def df_if_two_one(value):
""" Final Data Cleaning Function
    - This is run against station, latitude, longitude, and elevation for individual records
- Many of these records have usable data, so don't want to just throw them out.
- Example issues:
- Instead of a station of '000248532' a value may contain '000248532 000248532'
- Both are equal - function returns the first one
- Instead of a latitude of '29.583' a value may contain '29.583 29.58333333'
    - This is from raw csv data files where they changed the number of decimal points used
part of the way through a year.
- Function converts both to integers, which rounds up to the nearest whole number. If both
whole numbers are equal, then the function returns the first value from the original pair.
- exception handler:
- ValueError -> str of '': Looks like some empty strings for latitude and/or longitude slipped
through data cleaning. This handles those.
Args:
value (str): value to check and clean if needed
Returns: str
"""
try:
split = value.split(' ')
if len(split) > 1:
if '.' in split[0]:
if int(float(split[0])) == int(float(split[1])):
return split[0]
elif split[0] == split[1]:
return split[0]
return value
except ValueError as e:
if "could not convert string to float: ''" not in str(e):
raise ValueError(e)
return value
|
c417c683e2cedb37b2c557a78e358112f060edfe
| 30,848 |
import hashlib
def hex_hash(path):
"""
Return the first 2 hex digits of the md5 of the given path.
Suitable for creating sub dirs to break up a large directory
"""
return hashlib.md5(path).hexdigest()[:2]
|
b3629cd8034e1944cdb3998592d1caca96deacb9
| 30,851 |
def get_manhattan_distance(node):
"""Function to calculate the manhattan distance for a
particular configuration
Parameters
----------
node : [list]
[list to check for the heuristics]
Return
------
[int]
[returns the heuristic distance for a particular node]
"""
h_score = 0
node = list(node)
for i in range(9):
        # row = value // 3, column = value % 3 on the 3x3 board
        h_score += abs(node[i] // 3 - i // 3) + abs(node[i] % 3 - i % 3)
return h_score
|
99d2b8828babf09509984289bf460914aa0eac69
| 30,854 |
def within_date(date_min, date_max, current_date):
"""
    Test if a provided date is greater than or equal to a min date and less than a max date
    """
    return date_min <= current_date < date_max
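# A minimal usage sketch (works with any comparable date-like values):
# >>> from datetime import date
# >>> within_date(date(2020, 1, 1), date(2021, 1, 1), date(2020, 6, 15))
# True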
|
44d96e463b97fa9ca82e34b0c2bed3694959b525
| 30,857 |
def find_episode(episode_id, seasons):
"""
Return metadata for a specific episode from within a nested
metadata dict.
Returns an empty dict if the episode could not be found.
"""
for season in seasons:
for episode in season['episodes']:
if str(episode['id']) == episode_id:
return episode
return {}
|
64255ca8e330c3b45768704644ac8bfddbfc1416
| 30,862 |
def child_or_children(value):
""" Return num followed by 'child' or 'children' as appropriate """
try:
value = int(value)
except ValueError:
return ''
if value == 1:
return '1 child'
    return '%d children' % value
|
a22be46a3fd1086dac116c187189204b5ea1a6db
| 30,864 |
def _decoding_base_info(encoded_info):
"""
Decode base info
Args:
encoded_info(list or dict): encoded base info
"""
if isinstance(encoded_info, dict):
return encoded_info
base_info = dict()
for item in encoded_info:
base_info[item['symbol']] = item['base']
return base_info
|
d47e7940af8dc1f42168d5630d95345a6111c865
| 30,868 |
def is_table_taxa_alike(feature_table1, feature_table2):
"""This method checks if `feature_table2` instance contains same taxonomy
as `feature_table1`
Parameters
----------
feature_table1
First FeatureTable
feature_table2
Second FeatureTable
Returns
-------
bool
True if taxonomies are same. False otherwise
"""
feature_table1_lineage_sorted = (
feature_table1.taxonomy.loc[:, "lineage"]
.sort_values(axis=0)
.reset_index(drop=True)
)
feature_table2_lineage_sorted = (
feature_table2.taxonomy.loc[:, "lineage"]
.sort_values(axis=0)
.reset_index(drop=True)
)
return feature_table1_lineage_sorted.equals(feature_table2_lineage_sorted)
|
e4fef557c168c885917d8183f3d0f0ab3969abb6
| 30,873 |
def _gen_eval_kwargs(name):
"""
Find the keyword arguments to pass to numexpr for the given operation.
Parameters
----------
name : str
Returns
-------
eval_kwargs : dict
Examples
--------
>>> _gen_eval_kwargs("__add__")
{}
>>> _gen_eval_kwargs("rtruediv")
{"reversed": True, "truediv": True}
"""
kwargs = {}
# Series and Panel appear to only pass __add__, __radd__, ...
# but DataFrame gets both these dunder names _and_ non-dunder names
# add, radd, ...
name = name.replace('__', '')
if name.startswith('r'):
if name not in ['radd', 'rand', 'ror', 'rxor']:
# Exclude commutative operations
kwargs['reversed'] = True
if name in ['truediv', 'rtruediv']:
kwargs['truediv'] = True
if name in ['ne']:
kwargs['masker'] = True
return kwargs
|
17fc51c954ada4170a6fcfa68dda4018faa71cac
| 30,876 |
def GetNiceArgs(level: int):
"""Returns the command/arguments to set the `nice` level of a new process.
Args:
level: The nice level to set (-20 <= `level` <= 19).
"""
if level < -20 or level > 19:
raise ValueError(
f"The level must be >= -20 and <= 19. The level specified is {level}.")
return ["nice", "-n", str(level)]
|
6805178232e96caea19035b4286d7d9dddff8a88
| 30,877 |
from datetime import datetime
import pytz
def utc_to_unix(t):
""" UTC Y-M-D -> UTC unix time (ignore float second point)
t = "2000-01-01T00:00:00.111" """
t = t.split('.')[0]
dt = datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
tz = pytz.timezone('UTC')
dt = tz.localize(dt)
unix_time = int(dt.timestamp())
return unix_time
|
7f870d05bb3382923a2f9485194c3435673e4b77
| 30,878 |
def get_involved_objects(config):
"""Given a pytest config, get the list of objects specified via the
--involving flag"""
return config.getoption("--involving") or []
|
8da5599eb30bcd1a4960eefa8ed235b989badff2
| 30,880 |
import pytest
@pytest.fixture(scope="session")
def tmpdirec(tmp_path_factory):
"""Pytest fixture instantiating a new session-scope "data" folder.
Parameters
----------
    tmp_path_factory :
        Pytest fixture for creating temporary directories.
"""
return tmp_path_factory.mktemp("data")
|
870e81aa93a95e9ce28be1c4a902f213ca13c626
| 30,884 |
def safe_subpath(path, altitudes, h):
"""
Computes the maximum subpath of path along which the safety constraint is
not violated
Parameters
----------
path: np.array
Contains the nodes that are visited along the path
altitudes: np.array
1-d vector with altitudes for each node
h: float
Safety threshold
Returns
-------
subpath: np.array
Maximum subpath of path that fulfills the safety constraint
"""
# Initialize subpath
subpath = [path[0]]
# Loop through path
for j in range(len(path) - 1):
prev = path[j]
succ = path[j + 1]
# Check safety constraint
if altitudes[prev] - altitudes[succ] >= h:
subpath = subpath + [succ]
else:
break
return subpath
|
179fc42254a76ef4247140d7292d547c6b2681b6
| 30,889 |
def toggle_active_links(pathname):
"""Toggles active menu links based on url pathname
Args:
pathname (str): Url pathname
Returns:
bool: Active state for each page
"""
if pathname in ["/datyy/", "/datyy/summary"]:
return True, False, False, False, False
if pathname == "/datyy/orders":
return False, True, False, False, False
if pathname == "/datyy/products":
return False, False, True, False, False
if pathname == "/datyy/projects":
return False, False, False, True, False
if pathname == "/datyy/support":
return False, False, False, False, True
return False, True, False, False, False
|
9d362d2a3d57d16c9163a4b09cabdd730f6ebb5a
| 30,894 |
def run_one_day(fish: list[int], start_time: int = 6, new_time: int = 8):
"""Runs one day of lanternfish reproducing."""
fishes = fish.copy()
for i, f in enumerate(fish):
if f == 0:
fishes[i] = start_time
fishes.append(new_time)
else:
fishes[i] = f - 1
return fishes
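# A minimal usage sketch: timers at 0 reset to 6 and spawn a new fish at 8,
# all others tick down by one:
# >>> run_one_day([0, 3, 6])
# [6, 2, 5, 8]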
|
ca4b1e533ba1604eaa688f4d7996ceafad3a7ed4
| 30,903 |
def sex2dec(rain,decin):
"""
Converts sexagesimal coordinates to decimal. HMS and DMS separated by colons (:)
Parameters
----------
rain : str
input Right Ascension as a sexagesimal string -- e.g., '03:45:6789'
decin : str
input Declination as a sexagesimal string -- e.g., '-12:34:5678998765'
Returns
-------
list
[12.345678, -10.987654]
"""
if ':' in rain: ra=[float(val)*360./24 for val in rain.split(':')]
else: ra=[float(val)*360./24 for val in rain.split(' ')]
raout=ra[0]+ra[1]/60.+ra[2]/3600.
if ':' in decin: dec=[float(val) for val in decin.split(':')]
else: dec=[float(val) for val in decin.split(' ')]
if dec[0]<0: decout=dec[0]-dec[1]/60.-dec[2]/3600.
else: decout=dec[0]+dec[1]/60.+dec[2]/3600.
return [raout,decout]
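# A minimal usage sketch (hypothetical coordinates):
# >>> sex2dec('12:30:00', '-10:30:00')
# [187.5, -10.5]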
|
82a4fa431e483f59ed0fef0acd403714d18806e0
| 30,912 |
def ttfAutohintDict( parameterValue ):
"""Returns a dict for a TTFAutohint parameter value."""
ttfAutohintDict = {}
for ttfAutohintOption in parameterValue.split("--"):
if "=" in ttfAutohintOption:
[key, value] = ttfAutohintOption.split("=")
value = value.strip()
else:
key = ttfAutohintOption
value = None
if key:
ttfAutohintDict[key.strip(" -")] = value
return ttfAutohintDict
|
aa9839f64c9eefb1404238c8689e4826d8e525fd
| 30,913 |
def _fmt_date(date_as_bytes):
"""Format mail header Date for humans."""
date_as_string = date_as_bytes.decode()
_month = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
date_list = date_as_string.split(',')
# week = date_list[0].replace(' ', '')
date_list = date_list[1].split(' ')
date_list = list(filter(lambda x: x != '', date_list))
day = date_list[0]
month = _month[date_list[1]]
year = date_list[2]
times = date_list[3]
time_zone = date_list[4]
return '{}-{}-{} {} {}'.format(year, month, day, times, time_zone)
|
e1e273eb22d60ca945ce9b065f6c4b8cf62cf82e
| 30,914 |
from typing import Any
def parse_error(err: Any, raw: bool = True) -> dict:
"""
Parse single error object (such as pydantic-based or fastapi-based) to dict
:param err: Error object
:param raw: Whether this is a raw error or wrapped pydantic error
:return: dict with name of the field (or "__all__") and actual message
"""
message = err.msg or ""
if not raw:
if len(err.loc) == 2:
if str(err.loc[0]) == "body":
name = err.loc[1]
else:
name = err.loc[0]
elif len(err.loc) == 1:
if str(err.loc[0]) == "body":
name = "__all__"
else:
name = str(err.loc[0])
else:
name = "__all__"
else:
if len(err.loc) == 2:
name = str(err.loc[0])
message = f"{str(err.loc[1]).lower()}: {message}"
elif len(err.loc) == 1:
name = str(err.loc[0])
else:
name = "__all__"
return {"name": name, "message": message.capitalize()}
|
73bb041e3d6e2259cf390d42485a9e9b7e77abba
| 30,917 |
from datetime import datetime
import pytz
def from_timestamp(timestamp):
"""
Transform a UNIX UTC timestamp to a Python datetime object.
"""
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp, tz=pytz.UTC)
|
85bf1f0c5d4fb8395e86acce7a322885ec565e16
| 30,920 |
def rollout(env, maxsteps=100):
""" Random policy for rollouts """
G = 0
for i in range(maxsteps):
action = env.action_space.sample()
_, reward, terminal, _ = env.step(action)
G += reward
if terminal:
return G
return G
|
cef1043e82e048999f89e1ca6ed6011a62b83aaa
| 30,921 |
from typing import Tuple
from typing import List
def read_fastq(filename: str) -> Tuple[List[str], List[str]]:
"""
Reads sequences and qualities from a .fastq file
filename: relative or absolute path of the .fa file to be read from
Returns:
List of sequence reads
List of qualities corresponding to each sequence read
"""
reads = []
qualities = []
with open(filename, "r") as f:
while True:
f.readline()
read = f.readline().rstrip()
f.readline()
seq_qualities = f.readline().rstrip()
if len(read) == 0:
break
reads.append(read)
qualities.append(seq_qualities)
return reads, qualities
|
cd4ffc29b2cd7b76b256c82e7ed438939e5c6ec4
| 30,925 |
def enable_runtime_call_stats() -> dict:
"""Enable run time call stats collection.
**Experimental**
"""
return {"method": "Profiler.enableRuntimeCallStats", "params": {}}
|
e9af8c51a8ab8e2c10f0023bebecd8703ce09b08
| 30,926 |
def create_bed_info_gp(gp):
"""Creates the block_starts, block_sizes and exon_frames fields from a GenePredTranscript object"""
block_starts = ','.join(map(str, gp.block_starts))
block_sizes = ','.join(map(str, gp.block_sizes))
exon_frames = ','.join(map(str, gp.exon_frames))
return block_starts, block_sizes, exon_frames
|
260ecdef20f4ec25e873b978e644e5d90755774e
| 30,929 |
def char_ngrams(n, word, **kwargs):
"""This function extracts character ngrams for the given word
Args:
n (int): Max size of n-gram to extract
word (str): The word to be extract n-grams from
Returns:
list: A list of character n-grams for the given word
"""
del kwargs
char_grams = []
for i in range(len(word)):
# if char ngram of length n doesn't exist, if no ngrams have been extracted for the token,
# add token to the list and return. No need to compute for other windows.
# Ex: token is "you", n=4, return ["you"], token is "doing", n=4 return ["doin","oing"]
if len(word[i : i + n]) < n:
if not char_grams:
char_grams.append((word[i : i + n]))
return char_grams
char_grams.append((word[i : i + n]))
return char_grams
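# A minimal usage sketch (the examples from the inline comment):
# >>> char_ngrams(4, "doing")
# ['doin', 'oing']
# >>> char_ngrams(4, "you")
# ['you']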
|
27d46d014198e7290d98bfc8e31aa24f74454b48
| 30,930 |
import copy
def redact_loc(image_meta, copy_dict=True):
"""
Create a shallow copy of image meta with 'location' removed
for security (as it can contain credentials).
"""
if copy_dict:
new_image_meta = copy.copy(image_meta)
else:
new_image_meta = image_meta
new_image_meta.pop('location', None)
new_image_meta.pop('location_data', None)
return new_image_meta
|
f34e0577510c6cc05b1e36e02a48d9be2722c777
| 30,934 |
import json
from collections import OrderedDict
def sort_request(request):
"""
Sort a JSON-RPC request dict.
This has no effect other than making the request nicer to read.
>>> json.dumps(sort_request(
... {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))
'{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}'
Args:
request: JSON-RPC request in dict format.
"""
sort_order = ["jsonrpc", "method", "params", "id", "session", "verbose"]
return OrderedDict(sorted(request.items(), key=lambda k: sort_order.index(k[0])))
|
0602f0e65845d942f39db0cd1dac18923e00d0b4
| 30,935 |
def get_mf6_mshape(disfile):
"""Return the shape of the MODFLOW 6 model.
Parameters
----------
disfile : str
path to a MODFLOW 6 discretization file
Returns
-------
mshape : tuple
tuple with the shape of the MODFLOW 6 model.
"""
with open(disfile, "r") as f:
lines = f.readlines()
d = {}
for line in lines:
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
for key in ["NODES", "NCPL", "NLAY", "NROW", "NCOL"]:
if ll[0].upper() in key:
d[key] = int(ll[1])
if "NODES" in d:
mshape = (d["NODES"],)
elif "NCPL" in d:
mshape = (d["NLAY"], d["NCPL"])
elif "NLAY" in d:
mshape = (d["NLAY"], d["NROW"], d["NCOL"])
else:
print(d)
raise Exception("Could not determine model shape")
return mshape
|
32f25a2a8a49737296bf3f5c4d6c8bc2768e935a
| 30,938 |
def para_size_greater_than_n(para_list, n = 1):
"""
Returns paragraphs whose length are greater than n
:param para_list: a list of paragraphs
:param n: paragraphs having length >n are selected
:return: list of paragraphs having length >n
"""
    return [para for para in para_list if len(para) > n]
|
fb8b2a43f43b70821ae1a9be21fad39440ce75dd
| 30,943 |