content | sha1 | id
---|---|---
import requests
def does_exist(url: str) -> bool:
"""Determines if a particular file exists on the server."""
resp = requests.head(url)
    return resp.status_code == requests.codes.ok
|
b506d654e5a89a1a35911e8c8089bcc442169e6b
| 35,830 |
from collections import OrderedDict
def build_fhir_id(key1, value1, key2, value2, key3, value3):
"""
    Construct an OrderedDict for an ID from three key/value pairs.
    :param key1: first key
    :param value1: value stored under key1
    :param key2: second key
    :param value2: value stored under key2
    :param key3: third key
    :param value3: value stored under key3
    :return: OrderedDict with the three pairs inserted in order
"""
id_info = OrderedDict()
id_info[key1] = value1
id_info[key2] = value2
id_info[key3] = value3
return id_info
|
f2db277f21683b3ce910b5398b90e607f9cf6a40
| 35,831 |
def is_basic_type(signature):
"""Returns True if the signature is a basic type
'a', '(', '{', and 'v' are not considered basic types because they usually
cannot be handled the same as other types."""
    basic_types = ('b', 'd', 'g', 'i', 'n', 'o', 'q', 's', 't', 'u', 'x', 'y')
return signature in basic_types
|
5a454a699e6e7c0f89806f3cdba12ce78e42477e
| 35,838 |
import re
def correct_namespace(name, api_name, env_name) -> bool:
"""
Checks that a name of a thing we want to create in Apigee matches our namespacing conventions.
e.g. for api_name="canary-api" and env_name="internal-dev"
|--------------------------------------------------------------+--------|
| name | result |
|--------------------------------------------------------------+--------|
| "canary-api-internal-dev" | True |
| "canary-api-extra-thing-internal-dev" | True |
| "canary-apiinternal-dev" | False |
| "canary-api-internal-dev-application-restricted" | True |
| "canary-api-extra-thing-internal-dev-application-restricted" | True |
|--------------------------------------------------------------+--------|
:param name: Name of thing in Apigee.
:param api_name: The meta.api.name item from your manifest
:param env_name: The environment name (e.g. 'internal-dev', 'int', or 'prod')
"""
regex = f"^{api_name}(-[a-z]+)*-{env_name}(-[a-z]+)*$"
return bool(re.match(regex, name))
|
8e812a5e2729779837b85eed29ff9bb7a4a05953
| 35,841 |
def confusion_matrix(dataset, subgroup, targetColumn):
"""Returns the confusion matrix of a dataset with a subgroup."""
total_rows = len(dataset)
#Calculate the complement of the dataset over the subgroup
complement = dataset[~dataset.index.isin(subgroup.index)]
#Elements of confusion matrix
subgroup_pos_target_rate = len(subgroup[subgroup[targetColumn] == 1]) / total_rows
subgroup_neg_target_rate = len(subgroup[subgroup[targetColumn] == 0]) / total_rows
complement_pos_target_rate = len(complement[complement[targetColumn] == 1]) / total_rows
complement_neg_target_rate = len(complement[complement[targetColumn] == 0]) / total_rows
return [[subgroup_pos_target_rate,complement_pos_target_rate],
[subgroup_neg_target_rate,complement_neg_target_rate]]
|
bffd2c8f2aebc7d5e3f41373cfa8291797f122df
| 35,846 |
from typing import Any
from typing import Callable
def _map(obj: Any, fn: Callable) -> Any:
"""Recursively maps a function to a nested object.
If the passed object is a list, dictionary, set, or tuple, then all child
elements are recursively mapped.
Args:
obj: The object to map against
fn: The function to map
    Returns:
        A new object with fn applied to every leaf element; sets and
        tuples are rebuilt with their original type
"""
if isinstance(obj, dict):
return {k: _map(v, fn) for k, v in obj.items()}
elif (
isinstance(obj, list) or isinstance(obj, set) or isinstance(obj, tuple)
):
        return type(obj)(_map(v, fn) for v in obj)
else:
return fn(obj)
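# A usage sketch, not part of the original snippet; the nested input is
# hypothetical. It doubles every numeric leaf of a mixed structure.
nested = {"a": [1, 2, {"b": (3, 4)}], "c": 5}
print(_map(nested, lambda v: v * 2))  # {'a': [2, 4, {'b': (6, 8)}], 'c': 10}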
|
cecbfec90f2a870624b9f5ff51d5f4b9ed3865c7
| 35,849 |
def str2int(val, base=None):
"""String to integer conversion"""
try:
if isinstance(val, int) or val is None:
return val
elif base:
return int(val, base)
        elif val.lower().startswith(('0x', '-0x')):
            return int(val, 16)
        elif val.lower().startswith(('0b', '-0b')):
            return int(val, 2)
        else:
            return int(val)
    except (ValueError, TypeError, AttributeError) as e:
        raise ValueError("Can't convert '%s' to int!" % val) from e
|
b4c9a4329670bf28f01b292f1686800f6186d487
| 35,854 |
def get_commun_films(df_movies,Actor1,Actor2):
"""
Function that gives the movies in which two actors have played
Parameters
----------
df_movies : dataframe
IMDb movie database
Actor1 : string
name of the first actor entered by the user via the Tkinter interface
Actor2 : string
name of the second actor entered by the user via the Tkinter interface
Returns
-------
result : string
result displayed in the graphical interface
"""
Actor1 = Actor1.lower()
Actor2 = Actor2.lower()
    # 1 - Build the dataframe
    # Filter movies on both actors' names
    df = df_movies[df_movies['actors_low'].str.contains(Actor1, na=False)]
    df = df[df['actors_low'].str.contains(Actor2, na=False)]
    # 2 - Extract the information
    list_film = list(df['original_title'])
    liste = ""
    for element in list_film:
        liste = liste + "- " + element + "\n"
    # 3 - Store all the collected information in a string variable called result
    if liste == "":
        result = "These 2 actors have not played in any film together yet."
    else:
        result = "These 2 actors have played together in the following films:\n" + liste
return result
|
d716138ff19b3a58c668a1f9b050e16bde927ffe
| 35,859 |
def echo_worker(data):
"""
Example of worker that simply echoes back the received data.
:param data: Request data dict.
    :returns: data, echoed back unchanged
"""
return data
|
d694b301aefdcb1631567b3e1c269b24aa827824
| 35,860 |
from typing import Sequence
from typing import Tuple
def get_default_powerup_distribution() -> Sequence[Tuple[str, int]]:
"""Standard set of powerups."""
return (('triple_bombs', 3), ('ice_bombs', 3), ('punch', 3),
('impact_bombs', 3), ('land_mines', 2), ('sticky_bombs', 3),
('shield', 2), ('health', 1), ('curse', 1))
|
1e125dfe64627b25e56e9f905d4fc8cc1a878684
| 35,863 |
def interpolate(x0, y0, x1, y1, x):
"""Linear interpolation between two values
Parameters
----------
    x0: int or float
        Lower x-value
    y0: int or float
        Lower y-value
    x1: int or float
        Upper x-value
    y1: int or float
        Upper y-value
    x: int or float
        Requested x-value
Returns
-------
int, float
Interpolated y-value
"""
y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)
return y
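# A usage sketch with hypothetical points: halfway between (0, 0) and
# (10, 100) gives 50; x is not clamped, so extrapolation works too.
print(interpolate(0, 0, 10, 100, 5))   # 50.0
print(interpolate(0, 0, 10, 100, 12))  # 120.0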
|
082cc92c4c170dbba479e396731326e450b5d765
| 35,868 |
import requests
def requests_adapter(url: str) -> dict:
"""An adapter that encapsulates requests.get"""
resp = requests.get(url)
return resp.json()
|
f57c03cf6573ba6043390a8a099125ecf3ba3315
| 35,874 |
import math
def math_round(number: float, decimals: int = 0) -> float:
"""Округлить математическиим (не банковским) способом.
Работает обычным математическим образом, в отличие от встроенной функции round(),
которая использует банковское округление.
:param number: число, которое требуется округлить
:param decimals: сколько разрядов после запятой оставить
:return: округлённое число с плавающей запятой
>>> math_round(2.735, 2)
2.74
>>> round(2.735, 2)
2.73
"""
if math.isnan(number):
return math.nan
exp = number * 10 ** decimals
if abs(exp) - abs(math.floor(exp)) < 0.5:
return math.floor(exp) / 10 ** decimals
return math.ceil(exp) / 10 ** decimals
|
a223494af85a016ed8b1c0e3ffe6aa9593bd8da2
| 35,878 |
def indent_string(string, indent=' ', include_first=True, include_last=False):
"""
Indent a string by adding indent after each newline.
:param string: The string to indent
:param indent: The string to use as indentation
:param include_first: Also indent the first line of the string (before the first newline)
:param include_last: If the string ends on a newline, also add an indent after that.
:return: A new string.
"""
base = string.replace('\n', '\n' + indent)
if include_first:
base = indent + base
if not include_last and base.endswith('\n' + indent):
base = base[:-len(indent)]
return base
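# A usage sketch with a made-up block; each line gains a four-space indent
# and the trailing newline keeps no dangling indent by default.
block = "first line\nsecond line\n"
print(indent_string(block, indent='    '), end='')
#     first line
#     second line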
|
95bc16848fe6e095677f4a95a517b43fb10fd315
| 35,879 |
from collections import Counter
def generate_TF(corpus, corpus_dict):
"""
Function to generate TF for queries.
    :param corpus: Corpus of words in the queries (not used in the computation).
:param corpus_dict: Mapping of query number and its corpus.
:return tf_dict: Term Frequency Mapping.
"""
    tf_dict = dict()
    for document_number, words_in_it in corpus_dict.items():
        # Counter avoids calling list.count() once per word (O(n^2))
        word_counts = Counter(words_in_it)
        tf_dict[document_number] = {
            word: count / len(words_in_it) for word, count in word_counts.items()
        }
return tf_dict
|
4967f6968c4974fa3c8b400c9fec1a6b39f5ae43
| 35,881 |
def build_feature_dict_mapper(feature_names):
"""Build a function for tf.data.Dataset.map.
Args:
feature_names: List of feature names.
Returns:
A function converting tuples into (dictionary of features, label).
"""
def mapper(*tuple_args):
d = {}
for i in range(len(feature_names)):
d[feature_names[i]] = tuple_args[i]
return d, tuple_args[-1]
return mapper
|
9b3837cf3d1ff7bcc39242d660c255863a8dc98c
| 35,884 |
def reshape_axis(ax, axis_size_pix):
"""reshape axis to the specified size in pixels
this will reshape an axis so that the given axis is the specified size in pixels, which we use
to make sure that an axis is the same size as (or an integer multiple of) the array we're
trying to display. this is to prevent aliasing
NOTE: this can only shrink a big axis, not make a small one bigger, and will throw an exception
if you try to do that.
Arguments
---------
    ax : `matplotlib.pyplot.axis`
the axis to reshape
axis_size_pix : `int`
the target size of the axis, in pixels
Returns
-------
ax : `matplotlib.pyplot.axis`
the reshaped axis
"""
if ax.bbox.width < axis_size_pix[1] or ax.bbox.height < axis_size_pix[0]:
raise Exception("Your axis is too small! Axis size: ({}, {}). Image size: ({}, {})".format(
ax.bbox.width, ax.bbox.height, axis_size_pix[1], axis_size_pix[0]))
bbox = ax.figure.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
fig_width, fig_height = bbox.width*ax.figure.dpi, bbox.height*ax.figure.dpi
rel_axis_width = axis_size_pix[1] / fig_width
rel_axis_height = axis_size_pix[0] / fig_height
ax.set_position([*ax.get_position().bounds[:2], rel_axis_width, rel_axis_height])
return ax
|
5a029753014ebb4af4683be3a1d50ae4130ccc32
| 35,885 |
import csv
import io
def read_model_analysis_csv(csvfile):
"""
Reads CSV generated from a spreadsheet of the same form as 'EXAMPLE template results spreadsheet v2 warming levels'.
Returns the model analyses as a list of Dicts.
"""
# List of keys corresponding to the column headings for v2 of the template
keys = ['dataset', 'n_members', 'experiment', 'statmodel', 'seasonalcycle', 'spatialpattern', 'sigma', 'sigma_min', 'sigma_max', 'xi', 'xi_min', 'xi_max', 'statprop', 'conclusion', 'include_model', 'threshold10y', 'GMSTnow', 'PR', 'PR_min', 'PR_max', 'Delta_I', 'Delta_I_min', 'Delta_I_max', 'GMSTfuture', 'PR_future', 'PR_min_future', 'PR_max_future', 'Delta_I_future', 'Delta_I_min_future', 'Delta_I_max_future']
# CSV uploaded as bytes - assume UTF-8 encoding
csv_reader = csv.reader(io.StringIO(csvfile.read().decode('utf-8')))
rows = []
parse_rows = False
for values in csv_reader:
if parse_rows:
# Zip up the values in the row with the keys for each column heading
params = {k:v for k,v in zip(keys,values)}
# If the 'Include model?' field is empty (or just whitespace) then assume we have reached the end of the input rows
if not params['include_model'] or params['include_model'].isspace():
print('Log: Found row with empty "Include model?" field. Stopping CSV parsing.')
break
rows.append(params)
else:
# Skip rows until 'Model' heading appears in first column.
# Once found, skip the following line (that contains a description
# of the column) and begin parsing rows.
if values[0] == 'Model':
next(csv_reader, None)
parse_rows = True
continue
else:
continue
return rows
|
62a32a66ba56c487ee2ef12ac1518f4be92f04d5
| 35,894 |
import typing
import asyncio
async def exec_as_aio(
blocking_fn: typing.Callable[..., typing.Any], *args: typing.Any
) -> typing.Any:
"""Asynchronously run blocking functions or methods.
Args:
blocking_fn (Callable[..., Any]): The blocking function/method.
Returns:
Any: The return value of the blocking function/method.
"""
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, blocking_fn, *args)
|
0f8d0eb069ad8f33534931b4b9134486a641fa47
| 35,903 |
import re
def did_parse(did):
"""
    Parse a DID into its parts.
:param did: Asset did, str.
:return: Python dictionary with the method and the id.
"""
if not isinstance(did, str):
raise TypeError(f'Expecting DID of string type, got {did} of {type(did)} type')
match = re.match('^did:([a-z0-9]+):([a-zA-Z0-9-.]+)(.*)', did)
if not match:
raise ValueError(f'DID {did} does not seem to be valid.')
result = {
'method': match.group(1),
'id': match.group(2),
}
return result
|
a0ed14d68aac933ead173b53ba26a80c1e6c83fd
| 35,905 |
def max_standard_deviation(window):
"""Return the maximal spatial velocity standard deviation in a window over all the times."""
max_std = 0
for i in range(window.shape[1]):
current_std = window[:, i].std()
max_std = max(max_std, current_std)
return max_std
|
dd99de7170ce942b70b34f44cc50aff7da03091c
| 35,910 |
def get_bounds(params):
"""Gets the bounds of the parameters
A list of ``(min, max)`` pairs will be returned. And the None value for the
unbound parameters will be kept.
:param params: An iterable for the model parameters.
:returns: The list of bounds for the parameters.
:rtype: list
"""
return [
(i.lower, i.upper) for i in params
]
|
5fffe3c863e57de8f141b7a742e1b6ac65c8fc94
| 35,925 |
def baskets(items, count):
""" Place list itmes in list with given basket count.
Original order is not preserved.
Example:
> baskets([1,2,3,4,5,6,7,8, 9, 10], 3)
[[1, 4, 7, 10], [2, 5, 8], [3, 6, 9]]
"""
_baskets = [[] for _ in range(count)]
for i, item in enumerate(items):
_baskets[i % count].append(item)
return list(filter(None, _baskets))
|
dabf82b5229595276fd6dc2cf6bdb7c104d9867a
| 35,934 |
def dates(df, params, field='dates'):
"""
Return an inclusive sliced portion of the input data frame based on a min and max date
Args:
df (pd.DataFrame): The input data
params (Tuple[dt.datetime, dt.datetime]): Dates, must be in order of MIN, MAX
field (str): The date field used to find matching values
Returns:
pd.DataFrame
"""
_min, _max = params
return df[(df[field] >= _min) & (df[field] <= _max)].reset_index(drop=True)
|
4bf52d35d4c9d6601edea6eeb621163d318dd975
| 35,935 |
def get_hyperparams_wd(optimizer_def):
"""
returns the weight decay stored in the hyperparameters
"""
return optimizer_def.hyper_params.weight_decay
|
f9a097e3d6c9a8963bc6cf6a8a96c334551c730b
| 35,937 |
def sobloo_opensearch_params(latitude, longitude, max_cloud_cover):
"""Build Sobloo open search query string to get the latest Sentinel-2 image
with minimal cloud cover at the requested location
Arguments:
latitude {float} -- latitude of search point
longitude {float} -- longitude of search point
max_cloud_cover {float} -- max cloud cover percentage (0 to 1)
Returns:
dict -- parameters of GET request to Sobloo Opensearch endpoint
"""
# Create a small polygon around the search point
polygon = [
(longitude - 0.001, latitude - 0.001),
(longitude + 0.001, latitude - 0.001),
(longitude + 0.001, latitude + 0.001),
(longitude - 0.001, latitude + 0.001),
(longitude - 0.001, latitude - 0.001)
]
# To WKT
polygon = 'POLYGON ((%s))' % ', '.join(['%s %s' % p for p in polygon])
# Querying the latest Sentinel 2 image for our polygon
params = {
'f': [
'state.services.wmts:eq:true',
'identification.collection:eq:Sentinel-2',
'contentDescription.cloudCoverPercentage:lt:%i' % int(
max_cloud_cover * 100)
],
'sort': '-timeStamp',
'size': 1,
'gintersect': polygon
}
return params
|
1ee722c56e42c6594dd3476825af345a78481c1b
| 35,943 |
from pathlib import Path
def data_file(path):
"""
Get a path to a file in the local 'data' directory.
"""
my_location = Path(__file__).parent
return str(my_location / 'data' / path)
|
89b41133878c0b22ae7c1f3ee65cae20a96f58f9
| 35,945 |
def _map_boolean_to_human_readable(boolean, resource, token):
"""
Map a boolean into a human readable representation (Yes/No).
:param boolean: boolean with the value that we want to transform
:param resource: resource containing all the values and keys
:param token: user token
"""
if boolean:
return 'Yes'
else:
return 'No'
|
380a9ff38cc5999a9e062b2487a7e54158c02a69
| 35,946 |
def json_path_components(path):
"""Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components
"""
if isinstance(path, str):
path = path.split(".")
return list(path)
|
0b9a1c7e68b368b04616d7a29254bd4509673d51
| 35,950 |
def find_kth(nums1, nums2, k):
"""find kth number of two sorted arrays
>>> find_kth([1, 3], [2], 2)
2
>>> find_kth([2], [1, 3], 1)
1
>>> find_kth([1, 3], [2], 3)
3
>>> find_kth([1], [2], 1)
1
>>> find_kth([1], [2], 2)
2
"""
# assume len(nums1) <= len(nums2)
if len(nums2) < len(nums1):
nums1, nums2 = nums2, nums1
# if nums1 is empty
if not nums1:
return nums2[k - 1]
if k == 1:
return min(nums1[0], nums2[0])
# divide and conquer
if len(nums1) < k // 2:
return find_kth(nums1, nums2[k - k // 2:], k // 2)
elif nums1[k // 2 - 1] == nums2[k - k // 2 - 1]:
return nums1[k // 2 - 1]
elif nums1[k // 2 - 1] < nums2[k - k // 2 - 1]:
return find_kth(nums1[k // 2:], nums2[:k - k // 2], k - k // 2)
else:
return find_kth(nums1[:k // 2], nums2[k - k // 2:], k // 2)
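# A usage sketch: the classic application is finding the median of two
# sorted arrays without merging them (arrays here are hypothetical).
a, b = [1, 3, 8], [2, 4, 6, 10]
n = len(a) + len(b)                # 7 values merged: 1,2,3,4,6,8,10
print(find_kth(a, b, n // 2 + 1))  # 4 -- the median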
|
8471f79de2d388ff942482f758076a5ebe39164c
| 35,952 |
def inertia_update(iteration,n_iterations,wmin,wmax):
"""
Time varying acceleration inertia:
w^k = wmax - (wmax - wmin)/kmax * k
Parameters:
iteration: int
The number of the iteration
n_iterations: int
The number of total iterations
wmin: float
        The minimum value of the inertia weight
    wmax: float
        The maximum value of the inertia weight
    Returns: float
        The new inertia weight value
"""
    W = wmax - ((wmax - wmin) / n_iterations) * iteration
return W
|
20831f2eeac8cdb269674188704a0944469f66d1
| 35,957 |
def crop_scores(player):
"""Select specific only parts of the full score object
Args:
player (dict): player object from which to read the score
Returns:
score (dict): filtered scores from the game data
"""
score = {}
# if there is no active username, nobody finished, but the
# last seen player might still get a valid rank
if player['username'] in ['dead', 'open']:
score['finished'] = 0
score['rank'] = player['finishrank']
return score
keys_wanted = [
'capitalships',
'freighters',
'planets',
'starbases',
'militaryscore',
'percent',
]
score = {k: player['score'].get(k, None) for k in keys_wanted}
score['finished'] = 1
score['rank'] = player['finishrank']
return score
|
5466cf39b59ce4b49b7da3ff2c355b2d7b46455c
| 35,959 |
def get_header_item_group(header, group):
"""
Filter header and return list of items of a specific header
group (e.g. 'CTYPE').
Return empty list when unable to find @group in @_header.items().
Parameters
----------
    header: dictionary
        The dictionary that contains the header of the SST cube file.
group: string
Keyword you want to find.
Returns
-------
list: list
List that contains the matched group else it is empty.
"""
    return [i for i in header.items() if i[0].startswith(group) and i[0] != group]
|
5417756a05186fa17f67b83e520d8b035a43a21c
| 35,961 |
def title(title):
""" Generate reST title directive.
:Examples:
    >>> title('Page title')
'''
==========
Page title
==========
<BLANKLINE>
'''
"""
title_len = len(title)
return '\n'.join((
'',
'=' * title_len,
title,
'=' * title_len,
'',
))
|
86bd46cc28ad704d9f90844511574a912f6df9a5
| 35,964 |
from functools import reduce
def concat_cols(df, cols, delim):
"""
Concatenate columns in a dataframe with a
delimiter.
Args:
df (DataFrame): input DataFrame
cols (list-like): columns to concatenate
        delim (str): delimiter to join column values
Returns:
Series with concatenated columns.
"""
cols_str = [df[x].astype(str) for x in cols]
return reduce(lambda a, b: a + delim + b, cols_str)
|
5088848fbc6337d8ae251dedf3b7c4d56df4192c
| 35,967 |
import inspect
def import_subclass_from_module(parent_cls, imported_module):
"""
Import class(es) from the Python module `imported_module` if
it is a subclass of a parent class, `parent_cls`
"""
child_classes = []
    for cls_name, cls_obj in inspect.getmembers(
            imported_module, inspect.isclass):
        if cls_name != parent_cls.__name__:
            # getmembers already yields the class object itself
            if issubclass(cls_obj, parent_cls):
                child_classes.append(cls_obj)
return child_classes
|
e8696663b821c0a572df9875a3dd8bdfb101fc7a
| 35,969 |
def update_dictionary(default_dict, overwrite_dict=None, allow_unknown_keys=True):
"""Adds default key-value pairs to items in ``overwrite_dict``.
Merges the items in ``default_dict`` and ``overwrite_dict``,
preferring ``overwrite_dict`` if there are conflicts.
Parameters
----------
default_dict: `dict`
Dictionary of default values.
overwrite_dict: `dict` or None, optional, default None
User-provided dictionary that overrides the defaults.
allow_unknown_keys: `bool`, optional, default True
If false, raises an error if ``overwrite_dict`` contains a key that is
not in ``default_dict``.
Raises
------
ValueError
if ``allow_unknown_keys`` is False and ``overwrite_dict``
has keys that are not in ``default_dict``.
Returns
-------
updated_dict : `dict`
Updated dictionary.
        Returns ``overwrite_dict``, with default values added
based on ``default_dict``.
"""
if overwrite_dict is None:
overwrite_dict = {}
if not allow_unknown_keys:
extra_keys = overwrite_dict.keys() - default_dict.keys()
if extra_keys:
raise ValueError(f"Unexpected key(s) found: {extra_keys}. "
f"The valid keys are: {default_dict.keys()}")
return dict(default_dict, **overwrite_dict)
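# A usage sketch with made-up settings: user values win on conflicts,
# and unknown keys can be rejected.
defaults = {"color": "blue", "width": 2}
print(update_dictionary(defaults, {"width": 5}))  # {'color': 'blue', 'width': 5}
# update_dictionary(defaults, {"widht": 5}, allow_unknown_keys=False)
# -> ValueError: Unexpected key(s) found: {'widht'}. ...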
|
503934d42362ea2b7ff7732bb9f752de45140898
| 35,974 |
def find_in_mapping(sequence, key, value):
"""
Search a sequence of mappings for one with a matching key-value pair.
Only searches one level deep.
Args:
sequence (list(dict)): Sequence of mappings.
key: Key to match.
value: value to match
Returns:
The first matching mapping, or ``None`` if no such mapping exists.
"""
    mapping = None
    for map_value in sequence:
        try:
            if map_value[key] == value:
                mapping = map_value
                break
        except (KeyError, TypeError):
            pass
    return mapping
|
105738b193b9003c108726a05c65e2b41295dde4
| 35,977 |
from typing import List
from typing import Dict
import logging
def decode_one_sequence_label_to_span(sequence_label: List[str]) -> List[Dict]:
"""
    Decode a BIO label sequence into a list of spans. For example:
["B-Per", "I-Per", "O", "B-Loc"] ->
[ {"label": "Per", "begin": 0, "end": 2},
{"label": "Loc", "begin": 3, "end": 4} ]
    :param sequence_label: the BIO label sequence.
    :return: the list of decoded span dicts
"""
idel_state, span_state = 0, 1
spans = list()
begin = None
tag = None
state = idel_state
for i, label in enumerate(sequence_label):
if state == idel_state:
if label[0] == "B":
begin = i
tag = label[2:]
state = span_state
elif label[0] == "O":
pass
elif label[0] == "I":
logging.warning(f"{sequence_label} 有不满足 BIO 格式的问题")
else:
raise RuntimeError(f"{label} schema 不符合 BIO")
elif state == span_state:
if label[0] == "B":
span = {"label": tag,
"begin": begin,
"end": i}
spans.append(span)
begin = i
tag = label[2:]
state = span_state
elif label[0] == "O":
span = {"label": tag,
"begin": begin,
"end": i}
spans.append(span)
begin = None
tag = None
state = idel_state
elif label[0] == "I":
state = span_state
else:
raise RuntimeError(f"{label} schema 不符合 BIO")
else:
raise RuntimeError(f"{state} 错误,应该是 在 [{idel_state}, {span_state}] ")
if state == span_state:
span = {"label": tag,
"begin": begin,
"end": len(sequence_label)}
spans.append(span)
return spans
|
5df60c24b5ab1276568d2193bf276ef3ef8e54fd
| 35,979 |
from typing import Any
def cast_number(number: Any) -> float:
"""Cast to a float"""
number_float = float(number)
return number_float
|
d43bd8db72a4817ab18e5c24fe9225a3b1702d00
| 35,980 |
def readInput(inFile):
"""
Reads in the key values.
@ In, inFile, Python file object, file containing inputs
@ Out, (x,y,z), tuple(float,float,float), input values
"""
    x, y, z = None, None, None
for line in inFile:
var,val = line.strip().split('=')
if var.strip() == 'x':
x = float(val)
elif var.strip() == 'y':
y = float(val)
elif var.strip() == 'z':
z = float(val)
if x is None or y is None or z is None:
raise IOError('x,y,z were not all found in input file',inFile)
return x,y,z
|
68a194375d880070f5404318b4ebfccb62ae479f
| 35,981 |
def get_label(labels, index):
""" Gets label if exists, otherwise returns label number """
if index < len(labels):
return labels[index]
else:
return '#%d' % (index)
|
40ca42a95c5ac3252aa63e84708177b87c538411
| 35,986 |
def formatHowpublished(howpublished):
"""How something strange has been published. The first word should be
capitalized. """
return howpublished.capitalize()
|
480c6b3f7f08a3e79c496b1a40a65390175893d6
| 35,989 |
def apply_at(
arr,
func,
mask=None,
else_=None,
in_place=False):
"""
Apply a function to an array.
Warning! Depending on the value of `in_place`, this function may alter
the input array.
Args:
arr (np.ndarray): The input array.
func (callable): The function to use.
Must have the signature: func(np.ndarray) -> np.ndarray
mask (np.ndarray[bool]): The mask where the function is applied.
Must have the same shape as `arr`.
else_ (callable|Any|None): The alternate behavior.
If callable, this is a function applied to non-masked values.
Must have the signature: func(np.ndarray) -> np.ndarray
If Any, the value is assigned (through broadcasting) to the
non-masked value.
            If None, the non-masked values are left untouched.
in_place (bool): Determine if the function is applied in-place.
If True, the input gets modified.
If False, the modification happen on a copy of the input.
Returns:
arr (np.ndarray): The output array.
Examples:
>>> arr = np.arange(10) - 5
>>> print(arr)
[-5 -4 -3 -2 -1 0 1 2 3 4]
>>> print(apply_at(arr, np.abs, arr < 0))
[5 4 3 2 1 0 1 2 3 4]
>>> print(apply_at(arr, np.abs, arr < 2, 0))
[5 4 3 2 1 0 1 0 0 0]
>>> print(apply_at(arr, np.abs, arr < 0, lambda x: x ** 2, True))
[ 5 4 3 2 1 0 1 4 9 16]
>>> print(arr)
[ 5 4 3 2 1 0 1 4 9 16]
"""
if not in_place:
arr = arr.copy()
if mask is not None:
arr[mask] = func(arr[mask])
if else_ is not None:
if callable(else_):
arr[~mask] = else_(arr[~mask])
else:
arr[~mask] = else_
else:
arr[...] = func(arr)
return arr
|
1af1e62f6e1ede616508d017bd14ca0fcd4556d6
| 35,990 |
import re
def parse_data_refs(tf_content):
"""
Look for references to other remote states. These references look like this:
gcp_org_id = "${data.terraform_remote_state.foundation.org_id}"
"""
    p = re.compile(r'data\.terraform_remote_state\.([_a-z][_\-0-9a-z]*)\.')
    result = p.findall(tf_content)
    # remove duplicates while preserving first-seen order
    return list(dict.fromkeys(result))
|
63af09c6ac830b6d822629cfeb8a02f50b366d56
| 35,991 |
import struct
def read_varint(buffer: bytearray) -> int:
""" Parse a varint, read bytes are consumed.
"""
i, = struct.unpack('<B', buffer[:1])
if i < 0xfd:
del buffer[:1]
res = i
elif i < 0xfe:
res, = struct.unpack('<H', buffer[1:3])
del buffer[:3]
elif i < 0xff:
res, = struct.unpack('<I', buffer[1:5])
del buffer[:5]
else:
res, = struct.unpack('<Q', buffer[1:9])
del buffer[:9]
return res
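# A usage sketch: a 0xfd prefix means the value sits in the next two
# little-endian bytes, and parsed bytes are consumed from the buffer.
buf = bytearray(b'\xfd\x01\x02\xff')
print(read_varint(buf))  # 513 (0x0201)
print(buf.hex())         # 'ff' -- only the unparsed byte remains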
|
ae3a41d4efa8b13a7fda86fc39acc0d396a22b77
| 35,992 |
def custom_formatwarning(msg, *a):
"""Given a warning object, return only the warning message."""
return str(msg) + '\n'
|
5da47d79d37c79d3072aedb754f9fcac13baf0b1
| 35,995 |
def calculate_interest_amount_in_years(starting_amount, number_of_years, interest_rate, stipend_rate):
"""
After X number of years, how much would I have in the bank?
:param starting_amount: The amount of money the bank has to start with.
:type starting_amount: double
:param number_of_years: The amount of time to accrue interest.
:type number_of_years: int
:param interest_rate: The rate that interest is being added into the bank.
:type interest_rate: float
:param stipend_rate: The amount taken out each year for a stipend.
:type stipend_rate: float
:return: The amount in the bank, and the yearly stipends.
"""
# Money is not gained. Can be calculated, but not here.
if stipend_rate >= interest_rate:
return -1, -1
current_amount = starting_amount
stipend = {}
for year in range(number_of_years):
current_amount += (current_amount * interest_rate)
# We take the stipend out after adding new interest.
new_stipend_amount = current_amount * stipend_rate
current_amount -= new_stipend_amount
stipend[year+1] = round(new_stipend_amount, 2)
return current_amount, stipend
|
ea6a7e503c92f4b65e1ebaba8cca1bfce89ceff1
| 35,996 |
def _shadow_level(kernel, threshold, sensitivity):
"""
Calculates the greyscale shadow level for a given kernel,
the diode threshold and the diode sensitivity.
:param kernel: pooling kernel for down scaling
:type kernel: float array (2 dimensional)
:param threshold: thresholds for light intensity (determines the shadowlevel)
:type threshold: list or tuple (len == 3) descending order
:param sensitivity: diode sensitivity
:type sensitivity: float
:return: shadowlevel (integer between 0 and 3)
"""
light_intensity = 0.0
for y in range(len(kernel)):
for x in range(len(kernel[0])):
light_intensity += kernel[y][x]
    # Normalize the light intensity by the kernel size
light_intensity /= (len(kernel)*len(kernel[0]))
if light_intensity > (threshold[0] + sensitivity):
return 0
elif light_intensity > (threshold[1] + sensitivity):
return 1
elif light_intensity > (threshold[2] + sensitivity):
return 2
else:
return 3
|
6aa09975ab8933d812ca599fae2d6116fc9190e9
| 36,002 |
def combine_ctrlpts_weights(ctrlpts, weights=None):
""" Multiplies control points by the weights to generate weighted control points.
This function is dimension agnostic, i.e. control points can be in any dimension but weights should be 1D.
The ``weights`` function parameter can be set to None to let the function generate a weights vector composed of
1.0 values. This feature can be used to convert B-Spline basis to NURBS basis.
:param ctrlpts: unweighted control points
:type ctrlpts: list, tuple
:param weights: weights vector; if set to None, a weights vector of 1.0s will be automatically generated
:type weights: list, tuple or None
:return: weighted control points
:rtype: list
"""
if weights is None:
weights = [1.0 for _ in range(len(ctrlpts))]
ctrlptsw = []
for pt, w in zip(ctrlpts, weights):
temp = [float(c * w) for c in pt]
temp.append(float(w))
ctrlptsw.append(temp)
return ctrlptsw
|
b55b77159ec04aa287314f8771f0056c820c5cad
| 36,004 |
import torch
def diagonal_mask(dim, num_diagonals):
"""Creates a binary mask with ones around the major diagonal defined by
`num_diagonals`.
Parameters
----------
dim : int
dimension of the mask matrix
num_diagonals : int
number of diagonals. The number gets rounded up to the nearest odd
number. 1 means an identity matrix.
Returns
-------
torch tensor
mask tensor
"""
if num_diagonals == 0:
raise Exception(
f'Expected positive number of diagonals. Found {num_diagonals=}.'
)
mask = torch.eye(dim)
for i in range(num_diagonals // 2):
d = i + 1
mask += torch.diag(torch.ones(dim - d), diagonal=d) \
+ torch.diag(torch.ones(dim - d), diagonal=-d)
return mask
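# A usage sketch: a 4x4 band mask with the main diagonal plus one diagonal
# on each side (num_diagonals=3).
print(diagonal_mask(4, 3))
# tensor([[1., 1., 0., 0.],
#         [1., 1., 1., 0.],
#         [0., 1., 1., 1.],
#         [0., 0., 1., 1.]])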
|
2c6653e36da449bfba7321965b8091952e316263
| 36,006 |
def locate_card_linear(cards, query):
"""Linear search for `query` in a desc sorted list `cards`."""
# time complexity: O(N)
# space complexity: O(1)
position = 0
while position < len(cards):
# check if current elem matches the query
if cards[position] == query:
return position
position += 1
# if we have reached the end of the list w/out returning,
# then query is not in cards
return -1
|
c3acb6064228cd0357fceb5804b0f4a500aae780
| 36,009 |
def string_to_tuple( word ) :
"""Convert string word into a tuple suitable for use as a key to a dictionary
of anagrams"""
this_word_list = list(word) # convert the string to a list so we can sort it
this_word_list.sort()
    this_word_tuple = tuple( this_word_list ) # Convert the list to a tuple which can be the key to a dictionary
return this_word_tuple
|
a80ecbdeaa9b9a3d185d00befd6b9a33a970eb73
| 36,011 |
def findCon(name,conList):
"""
Returns the index of a constituent from a list
"""
return next((i for i, j in enumerate(conList) if j == name))
|
3de9c922459175156900649800465a3546e330a1
| 36,015 |
def find_chunk_shape(shape, n_max=None):
"""
Given the shape of an n-dimensional array, and the maximum number of
elements in a chunk, return the largest chunk shape to use for iteration.
This currently assumes the optimal chunk shape to return is for C-contiguous
arrays.
"""
if n_max is None:
return tuple(shape)
block_shape = []
max_repeat_remaining = n_max
for size in shape[::-1]:
if max_repeat_remaining > size:
block_shape.append(size)
max_repeat_remaining = max_repeat_remaining // size
else:
block_shape.append(max_repeat_remaining)
max_repeat_remaining = 1
return tuple(block_shape[::-1])
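# A usage sketch with a hypothetical (4, 10, 10) array: capping chunks at
# ~150 elements keeps whole trailing (C-contiguous) axes where possible.
print(find_chunk_shape((4, 10, 10), 150))  # (1, 10, 10)
print(find_chunk_shape((4, 10, 10)))       # (4, 10, 10) -- no limit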
|
3370be64a4ba13a5d7a3f1e2e85858068202df38
| 36,018 |
import re
def clean_name(text):
"""
    Return a cleaned version of a string - replaces everything
    but alphanumeric characters, newlines, and dots with underscores.
:param str text: string to clean.
:returns: cleaned string.
:rtype: str
"""
return re.sub(r'[^a-zA-Z0-9\n\.]', '_', text)
|
51c9663d4d6a7f5b3099fc30b0108e99ac2608d0
| 36,022 |
def remove_non_datastore_keys(form_data):
"""Remove keys not relevant to creating a datastore object."""
form_dict = {k: v[0] for k, v in form_data.items()}
for key in ["csrfmiddlewaretoken", "name", "type", "owner"]:
form_dict.pop(key, None)
return form_dict
|
0d0e561cb24eaf7cb3ee77060ed2149c55140812
| 36,025 |
import re
def _parse_boolean(value):
"""
Attempts to convert a value to a boolean and returns it.
If it fails, then it raises an Exception.
:param value: a value
:return: boolean
"""
if re.match("^(on|true|yes|1)$", str(value), re.IGNORECASE):
return True
if re.match("^(off|false|no|0)$", str(value), re.IGNORECASE):
return False
raise Exception("Unable to coerce value '{}' to boolean".format(value))
|
3090f60eaccbfc65ecb3cf6f956ab3e57d572798
| 36,026 |
import torch
def padding_mask(x_lens):
"""
transform lengths of samples to a binary mask.
inputs:
- x_lens: length of each sample in minibatch. # tensor # (batch_size, )
outputs:
- mask: 1-0 binary mask. 1 means valid and 0 means invalid.
# tensor # (batch_size, longest_time_step, 1)
"""
longest_len = max(x_lens)
batch_size = len(x_lens)
mask = torch.zeros(batch_size, longest_len, 1)
for i, x_len in enumerate(x_lens):
mask[i, :x_len] = 1.
return mask
|
ea9e3c06d61f5d9b19795a59dbb2956e8bdb4385
| 36,033 |
def certificate_files(create_certificate):
"""Returns a dict with the certificate files
The dict contains the following keys:
caKeyPath
caCrtPath
clientKeyPath
clientCrtPath
"""
return create_certificate
|
b30ae8b72b5b933b8f7cb973c1b5022a9e97ed53
| 36,035 |
def get_seed(seed_id):
"""
This function provides the random seed.
:param seed_id: int
the seed_id is the 'seeds' vector index
:return:
"""
seeds = [1859168769, 1598189534,
1822174485, 1871883252, 694388766,
188312339, 773370613, 2125204119, #0,1,2,3,4,5
2041095833, 1384311643, 1000004583,
358485174, 1695858027, 762772169,
437720306, 939612284, 1998078925,
981631283, 1024155645, 1147024708, #19
558746720, 1349341884, 678622600,
1319566104, 538474442, 722594620,
1700738670, 1995749838, 1936856304,
346983590, 565528207, 513791680,
1996632795, 2081634991, 1769370802,
349544396, 1996610406, 1973272912,
1972392646, 605846893, 934100682,
222735214, 2101442385, 2009044369,
1895218768, 701857417, 89865291,
144443207, 720236707, 822780843,
898723423, 1644999263, 985046914,
1859531344, 1024155645, 764283187,
778794064, 683102175, 1334983095,
1072664641, 999157082, 1277478588,
960703545, 186872697, 425414105]
return seeds[seed_id]
|
d568962485aa02f88ed0b12a2b6dcfb397773ec2
| 36,036 |
import json
def format_json(data):
"""
Returns a human-formatted JSON
"""
return json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
|
a0ea13af5e95d5cd9879de0cdf5055d6fa1dd4c8
| 36,038 |
def find_request_end_token(data):
"""Find token that indicates that request is over."""
lines = data.splitlines(True)
if not lines:
return False
elif 'command_list_ok_begin' == lines[0].strip():
return 'command_list_end' == lines[-1].strip()
else:
return lines[0].endswith('\n')
|
5b09d57fdb020940f6d16183c0ae3d84dcb2ce2c
| 36,046 |
def set_invalid(field, render_kw=None):
"""
Returns *render_kw* with `invalid` added to *class* on validation errors.
Set (or appends) 'invalid' to the fields CSS class(es), if the *field* got
any errors. 'invalid' is also set by browsers if they detect errors on a
field.
"""
if render_kw is None:
render_kw = {}
if field.errors:
classes = render_kw.get('class') or render_kw.pop('class_', '')
if classes:
render_kw['class'] = 'invalid {}'.format(classes)
else:
render_kw['class'] = 'invalid'
return render_kw
|
5e32005e1a5405b3ba86293c986e6368c177ad40
| 36,047 |
from datetime import datetime
def convert_unix_to_date(d):
""" Convert millise since epoch to date formatted MM/DD/YYYY HH:MI:SS """
if d:
dt = datetime.utcfromtimestamp(d / 1000)
return dt.strftime('%m/%d/%Y %H:%M:%S')
return 'N/A'
|
e587839c4c8cc6464704d7b2ce1b4ae0bf9db302
| 36,052 |
def add_key_arguments(parser):
"""
Adds the arguments required to create a new key to the parser given as a parameter
Args:
- parser: Parser where to add the key parameters
+ Type: argparse.ArgumentParser
Return:
- group: Argument group containing all the key arguments
+ Type: argparse._ArgumentGroup
"""
group = parser.add_argument_group("Key management")
group.add_argument(
"-ks", "--key_size",
help='Length of the new key',
type=int,
default=4096
)
group.add_argument(
"-kt", "--key_type",
help="Method used for generating the new key",
choices=["dsa", "rsa"],
default="rsa"
)
return group
|
329a90dbf639283e62765690cfe724038ce61bbd
| 36,053 |
def weight_normalization(weight1, weight2):
"""
A function to normalize the weights of each modality so the weights' sum is 1 for each pixel of the image
    :param weight1: The weight of modality 1, a grayscale image
    :param weight2: The weight of modality 2, a grayscale image
:return: Two weights, weight1_normalized and weight2_normalized, respectively the normalized versions of weight1 and weight2, two grayscale images.
"""
weight1_normalized = weight1 / (weight1 + weight2)
weight2_normalized = weight2 / (weight1 + weight2)
return weight1_normalized, weight2_normalized
|
0e04960ba7baec3e0e657117ebb5c3da8c9bd845
| 36,054 |
import uuid
def CreateShoppingCampaign(client, budget_id, merchant_id):
"""Creates a shopping campaign with the given budget and merchant IDs.
Args:
client: an AdWordsClient instance.
budget_id: the str ID of the budget to be associated with the shopping
campaign.
merchant_id: the str ID of the merchant account to be associated with the
shopping campaign.
Returns:
The created Shopping Campaign as a sudsobject.
"""
campaign_service = client.GetService('CampaignService', 'v201809')
campaign = {
'name': 'Shopping campaign #%s' % uuid.uuid4(),
# The advertisingChannelType is what makes this a shopping campaign
'advertisingChannelType': 'SHOPPING',
# Recommendation: Set the campaign to PAUSED when creating it to stop the
# ads from immediately serving. Set to ENABLED once you've added targeting
# and the ads are ready to serve.
'status': 'PAUSED',
# Set portfolio budget (required)
'budget': {
'budgetId': budget_id
},
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC'
},
'settings': [
# All shopping campaigns need a ShoppingSetting
{
'xsi_type': 'ShoppingSetting',
'salesCountry': 'US',
'campaignPriority': '0',
'merchantId': merchant_id,
# Set to "True" to enable Local Inventory Ads in your campaign.
'enableLocal': True
}
]
}
campaign_operations = [{
'operator': 'ADD',
'operand': campaign
}]
campaign = campaign_service.mutate(campaign_operations)['value'][0]
print ('Campaign with name "%s" and ID "%s" was added.'
% (campaign['name'], campaign['id']))
return campaign
|
208b2ad37d2fda5ee5b85827029597ed8bc6801b
| 36,057 |
from typing import Dict
from typing import Tuple
def get_conversion_endpoints(options: Dict) -> Tuple[str, str, Dict]:
"""Returns conversion direction with endpoints
(Text to DNA) or (DNA to Text)."""
_from, _to = "TEXT", "DNA"
if options["convert_to"] == "DNA":
_from, _to = "TEXT", "DNA"
elif options["convert_to"] == "TEXT":
_from, _to = "DNA", "TEXT"
options["convert_from"] = _from
return _from, _to, options
|
02fd713646efb72959d3b42dd1f2c3f9de343da8
| 36,063 |
def dice_coefficient(outputs, targets, eps=1.0):
"""Calculates the Dice coefficient between the predicted and target masks.
More information:
- https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient
Args:
outputs (torch.tensor): Outputs of the model (N x 1 x H x W).
targets (torch.tensor): Manual segmentations (N x H x W).
eps (float, optional): Smooth parameter (avoid zero divisions). Defaults to 1.
Returns:
(float): Dice coefficient.
"""
outputs = outputs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
intersection = (outputs * targets).sum()
union = outputs.sum() + targets.sum()
return ((2 * intersection + eps) / (union + eps))
|
b74d2ae00e30bc1127c098a17b427e9fcc3ac30b
| 36,064 |
def calculate_center(shape: tuple):
"""
Calculate and return the center point of ``shape``.
:param shape: A tuple (width, height) of odd numbers
:return: A ``tuple`` (x, y) containing the center points coordinates
"""
    if any(d % 2 == 0 for d in shape[-2:]):
raise ValueError("width and height of shape must be odd numbers")
x, y = [int((d-1)/2) for d in shape[-2:]]
return (x, y)
|
dbe2485b39c7670adfb832d3626e7e9106e5b2c0
| 36,070 |
def format_datetime(datetime_):
"""Convert datetime object to something JSON-serializable."""
if datetime_ is None:
return None
return datetime_.strftime('%Y-%m-%dT%H:%M:%SZ')
|
16fbcff4737ec985405c8267b74e9085f56d06df
| 36,071 |
def _set_key2sl(key):
"""Convert a key part to a slice part."""
if isinstance(key, int) or key is Ellipsis:
return key
elif isinstance(key, slice):
# Forbid slice steps
if key.step is not None:
raise ValueError("farray slice step is not supported")
return key
else:
raise TypeError("expected int, slice, or ...")
|
c735f009ef5576f4eaf293152b715aed8d20f965
| 36,073 |
def sb_xs_compare(sb, sheet):
"""Compute the absolute and percentage difference between the maximum fields of one CrossSection in a SectionBook and all the other CrossSections
args:
sb - SectionBook object
sheet - sheet string of the CrossSection in sb that should be
compared with all the other CrossSections
returns:
abs_dif - DataFrame with the absolute difference of the maximum
fields at ROW edges
rel_dif - DataFrame with the relative difference of the maximum
fields at ROW edges (multiply by 100 to get percentage dif)"""
rem = sb.ROW_edge_max
abs_dif = rem - rem.loc[sheet]
rel_dif = (rem - rem.loc[sheet])/rem
return(abs_dif, rel_dif)
|
491dd1ada0b23882d2f9de474b2ccb56c36eb705
| 36,079 |
def percent_change(a,b):
"""
Returns the percentage change from a to b.
"""
return float((b - a)/a)
|
34bf312683f72107404919bb01b9f01464e23c47
| 36,081 |
import torch
def make_cuda(tensor):
"""Turn the tensor into cuda if possible."""
if torch.cuda.is_available():
return tensor.cuda()
return tensor
|
14f05f1f27a0f846448ac89a2e8371b4acac9711
| 36,083 |
def check_rent_history(rent_list, username):
"""
return farm ids that the given username has rented before
"""
farm_rent_before = []
for rent in rent_list:
if rent.get('username') == username:
farm_rent_before.append(str(rent.get('farm_id')))
return farm_rent_before
|
b480651ab7e17673df91c7e34dc5d70f8f360ac7
| 36,086 |
def db2lin(x):
"""From decibel to linear"""
return 10.0**(x/10.0)
|
653fb36943baeb393d4cec07544df95a230a5880
| 36,088 |
def remove_nan_columns(dataframe):
"""Removes columns of data frame where any value is NaN.
Args:
dataframe (pandas.DataFrame): Input dataframe.
Returns:
tuple: tuple containing:
pandas.DataFrame: Dataframe with columns containing NaN values removed.
numpy.array: Array of bools indicating which columns were kept.
"""
df_copy = dataframe.set_index("timestamp")
selected_columns_mask = df_copy.notnull().all().values
return dataframe.dropna(axis=1, how="any"), selected_columns_mask
|
058c0469dd93a71866e1a09d8dd16b80379c7d15
| 36,090 |
import torch
def kl_divergence(q, p):
"""Calculates the KL divergence between q and p.
Tries to compute the KL divergence in closed form, if it
is not possible, returns the Monte Carlo approximation
using a single sample.
Args:
q : torch.distribution
Input distribution (posterior approximation).
p : torch.distribution
Target distribution (prior).
Returns:
The KL divergence between the two distributions.
"""
if isinstance(q, torch.distributions.Normal) \
and isinstance(p, torch.distributions.Normal):
var_ratio = (q.scale / p.scale.to(q.scale.device)).pow(2)
t1 = ((q.loc - p.loc.to(q.loc.device)) / p.scale.to(q.loc.device)).pow(2)
return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
else:
s = q.rsample()
return q.log_prob(s) - p.log_prob(s)
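# A usage sketch: two unit-variance Gaussians one unit apart have a
# closed-form KL of (mu_q - mu_p)^2 / 2 = 0.5.
q = torch.distributions.Normal(loc=0.0, scale=1.0)
p = torch.distributions.Normal(loc=1.0, scale=1.0)
print(kl_divergence(q, p))  # tensor(0.5000)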
|
3d0be3599332840057f07d9bee71681f4a710557
| 36,096 |
import re
def ipv4_address_validator(addr):
"""
Regex to validate an ipv4 address.
Checks if each octet is in range 0-255.
Returns True/False
"""
pattern = re.compile(
r"^([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])$"
)
    return bool(pattern.fullmatch(str(addr).strip()))
|
6204d9d54536e510b5556d13ee259ec3d7b3fc95
| 36,098 |
def plot_hist(self, parameters=None, mean_line=False, **kwds):
"""
Make a histogram of the WaterFrame's.
A histogram is a representation of the distribution of data.
This function calls pandas.DataFrame.hist(), on each parameter of the
WaterFrame, resulting in one histogram per parameter.
Parameters
----------
parameters: str, list of str, optional (parameters=None)
keys of self.data to plot. If parameters=None, it will plot all
parameters.
mean_line: bool, optional (mean_line=False)
It draws a line representing the average of the values.
**kwds:
All other plotting keyword arguments to be passed to
DataFrame.hist().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.hist.html
Returns
-------
ax: matplotlib.AxesSubplot
Axes of the plot.
"""
if parameters is None:
parameters = list(self.parameters)
if isinstance(parameters, str):
parameters = [parameters]
axes = self.data.hist(column=parameters, **kwds)
parameter_counter = 0
try:
for ax in axes:
ax.set_xlabel("Values")
ax.set_ylabel("Frequency")
if mean_line is True:
if parameter_counter < len(parameters):
x_mean = self.mean(parameters[parameter_counter])
ax.axvline(x_mean, color='k',
linestyle='dashed',
linewidth=1)
parameter_counter += 1
except AttributeError:
# Creation of the mean line
parameter_counter = 0
for irow in range(len(axes)):
for icol in range(len(axes[irow])):
if parameter_counter < len(parameters):
axes[irow, icol].set_xlabel("Values")
axes[irow, icol].set_ylabel("Frequency")
if mean_line is True:
x_mean = self.data[axes[irow, icol].get_title()].mean()
axes[irow, icol].axvline(x_mean, color='k',
linestyle='dashed',
linewidth=1)
parameter_counter += 1
return axes
|
5c927a170fff37691135dd367f0b42247d4aa3e1
| 36,101 |
def isDelimited(value):
"""
This method simply checks to see if the user supplied value has delimiters.
That is, if it starts and ends with double-quotes, then it is delimited.
"""
if len(value) < 2:
return False
if value[0] == '"' and value[-1] == '"':
return True
else:
return False
|
4a89211c27f6cf826b3b29cc718b0e00a4184a0f
| 36,103 |
import types
def asmodule(module):
"""
Return the module references by `module` name. If `module` is
already an imported module instance, return it as is.
"""
if isinstance(module, types.ModuleType):
return module
elif isinstance(module, str):
return __import__(module, fromlist=[""])
else:
raise TypeError(type(module))
|
1e2def23f770f9bc84aa8af3273f830fedea3282
| 36,105 |
def flatten(list_of_lists):
"""Takes a list of lists and turns it into a list of the sub-elements"""
return [item for sublist in list_of_lists for item in sublist]
|
11df76be33e96295e4e5230873368be707ae032f
| 36,106 |
def leftmost(n):
"""
Return the leftmost item in a binary tree.
"""
while n.left is not None:
n = n.left
return n
|
f22dbb611a088e52c37002c1dad3a704033145e0
| 36,107 |
def sdfGetPropList(mol):
"""
sdfGetPropList() returns the list of all property names in molecule mol
"""
sdfkeyvals = mol["keyvals"]
return [pair[0] for pair in sdfkeyvals] if sdfkeyvals else []
|
da897a54a4e0bfc59ca2bd78a2d39d0b64b32db1
| 36,112 |
from typing import Dict
def str_dict_to_bytes(str_dict: Dict[str, str]) -> Dict[bytes, bytes]:
"""
Converts the key and the value of a dict from str to bytes.
"""
out = {}
for key, value in str_dict.items():
out[key.encode()] = value.encode()
return out
|
43b62237b9c80c6e613c363aace17324647086b9
| 36,117 |
def gcd(x, y):
"""
Euclid's algorithm
    - If y divides x evenly, then y is the GCD. Otherwise the GCD is the GCD of y and the remainder of x divided by y.
"""
if x % y == 0:
return y
else:
return gcd(y, x%y)
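# A usage sketch tracing the recursion:
# gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> 6
print(gcd(48, 18))  # 6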
|
db31a4b36929c9b7e83508beee724b94eea27a12
| 36,119 |
def string_breakdown(the_string :str) -> list:
"""Accepts a string and returns a list of tuples of form [(char, count)]"""
checked = []
components = []
for char in the_string:
if char not in checked:
count = the_string.count(char)
checked.append(char)
components.append((char, count))
return components
|
2283297aff80d548625930482249a31284e49eae
| 36,121 |
def read_lines(path):
"""Return list of lines comprising specified file.
Newlines at the end are included.
"""
with open(path, "r") as file:
return file.readlines()
|
e0986c649ab6b64c665e8aa887c2625ad21b77be
| 36,122 |
from typing import List
def _sampling_from_alias_wiki(
alias: List[int],
probs: List[float],
random_val: float,
) -> int:
"""
Draw sample from a non-uniform discrete distribution using Alias sampling.
This implementation is aligned with the wiki description using 1 random number.
:param alias: the alias list in range [0, n)
:param probs: the pseudo-probability table
:param random_val: a random floating point number in the range [0.0, 1.0)
Return the picked index in the neighbor list as next node in the random walk path.
"""
n = len(alias)
pick = int(n * random_val)
y = n * random_val - pick
if y < probs[pick]:
return pick
else:
return alias[pick]
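# A usage sketch; the alias/probs tables below were precomputed elsewhere
# (e.g. with Vose's algorithm, not shown) for the distribution [0.5, 0.3, 0.2].
import random
alias, probs = [0, 0, 0], [1.0, 0.9, 0.6]
counts = [0, 0, 0]
for _ in range(10000):
    counts[_sampling_from_alias_wiki(alias, probs, random.random())] += 1
print([c / 10000 for c in counts])  # roughly [0.5, 0.3, 0.2]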
|
c67d2d698ace15c798cda51049b7ddd880c48a71
| 36,124 |
def get_item_attr(idmap, access):
"""
Utility for accessing dict by different key types (for get).
For example::
>>> idmap = {(1,): 2}
>>> get_item_attr(idmap, 1)
2
>>> idmap = {(1,): 2}
>>> get_item_attr(idmap, {"pk": 1})
2
>>> get_item_attr(idmap, (1,))
2
"""
if isinstance(access, dict):
keys = []
for names in sorted(access):
keys.append(access[names])
return idmap.get(tuple(keys))
elif isinstance(access, int):
return idmap.get((access,))
else:
return idmap.get(access)
|
abaf1250c34b94393851e7a0764af3bf1c3eb116
| 36,127 |
def round_to_second(dt):
"""
    Truncate a datetime object to whole seconds (drop the microseconds).
    :param dt: the datetime to truncate
    :return: the truncated datetime
"""
res = dt.replace(microsecond=0)
return res
|
2be5f589be54bbb113b307d5b38ba5d6e0a13a32
| 36,131 |
from typing import List
from typing import Dict
from typing import Any
def split_docker_list(d_list: List[Dict[str, Any]]):
"""Splits the list of docker in test generation docker and test environment docker.
The split is done by checking for the 'generator' key in the dockers dict defined in the json file.
Only the docker for the test case / test data generation should contain that key. All others, which are used to set
the environment for running the tests, must not have it.
Args:
d_list: list of dictionaries with docker information
Returns:
(list, list):
- list of dictionaries with test generator docker information (can not contain more than one element)
- list of dictionaries with test environment docker information
Raises:
RuntimeError: if there is more than one test generator docker defined.
"""
test_d_list = []
generator_d_list = []
num_test_generator = 0
for docker in d_list:
if "generator" in [*docker]:
generator_d_list.append(docker)
num_test_generator += 1
if num_test_generator > 1:
error_msg = "More than one docker is defined as 'generator'. " \
"Only one item in dockerlist json file should contain the 'generator' key."
raise RuntimeError(error_msg)
else:
test_d_list.append(docker)
return generator_d_list, test_d_list
|
6c01f81db08270c60d08153448dd8989ff87ad4c
| 36,133 |
def round_to_int(number, precision):
"""Round a number to a precision"""
precision = int(precision)
    # integer arithmetic keeps the result an int (precision / 2 would make it a float)
    rounded = (int(number) + precision // 2) // precision * precision
return rounded
|
75f7b23c3f6426dc3ed0c54f3baa491e3c658a14
| 36,138 |
def mod_exponent(base, power, mod):
"""
Modular exponential of a number
:param base : number which is going to be raised
:param power : power to which the number is raised
:param mod : number by modulo has to be performed
:return : number raised to power and modulo by mod [(base ^ power) % mod]
"""
    res = 1  # Initialize result
    base = base % mod  # Reduce base if it is greater than or equal to mod
    while power > 0:
        if power & 1:  # if power is odd, multiply base into the result
            res = (res * base) % mod
        power = power >> 1  # power is now even; halve it
        base = (base * base) % mod
return res
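# A usage sketch: the result matches Python's built-in three-argument pow().
print(mod_exponent(7, 128, 13))  # 3
print(pow(7, 128, 13))           # 3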
|
f0b0de989c8ab38a11ca41126e30234765571ca6
| 36,140 |
import requests
def post_input_file(input_text):
"""Posts a string as a file (multipart/form-data) named 'input' to the
REST API and returns the response.
"""
url = 'http://localhost:5000/parse'
return requests.post(url, files={'input': input_text})
|
4b6613e95f3221e1a92c88d449aa41a562182304
| 36,141 |
def _get_low_pressure(self, g_l, d_l, frac_cells):
"""
Obtains the coefficients of the (projected) lower-dimensional pressures
Parameters
----------
g_l : PorePy object
Lower-dimensional grid.
d_l : Dictionary
Lower-dimensional data dictionary.
frac_cells : NumPy nd-Array
Lower-dimensional fracture cells
Raises
------
ValueError
If the pressure has not been reconstructed
Returns
-------
p_low : NumPy nd-Array
Coefficients of the (projected) lower-dimensional pressures
"""
# Retrieve lower-dimensional reconstructed pressure coefficients
if "recon_p" in d_l[self.estimates_kw]:
p_low = d_l[self.estimates_kw]["recon_p"].copy()
else:
raise ValueError("Pressure must be reconstructed first")
p_low = p_low[frac_cells]
return p_low
|
374c812a39aa045b68d70515d62d13aad69655c3
| 36,147 |
def read_from_y(state):
"""Reads the contents of the Y scratch register."""
return state.y_register
|
f5d6f258f267d4c1392b393ffc680cdf8aa59748
| 36,150 |