content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---
def construct_dataset_url(
request,
service_base,
url):
"""
Construct a url to a dataset, given a *catalog request*, *service_base*
and *url*.
It is assumed that *url* is relative to *service_base*.
"""
return "{}://{}{}{}".format(request.get_type(), request.get_host(),
service_base, url)
|
55ca366f53180c7db5c1ad4ec8f1132dd45c43f5
| 431,071 |
def nb_of_answers(pop):
""" Returns the number of possible
    answers given a population.
"""
return pop[0]*2 + pop[1] + pop[2]
|
ef68316cdd094e6ac48a589fca76f9679dfdc0fa
| 149,838 |
def get_name_from_arn(arn):
"""
Extract the certificate name from an arn.
:param arn: IAM SSL arn
:return: name of the certificate as uploaded to AWS
"""
return arn.split("/", 1)[1]
|
8fe065a035d47ea33e2c13eed8ae0a86290eff42
| 340,216 |
def initialize_P(nS, nA):
"""Initializes a uniformly random model of the environment with 0 rewards.
Parameters
----------
nS: int
Number of states
nA: int
Number of actions
Returns
-------
    P: nested list, indexed as [state][action], where items are tuples representing transition information
P[state][action] is a list of (prob, next_state, reward, done) tuples.
"""
P = [[[(1.0/nS, i, 0, False) for i in range(nS)] for _ in range(nA)] for _ in range(nS)]
return P
|
f3c75c4c1ef5171b5692607ed7a3cba262a29746
| 491,617 |
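A quick structural check of the returned model (a sketch; `initialize_P` as defined above):

P = initialize_P(nS=3, nA=2)
assert len(P) == 3 and len(P[0]) == 2
for s in range(3):
    for a in range(2):
        # each (prob, next_state, reward, done) row is uniform over next states
        assert abs(sum(t[0] for t in P[s][a]) - 1.0) < 1e-9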
def simple_train_test_split(
df,
forecast_length: int = 10,
min_allowed_train_percent: float = 0.3,
verbose: int = 1,
):
"""
Uses the last periods of forecast_length as the test set, the rest as train
Args:
forecast_length (int): number of future periods to predict
        min_allowed_train_percent (float): the training data must contain at least
            forecast_length * min_allowed_train_percent rows; this constrains the
            forecast length from being much larger than the training data
            note this includes NaNs in current configuration
Returns:
train, test (both pd DataFrames)
"""
assert forecast_length > 0, "forecast_length must be greater than 0"
if (forecast_length * min_allowed_train_percent) > int(
(df.shape[0]) - forecast_length
):
raise ValueError(
"forecast_length is too large, not enough training data, alter min_allowed_train_percent to override, or reduce validation number, if applicable"
)
train = df.head((df.shape[0]) - forecast_length)
test = df.tail(forecast_length)
if (verbose > 0) and ((train.isnull().sum(axis=0) / train.shape[0]).max() > 0.9):
print("One or more series is 90% or more NaN in this train split")
if (verbose >= 0) and ((test.isnull().sum(axis=0) / test.shape[0]).max() > 0.9):
print("One or more series is 90% or more NaN in this test split")
return train, test
|
5b7cb203fc97c8a7d6c22c3c8ed300d85ddf4c59
| 554,717 |
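A minimal usage sketch (assumes pandas and numpy; the column name is illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({"series_a": np.arange(100.0)},
                  index=pd.date_range("2021-01-01", periods=100))
train, test = simple_train_test_split(df, forecast_length=10)
assert train.shape[0] == 90 and test.shape[0] == 10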
def converter(s):
"""Takes picobot code, as a string, and returns a picobot dictionary"""
# I made this so I could feed programs returned by GA back into Picobots
# and render them. It takes a Picobot program, formatted as follows:
# """
# 2 xxxS -> W 0
# 2 xxxx -> S 2
# 3 NExx -> W 1
# """
# (spacing important) and turns it into a dictionary that a Picobot object
# can use.
picobotDict = {}
# It splits the input string at the newline characters,
L = s.split('\n')
# and for each item in that list,
for item in L[1:]:
        # slices the item into a properly formatted key and value.
key = (int(item[1]), str(item[3:7]))
value = (str(item[11]), int(item[13]))
# The key and value are added to the dictionary.
picobotDict[key] = value
# The dictionary is returned.
return picobotDict
|
487f1d45d1dfe66becfec50d1e96f3c8a546fae4
| 322,240 |
def subtract(x, y):
"""Takes two numbers and returns their difference."""
return x - y
|
19e71e4574250424203c20ae5111860567e51e19
| 250,861 |
def compress_name(champion_name):
"""To ensure champion names can be searched for and compared,
the names need to be reduced.
The process is to remove any characters not in the alphabet
(apostrophe, space, etc) and then convert everything to lowercase.
Note that reversing this is non-trivial, there are inconsistencies
in the naming scheme used.
Examples:
Jhin -> jhin
GALIO -> galio
Aurelion Sol -> aurelionsol
Dr. Mundo -> drmundo
kha'zix -> khazix
"""
compressed_name = "".join(c for c in champion_name if c.isalpha())
return compressed_name.lower()
|
de76dfd48436ae1ec66dc7e42357e6c52f15719a
| 688,787 |
def get_term_types_from_raw_data(raw_data_dict):
"""
Determines the types of terms defined for ThermoKin based on the
file contents. This allows for generation of latex expressions
based on these terms.
Parameters
----------
raw_data_dict : dict of str:{str:str}
Returns
-------
set of str
"""
term_types = set()
for v in raw_data_dict.values():
for k in v.keys():
term_types.add(k)
return term_types
|
76777f03bb40cefd36c16e7ba61693db41b15e4b
| 272,869 |
def I(x):
"""Ix = x"""
return x
|
d2b1f56bcbc308e2d953d62d4cb9594dba008ed0
| 390,274 |
def camel(term: str) -> str:
"""Convert the given string to camel case.
Args:
term: the term/word/phrase to convert
Returns:
The camel-cased version of the provided identifier.
"""
parts = iter(term.split("_"))
return next(parts) + "".join([chunk.title() for chunk in parts])
|
da60e6074d105159bdc93fc6bf263b5450658edf
| 190,323 |
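For example (a sketch):

assert camel("mapping_client_id") == "mappingClientId"
assert camel("simple") == "simple"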
def array_bounds(height, width, transform):
"""Return the bounds of an array given height, width, and a transform.
Return the `west, south, east, north` bounds of an array given
its height, width, and an affine transform.
"""
w, n = transform.xoff, transform.yoff
e, s = transform * (width, height)
return w, s, e, n
|
dcba40859972eaf049b782e7903196cb1281cd0c
| 434,634 |
import inspect
def get_class(meth):
""" Return the class of method meth
Taken from here: https://stackoverflow.com/a/25959545
"""
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(inspect.getmodule(meth),
meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls
ret = getattr(meth, '__objclass__', None) # handle special descriptor objects
    if ret is None:
raise ValueError("Couldn't find the class of method %s" % meth)
return ret
|
3e72ba44f0c783c9b05a6e5854512b08db07f8f5
| 322,904 |
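A module-level sketch (local classes would fail the `__qualname__` lookup, since `.<locals>` names cannot be resolved via `getattr` on the module):

class Base:
    def greet(self):
        return "hi"

class Child(Base):
    pass

assert get_class(Child().greet) is Base  # bound method, resolved via __qualname__
assert get_class(Child.greet) is Base    # plain function in Python 3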
def remove_comment(words):
"""remove comment string which starts with '#'."""
ret = []
for i in words:
if len(i) <= 0:
continue
if i.startswith("#"):
break
if i.find("#") != -1:
s = i.split("#")[0]
ret.append(s)
break
ret.append(i)
return ret
|
50fd30eb487de8097ca2ea154f93f5228314bbae
| 364,476 |
def parse_range_header(header_value, content_length):
"""
Returns the unit and a list of (start, end) tuples of ranges.
Raises ValueError if header is syntactically invalid or does not contain a range.
See spec for details: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
"""
unit = None
ranges = []
if '=' in header_value:
unit, byte_ranges_string = header_value.split('=')
# Parse the byte ranges.
for byte_range_string in byte_ranges_string.split(','):
byte_range_string = byte_range_string.strip()
# Case 0:
if '-' not in byte_range_string: # Invalid syntax of header value. # lint-amnesty, pylint: disable=no-else-raise
raise ValueError('Invalid syntax.')
# Case 1: -500
elif byte_range_string.startswith('-'):
first = max(0, (content_length + int(byte_range_string)))
last = content_length - 1
# Case 2: 500-
elif byte_range_string.endswith('-'):
first = int(byte_range_string[0:-1])
last = content_length - 1
# Case 3: 500-999
else:
first, last = byte_range_string.split('-')
first = int(first)
last = min(int(last), content_length - 1)
ranges.append((first, last))
if len(ranges) == 0:
raise ValueError('Invalid syntax')
return unit, ranges
|
4e66647ca8ad5b5f856fed41ee729b6c41e3883f
| 140,357 |
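For instance, against a 10,000-byte resource (a sketch):

unit, ranges = parse_range_header('bytes=0-499, -500', content_length=10000)
assert unit == 'bytes'
assert ranges == [(0, 499), (9500, 9999)]  # suffix range -500 maps to the last 500 bytes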
from typing import Optional
from typing import Sequence
from typing import Dict
import dataclasses
def asdict_filtered(obj, remove_keys: Optional[Sequence[str]] = None) -> Dict:
"""Returns the attributes of a dataclass in the form of a dict, with unwanted attributes removed.
Each config group has the term 'name', which is helpful in identifying the node that was chosen
in the config group (Eg. config group = optimizers, nodes = adam, sgd).
However, the 'name' parameter is not required for initializing any dataclasses. Hence it needs to be removed.
Args:
        obj: The dataclass whose attributes will be converted to dict
remove_keys: The keys to remove from the dict. The default is ['name'].
"""
if not dataclasses.is_dataclass(obj):
        raise ValueError("Not a dataclass/dataclass instance")
if remove_keys is None:
remove_keys = ["name"]
# Clean the arguments
args = dataclasses.asdict(obj)
for key in remove_keys:
if key in args:
args.pop(key)
return args
|
d6b11b41a4ce7265b5cba870239dccf73dd6f330
| 65,388 |
def seconds_to_decimal(seconds):
"""Returns the number of seconds as a float.
Decimal places are cut off at two, with no rounding.
"""
decimal = str(round(seconds / 60 / 60, 4)).split(".")
if len(decimal[1]) > 2:
decimal[1] = decimal[1][:2]
return float(".".join(decimal))
|
f31d8a817270f2a43c74584bc1a91949a14a0ade
| 471,134 |
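Consistent with the code (seconds are divided down to hours, then truncated):

assert seconds_to_decimal(5400) == 1.5    # 1.5 hours
assert seconds_to_decimal(4000) == 1.11   # 1.111... truncated, not rounded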
def pixel_shuffle_1d(x, upscale_factor):
"""
Performs a pixel shuffle on the input signal
:param x: The input tensor to be dimension shuffled
:param upscale_factor: The upsample factor
:return: The shuffled tensor
"""
batch_size, channels, steps = x.size()
channels //= upscale_factor
input_view = x.contiguous().view(batch_size, channels, upscale_factor, steps)
shuffle_out = input_view.permute(0, 1, 3, 2).contiguous()
return shuffle_out.view(batch_size, channels, steps * upscale_factor)
|
866f82de3b9de9c02d666417785a687e9c3401eb
| 96,165 |
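A shape check (assumes PyTorch):

import torch

x = torch.randn(2, 4, 5)                 # (batch, channels, steps)
y = pixel_shuffle_1d(x, upscale_factor=2)
assert y.shape == (2, 2, 10)             # channels / 2, steps * 2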
def fixed_anchor_init(dim: int):
"""
Fixed anchors sizes for 2d and 3d
Args:
dim: number of dimensions
Returns:
dict: fixed params
"""
anchor_plan = {"stride": 1, "aspect_ratios": (0.5, 1, 2)}
if dim == 2:
anchor_plan["sizes"] = (32, 64, 128, 256)
else:
anchor_plan["sizes"] = ((4, 8, 16), (8, 16, 32), (16, 32, 64), (32, 64, 128))
anchor_plan["zsizes"] = ((2, 3, 4), (4, 6, 8), (8, 12, 16), (12, 24, 48))
return anchor_plan
|
ffa7f624772a081464b4c6781935fc33a72abf52
| 267,899 |
import torch
def Ux(diags, offdiags, x):
"""
Let U be an upper block-bidiagonal matrix whose
- diagonals are given by diags
- upper off-diagonals are given by offdiags
We would like to compute U@x
"""
n = diags.shape[0]
m = offdiags.shape[0]
# non-square matrix
if n == m:
return torch.einsum("ijk,ik...->ij", diags, x[:-1]) + torch.einsum(
"ijk,ik...->ij", offdiags, x[1:]
)
else:
leaf1 = torch.einsum("ijk,ik...->ij", diags, x)
leaf2 = torch.einsum("ijk,ik...->ij", offdiags, x[1:])
leaf_sum = leaf1[:-1] + leaf2
        # remembering the final row of the matrix, which has only a diagonal element and no upper off-diagonal element
return torch.cat([leaf_sum, leaf1[-1].unsqueeze(0)], dim=0)
|
bb800ac00b6727254afeb223747cf4a7225778a1
| 350,110 |
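A scalar-block sketch, checked against a dense matrix product (assumes PyTorch):

import torch

diags = torch.tensor([[[2.]], [[3.]], [[4.]]])   # n = 3 diagonal 1x1 blocks
offdiags = torch.tensor([[[1.]], [[1.]]])        # m = 2 upper off-diagonal blocks
x = torch.ones(3, 1)
U = torch.tensor([[2., 1., 0.],
                  [0., 3., 1.],
                  [0., 0., 4.]])
assert torch.allclose(Ux(diags, offdiags, x), U @ x)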
def _float(text):
"""Return text as a float (PRIVATE)."""
if text is not None:
try:
return float(text)
except Exception:
return None
|
0ffab7e9ba5055f77f52665456552204a6d390bd
| 618,286 |
import hashlib
def _get_data_and_md5_loop(container, obj_hashkeys):
"""Get the MD5 of the data stored under the given container, one at a time in a loop.
:param container: a Container
:param obj_hashkeys: a list of object hash keys
:return: a dictionary where the keys are the object hash keys and the values are the MD5 hexdigests.
"""
retval = {}
for obj_hashkey in obj_hashkeys:
retrieved_content = container.get_object_content(obj_hashkey)
retval[obj_hashkey] = hashlib.md5(retrieved_content).hexdigest()
return retval
|
78c2337058e0df3f8fe79c26929f910c68ab0254
| 202,618 |
def call_statescript(hardware, function_num):
"""
Call a ECU StateScript method of index function_num
:param hardware: trodes.hardware object
:param function_num: the index of StateScript funciton defined in Trodes
:return message: the message sent from trodes (unpacked by msgpack.unpackb) to see if the calling is successful
"""
message = hardware.ecu_shortcut_message(function_num)
return message
|
030353f0b7732df081c76704770291f2c6b6353f
| 350,643 |
def getVIfromVIC(vicPage):
"""
Retrieves a VI image page name from a VIC page
Returns either a string or False (silently) in case of error.
Arguments:
vicPage: The VIC Page object
"""
try:
text = vicPage.get()
except:
return False
templates = vicPage.templatesWithParams()
for template in templates:
if template[0].find('VIC') == 0:
for param in template[1]:
if param.find('image') == 0:
                    imageName = param[6:].strip()
return "File:" + imageName
return False
|
8cf4ce7cf6b60f6c29ffa7da97036f207ef6754d
| 613,262 |
def geometric_expval(p):
"""
Expected value of geometric distribution.
"""
return 1. / p
|
3afb3adb7e9dafa03026f22074dfcc1f81c58ac8
| 5,647 |
def human_size(bytesize, p=1):
""" Return human-readable string of n bytes
Use p to set the precision
>>> human_size(42424242)
'40,5 MiB'
>>> human_size(42424242, 0)
'40 MiB'
>>> human_size(1024**3, 2)
'1024,00 MiB'
"""
i = 0
while bytesize > 1024:
bytesize /= 1024.0
i += 1
bytesize = (('%.' + str(p) + 'f') % bytesize).replace('.', ',')
return '%s %s' % (bytesize, ('b', 'KiB', 'MiB', 'GiB')[i])
|
c51e3dafd79752373b8e4f97bfab40f9d3acb644
| 168,820 |
def transform_keys(d, f):
"""Transform keys using a function"""
return {f(key): val for key, val in d.items()}
|
63c38c98314f477a824b3063e9979c74cdf75e62
| 546,897 |
def search_services(query, services, quiet):
"""
Search map services for the given query string.
query is a string to search for in featureclasses, databases, maps, or service names
services is a list of MapService objects to search through
quiet is a value in [0, 1, 2] that determines what to return:
0: all info
1: name and url
2: just url
returns a list of string representations of the matching MapService objects.
"""
cards = set()
for svc in services:
if (svc.uses_feature(query) or svc.uses_database(query) or
query.upper() in svc.mxd.upper() or query.upper() in svc.name.upper()):
if quiet >= 2:
cards.add(svc.veryquiet_repr())
elif quiet == 1:
cards.add(svc.quiet_repr())
else:
cards.add(repr(svc))
return list(cards)
|
df92cb9a2eee9425efb8ceaa9ad6a25b34d00b12
| 80,873 |
def get_any_of(getter, possible_keys, default=None):
"""Search for the value of any of `possible_keys` in `dictionary`, returning `default` if none are found.
>>> get_any_of( {"A":1}, ["C","D","A"], "UNDEFINED")
1
>>> get_any_of( {"X":1}, ["C","D","A"], "UNDEFINED")
'UNDEFINED'
"""
for key in possible_keys:
val = getter.get(key.upper(), None)
if val is None:
val = getter.get(key.lower(), None)
if val is not None and val not in ["undefined", "UNDEFINED"]:
return val
else:
return default
|
9a6f5612e00e1fed2734334f2d0c8d8aab628a3f
| 103,887 |
def configs_conflict(a, b):
"""Given two configurations, determine whether they overlap (i.e., have
nonzero parameters for at least one site in common).
"""
a_dict = dict(a)
b_dict = dict(b)
assert a_dict.keys() == b_dict.keys()
for ident, a_param in a_dict.items():
if a_param and b_dict[ident]:
return True
return False
|
16651a579b2a1f4d5b9d1f828987abf4884f6e82
| 258,087 |
def _feature_wrapper(f):
"""Wrapper for feature optimization.
This is a wrapper for use with multi-threaded versions.
Unfortunately Python threads are *terrible*, so this doesn't
actually get used.
Parameters
------
f : list
Array of inputs. f[0] is the name of the feature. f[1]
is the feature object itself. f[2] is N * fpumz (the
vector input to the feature during optimization). f[3]
is the ADMM parameter, rho.
Returns
-------
name : str
The name of the feature. (The same as the input.)
f_j : array
The array of fitted values returned by the feature.
"""
return f[0], f[1].optimize(f[2], f[3])
|
58bb3b126664930b352f4ea2a3a5171a352409d2
| 300,721 |
def get_total_signature_counts(base_array, sig_mask, counts):
"""
Total signature counts are the number of signature
query hits (where it was the only hit) for each
taxon times the number of times the query appeared
in the data.
"""
return ((base_array.T * sig_mask) * counts).T.sum(axis=0)
|
b4d1e1e39a2b6673b5c629c1af22e639192dffd5
| 253,104 |
import base64
def img_file_to_b64str(filename, urlsafe=False):
"""
Open an image file, and convert it into Base64 string.
This can help you store your image in a simple text blob of your SQL databases.
:param filename: The file you want to convert
    :param urlsafe: Use the URL-safe Base64 alphabet. Set this to True if the string will be embedded in a URL or an HTML page.
:return: Converted string.
"""
    # Read the image file as bytes
    with open(filename, 'rb') as f:
        file_byte = f.read()
if urlsafe:
file_base64 = base64.urlsafe_b64encode(file_byte)
else:
file_base64 = base64.standard_b64encode(file_byte)
file_base64_string = file_base64.decode('utf-8')
return file_base64_string
|
b3391a87803173dd1b5407b3f94d53c0bf09a735
| 239,778 |
def check_restrict(move_coord, restrict_list):
"""
Check move eligibility: if move candidate not on edge (restrict) or existing blocks (restrict)
:param move_coord: list
    :param restrict_list: set
:return: bool - True: eligible, False: ineligible
"""
return not bool(set(move_coord) & restrict_list)
|
a461c8b819164e797e3e231600eda3fbc1193ac3
| 155,100 |
def _normalize_paths(input_json):
"""Ensure that all paths are lower case (to prevent potential mis-matches,
since Windows is case-insensitive.
"""
normalized_json = input_json.copy()
for i in range(len(normalized_json['sources'])):
normalized_json['sources'][i] = normalized_json['sources'][i].lower()
return normalized_json
|
1c6bbe075e1409630a5b3778e6c2e86f4fe862ad
| 53,461 |
from typing import Pattern
from typing import Tuple
import re
def parse_vector(s: str, *, num_re: Pattern = re.compile(r"[\d.-]+")) -> Tuple[float, ...]:
"""Convert a vector string into a tuple."""
return tuple(map(float, num_re.findall(s)))
|
9221798ef280f9d82f3282071ea16affa441f3d6
| 631,121 |
def to_dict(et):
"""Convert Etree to dictionary
Values picked from node text - keys from tags.
"""
result = {}
for item in et:
if item.text:
result[item.tag] = item.text
else:
result[item.tag] = to_dict(item)
return result
|
8b6dafcce9410d677bb9212ffd057c2906680cc1
| 276,089 |
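For example, with a small ElementTree fragment (a sketch):

import xml.etree.ElementTree as ET

et = ET.fromstring("<root><name>ada</name><job><title>dev</title></job></root>")
assert to_dict(et) == {"name": "ada", "job": {"title": "dev"}}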
def constant_fn(val):
"""
Create a function that returns a constant
It is useful for learning rate schedule (to avoid code duplication)
:param val: (float)
:return: (function)
"""
def func(_):
return val
return func
|
19e3ef8b385b63d44f2c6695263c5f1049670e45
| 260,417 |
def filter_list(func, alist):
""" Filters a list using a function.
:param func: A function used for filtering.
:param alist: The list to filter.
:returns: The filtered list.
>>> from dautils import collect
>>> alist = ['a', 'a.color', 'color.b']
>>> collect.filter_list(lambda x: x.endswith('color'), alist)
['a.color']
"""
return [a for a in alist if func(a)]
|
f6ee47de5a3d0e892116e341050cf1b1bd7d9e71
| 439,385 |
def num_planets(tree):
"""Returns the total number of planets in the tree
:param tree: lxml etree
:returns: integer representing number of planets.
"""
return int(tree.xpath("count(.//planet)"))
|
653bda879ac5c944adec05d4b4a4c59691913288
| 334,455 |
def calculate_weights(layer_nodes):
"""
Calculate the number of weights required for a neural network,
where layer_nodes contains the number of nodes for each layer,
excluding bias units.
"""
n = len(layer_nodes)
total = 0
for i in range(n-1):
prev = layer_nodes[i]+1 # Add 1 to include bias unit
curr = layer_nodes[i+1]
total += prev*curr
print("Layer {}: d+1={} -> {} = {}".format(i+1,prev,curr,prev*curr))
return total
|
53da25069d4ce42ce14a1c7cd6f0752f6163dc0a
| 427,386 |
def Decompress(compmap,mask,emptymap):
"""
Decompressing map from 1D to 2D with missing values
:param compmap: compressed map
:param mask: mask map
    :param emptymap: map containing only zeros
:return: decompressed 2D map
"""
dmap=emptymap.copy()
dmap[~mask.ravel()] = compmap[:]
dmap = dmap.reshape(mask.shape)
return dmap
|
01102274d8af291792d8745a93115c179fc23a93
| 310,162 |
def create_waveform_request(msg_id, time_range, station):
"""Create the text for a waveform request for the specified inputs"""
request_text = '''BEGIN GSE2.0
MSG_TYPE REQUEST
MSG_ID {msg_id}
TIME {time_range}
STA_LIST {station}
CHAN_LIST *
WAVEFORM GSE2.0
STOP
'''.format(msg_id=msg_id, time_range=time_range, station=station)
return request_text
|
d0f9c4aa24b8327186a79179d4a217cd0465fd14
| 344,087 |
def construct_package_url(base_url, dist, arch, sha256):
"""
Construct a package URL for a debian package using the 'by-hash' path.
See: https://wiki.debian.org/DebianRepository/Format#indices_acquisition_via_hashsums_.28by-hash.29
Example: http://us.archive.ubuntu.com/ubuntu/dists/bionic/by-hash/SHA256/
"""
return "{base_url}/dists/{dist}/binary-{arch}/by-hash/SHA256/{sha256}".format(
base_url = base_url,
dist = dist,
arch = arch,
sha256 = sha256,
)
|
acee4d440414fd65cca9d895857750dd3a4bee55
| 612,174 |
def _is_pkl_filepath(filepath):
"""Predicate the filepath is a pickle file."""
return filepath.endswith('.pkl')
|
f694c874d33fc413e4416fa0f005b6e810de3247
| 484,452 |
def longest_common_prefix(seq):
"""Find the longest common prefix between all items in sequence"""
seq0 = seq[0]
for i, seq0i in enumerate(seq0):
for j in seq:
if len(j) < i or j[i] != seq0i:
return i
return len(seq0)
|
409747d236f3cd6ee3e3139cf0624f9e437ad99c
| 159,098 |
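For example (a sketch; note the function returns the prefix length, not the prefix itself):

assert longest_common_prefix(["interspecies", "interstellar", "interstate"]) == 5
assert longest_common_prefix(["abc", "ab"]) == 2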
def is_list(value):
"""
Tests the value to determine whether it is a list.
:param any value:
    :return: True if the value is a list (an instance of the list class)
>>> is_list( 'Hello' )
False
>>> is_list( ['Hello'] )
True
"""
return isinstance(value, list)
|
bffc34bdfe9421418e36f303c3cd5afc7ed3c8dd
| 659,797 |
def _clean_alignment(row, decomp):
"""
Cleaning function for a pd.DataFrame to return the number
of components used in the decomposition.
Parameters
----------
    row : pd.Series
A pd.Series object denoting the used alignment stimuli.
Must contain the substring provided in `decomp`
decomp : str
Must be a str in ['pca', 'srm']
"""
try:
decomp_n_comps, stim = row['alignment'].split(sep=' of ')
n_comps = decomp_n_comps[len(decomp):]
except ValueError: # Too many values to unpack
n_comps, stim = 'Full', row['alignment']
return {'n_comps': n_comps, 'stim': stim}
|
1cd15e47d3f471ad4d0d61f9e151b603a0d8b6c8
| 84,295 |
def get_resource_with_md5(project, status):
"""
Return a queryset of CodebaseResource from `project` that have `status` and
a non-empty size and md5.
"""
return (
project.codebaseresources.status(
status=status,
)
.exclude(md5__exact="")
.exclude(size__exact=0)
)
|
b431899f29bb8f865767c4c070dfbd37a124d431
| 480,844 |
import ipaddress
def validate_ip_address(address):
""" Validates an IP address """
try:
ip = ipaddress.ip_address(address)
is_valid = True
# print("IP address {} is valid. The object returned is {}".format(address, ip))
except ValueError:
is_valid = False
# print("IP address {} is not valid".format(address))
return is_valid
|
2d54fce0e4072007cd1efc1490cfef1fa03ac71b
| 581,516 |
def factory_class_name(model_class_name):
"""Return factory class name from model class"""
return model_class_name + 'Factory'
|
acfde8e129fb44f2db108a778b15938efbcc237b
| 22,671 |
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
"""A simple function to generate one run entry.
:param query_id: query id
:param doc_id: document id
:param rank: entry rank
:param score: entry score
:param run_id: run id
"""
return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}'
|
657c59fea34e4aed2159337360c973dc99b53082
| 709,430 |
import re
def alphanum_key(string):
"""Return a comparable tuple with extracted number segments.
Adapted from: http://stackoverflow.com/a/2669120/176978
"""
convert = lambda text: int(text) if text.isdigit() else text
return [convert(segment) for segment in re.split('([0-9]+)', string)]
|
0e5e3f1d6aa43d393e1fb970f64e5910e7dc53fc
| 4,236 |
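Used as a sort key it yields natural ordering (a sketch):

names = ['file10', 'file2', 'file1']
assert sorted(names, key=alphanum_key) == ['file1', 'file2', 'file10']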
import math
def conditional(model, player, state_set, x):
"""
>>> from simple_model import *
>>> m = SimpleModel(0.1, 0.95)
>>> conditional(m, 0, set([(1, 1, 1)]), (1,1,0))
0.95
>>> conditional(m, 0, set([(1, 1, 1)]), (1,0,0))
0.0
>>> conditional(m, 0, get_satisfying_states(m), (1,0,0))
0.10000000000000002
>>> from thomas_model import *
>>> m = ThomasModel(0.1, 0.25)
>>> conditional(m, 0, get_satisfying_states(m), (1,1,1,0,0))
1.0
>>> conditional(m, 0, set([(1, 1, 1, 0, 0), (1, 1, 1, 0, 1), (1, 1, 1, 1, 1), (1, 1, 1, 1, 0)]), (1,1,1,0,0))
0.25000000000000017
>>> conditional(m, 0, set([(1, 1, 1, 0, 0), (1, 1, 1, 0, 1), (1, 1, 1, 1, 1), (1, 1, 1, 1, 0)]), (1,1,1,1,1))
1.0
>>> from extended_thomas_model import *
>>> m = ExtendedThomasModel(0.1, 0.25, 2)
>>> conditional(m, 0, get_satisfying_states(m), (1,1,0,0,0,0,0,0,0,0,0))
1.0
"""
possible_states = set(model.graphs[player][x])
state_set = state_set.intersection(possible_states)
norm = sum([model.probs[x] for x in possible_states])
if len(state_set) > 0:
p = sum([model.probs[x] for x in state_set])
p = math.exp( math.log(p) - math.log(norm) )
else:
p = 0.0
assert norm > 0
return p
|
3f5625f38b7246d6498d7320637e332ccd7ce693
| 100,765 |
import math
def get_nearest_location(x, places):
"""
Given a location `x`, and a list of locations, `places`,
returns the list index corresponding to the minimum distance,
and the minimum distance.
"""
min_dist, min_i = 1e10, 0
for i, place in enumerate(places):
name = place['name']
label_location = place['label_location']
latitude = float(label_location['latitude'])
longitude = float(label_location['longitude'])
r2 = (x[0] - latitude)**2 + (x[1] - longitude)**2
if r2 < min_dist:
min_dist = r2
min_i = i
return places[min_i], math.sqrt(min_dist)
|
251e97cc0ad2117f40e68e9690dfb6094e18205e
| 646,234 |
def PDFObjHasType(o, ty):
"""Return True if o, a PDF Object, has type ty."""
if o is None:
return False
return o[0] == ty
|
80df572170343174a19df80c36910f56be7f06b0
| 242,128 |
def __tokenize_text(text):
"""Convert text to lowercase, replace punctuation, remove words that are two character or less, and split the resulting string into a list.
>>> __tokenize_text('hi. I am, a, sentence.')
['sentence']
"""
text = text.lower()
punctuation = ',.<>:;"\'~`@#^*()-_+=|\\/?!&'
for p in punctuation:
text = text.replace(p, ' ')
words = text.split()
final_list = []
for word in words:
if len(word) > 2:
final_list.append(word)
return final_list
|
5c6af33377ac9443dbbe5f935773de85fc6748d8
| 501,447 |
def get_file_as_str(filename):
"""Read file and return ``str``."""
with open(filename, 'r') as f_in:
file_contents = f_in.read()
return file_contents
|
d447444d4df4a431180069e4d9d8f5a720e70f83
| 518,876 |
from pathlib import Path
def get_paths_from_file(filepath, must_exist=True):
"""Given a string filepath,, return the resolved path and parent."""
inpath = Path(filepath).expanduser().resolve()
if must_exist and not inpath.exists():
raise FileNotFoundError(filepath)
dirpath = inpath.parent
return inpath, dirpath
|
00dddeb1ad378037b5f4b2932660409c0d272040
| 206,918 |
def squash_duplicate_values(values):
"""Remove duplicates from values.
    If a key has already been seen, later values for that key are dropped.
Args:
values (list): List of value tuples.
Returns:
        values (list): List of value tuples with duplicates removed.
"""
tmp = {}
for item in values:
if item[0] not in tmp:
tmp[item[0]] = (item[1], item[2])
return [(key, tmp[key][0], tmp[key][1]) for key in tmp]
|
4183700c6009c44cfb52fbd3df692b1038287d10
| 371,342 |
def remove_items(headers, condition):
"""
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
"""
    removed = {}
    # Materialize the keys first: popping while iterating a live filter over
    # the dict raises "dictionary changed size during iteration" in Python 3.
    keys = list(filter(condition, headers))
    removed.update((key, headers.pop(key)) for key in keys)
return removed
|
81957656a66584dbf51a1d6d9fc8b5a47a17ec0b
| 472,991 |
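For example (a sketch; header names are illustrative):

headers = {'content-type': 'text/plain', 'x-internal-id': '42'}
removed = remove_items(headers, lambda k: k.startswith('x-internal-'))
assert removed == {'x-internal-id': '42'}
assert headers == {'content-type': 'text/plain'}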
def is_numeric(series):
""" Checks whether pandas series dtype is a float or integer.
Params:
series (pd.Series): Pandas series to check
Returns:
bool
"""
return series.dtype == 'float' or series.dtype == 'int'
|
f1b603acbda07bf558b111fd9a942b391b0c4fbc
| 235,130 |
import six
def _fetch_obj(node, subnodes):
"""Walk through nested dictionaries recursively,
retrieve the object at the requested location.
"""
assert isinstance(node, dict)
assert isinstance(subnodes, list)
assert len(subnodes) > 0
assert isinstance(subnodes[0], six.string_types)
if len(subnodes) == 1:
if subnodes[0] == '':
return node
else:
return node[subnodes[0]]
else:
return _fetch_obj(node[subnodes[0]], subnodes[1:])
|
d202f7852b0ca5794cb179e4bc6917a6ad8abe78
| 193,344 |
def updatedict(original, updates):
"""
Updates the original dictionary with items in updates.
If key already exists it overwrites the values else it creates it
Args:
original: original dictionary
updates: items to be inserted in the dictionary
Returns:
dict: updated dictionary
"""
for key, value in updates.items():
if key not in original or type(value) != type(original[key]):
original[key] = value
elif isinstance(value, dict):
original[key] = updatedict(original[key], value)
else:
original[key] = value
return original
|
4db020bf61051ae2af70d9d4d60c95a65bd8cd65
| 228,157 |
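For example, nested keys merge recursively while type mismatches overwrite (a sketch):

original = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
updates = {'db': {'port': 6432}, 'debug': 'verbose'}
merged = updatedict(original, updates)
assert merged == {'db': {'host': 'localhost', 'port': 6432}, 'debug': 'verbose'}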
def fak(n):
""" Berechnet die Fakultaet der ganzen Zahl n. """
erg = 1
for i in range(2, n+1):
erg *= i
return erg
|
9df6f4fa912a25535369f4deb0a06baef8e6bdcc
| 2,994 |
def bmi_category(bmi):
"""
Function to calculate bmi category
Parameters:
bmi (float): BMI value
Returns:
bmi category (string)
"""
    # Open-ended ranges leave no gaps between categories (e.g. bmi == 24.95 or bmi == 40.0)
    if bmi < 18.5:
        return "Underweight"
    elif bmi < 25:
        return "Normal weight"
    elif bmi < 30:
        return "Overweight"
    elif bmi < 35:
        return "Moderately obese"
    elif bmi < 40:
        return "Severely obese"
    else:
        return "Very severely obese"
|
dfa982ba955b2e4710927786b0441c6d285df691
| 505,261 |
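A boundary check (a sketch):

assert bmi_category(22.0) == "Normal weight"
assert bmi_category(40.0) == "Very severely obese"  # exact boundary lands in the top category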
import math
def get_sample_size(population_size, confidence_level, confidence_interval):
""" Returns the approaprate sample size for a population.
Parameters
----------
population_size: int
Size of population.
confidence_level: float
Confidence Level.
confidence_interval: float
Confidence Interval.
Returns
-------
int
Sample Size
"""
Z = 0.0 # noqa
p = 0.5 # noqa
e = confidence_interval / 100.0 # noqa
N = population_size # noqa
n_0 = 0.0 # noqa
n = 0.0 # noqa
confidence_level_constant = (
[50, 0.67],
[68, 0.99],
[90, 1.64],
[95, 1.96],
[99, 2.57],
)
# LOOP THROUGH SUPPORTED CONFIDENCE LEVELS AND FIND THE NUM STD
# DEVIATIONS FOR THAT CONFIDENCE LEVEL
for i in confidence_level_constant:
if i[0] == confidence_level:
Z = i[1] # noqa
if Z == 0.0: # noqa
return -1
# CALC SAMPLE SIZE
n_0 = ((Z ** 2) * p * (1 - p)) / (e ** 2) # noqa
# ADJUST SAMPLE SIZE FOR FINITE POPULATION
n = n_0 / (1 + ((n_0 - 1) / float(N))) # noqa
return int(math.ceil(n))
|
e442ec2c50143f5b91bf56190f14b7578917e1bd
| 200,589 |
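The textbook case (a sketch):

assert get_sample_size(10000, 95, 5) == 370   # Z = 1.96, p = 0.5, e = 0.05
assert get_sample_size(10000, 42, 5) == -1    # unsupported confidence level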
import re
def compile_masks(masks):
"""
Compiles a list of regular expressions.
:param masks: the regular expressions to compile
:type masks: list(str) or str
:returns: list(regular expression object)
"""
if not masks:
masks = []
elif not isinstance(masks, (list, tuple)):
masks = [masks]
return [
re.compile(mask)
for mask in masks
]
|
5650b79ee1a7a2525afa1ae445b89e4482bc4996
| 537,854 |
def _get_resource_loc(model_id):
"""returns folder_id and file_id needed to find location of edited photo"""
""" and live photos for version <= Photos 4.0 """
# determine folder where Photos stores edited version
# edited images are stored in:
# Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg
# where XX and Y are computed based on RKModelResources.modelId
# file_id (Y in above example) is hex representation of model_id without leading 0x
file_id = hex_id = hex(model_id)[2:]
    # folder_id (XX in above example) is the first two chars of model_id converted to hex
# and left padded with zeros if < 4 digits
folder_id = hex_id.zfill(4)[0:2]
return folder_id, file_id
|
12368df9345dd73ae69799d44b16dc988b48fd74
| 463,626 |
import json
def readJSONDict(filename):
"""
Retrieve stored dict from JSON file
"""
jsondict = {}
with open(filename, 'rb') as infile:
jsondata = infile.read()
jsondict = json.loads(jsondata)
return jsondict
|
7ce79e349538ff23409c5cebcce7317a1e2fe3ac
| 647,269 |
def true_false_converter(value):
"""
Helper function to convert booleans into 0/1 as SQlite doesn't have a boolean data type.
Converting to strings to follow formatting of other values in the input. Relying on later part of pipeline to change to int.
"""
if value == "True":
return '1'
elif value == "False":
return '0'
else:
return value
|
b81ac423f3b176e57cf9087c5bc241ff64fdcc85
| 684,973 |
import re
def _is_valid_slug( slug ):
""" Returns true if slug is valid. """
    VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
return VALID_SLUG_RE.match( slug )
|
4f3a428fbf3074eba6f990f60baec267c01b912e
| 521,290 |
def field_is_read_only(field_ir):
"""Returns true if the field is read-only."""
# For now, all virtual fields are read-only, and no non-virtual fields are
# read-only.
return field_ir.write_method.read_only
|
4969a6886138dda4b5104112dc9174443f8a860e
| 465,453 |
def calcule_promedios(matriz):
"""Función que recibe una matriz y devuelve una lista con los promedios de
cada fila"""
promedios = []
for lote in matriz:
promedio = 0
for item in lote:
promedio = promedio + item
promedio = promedio / len(lote)
promedios.append(promedio)
return promedios
|
6f3d971c5fc2f60b5984dcc3d98492c15d006968
| 131,900 |
def get_tokens_from_line(line):
"""
Given a line split it into tokens and return them.
Tokens are runs of characters separated by spaces. If there are no
tokens return an empty list.
Args:
line (str): line to convert to tokens
Returns:
list(str): The tokens
"""
# Does line have any content
if not line:
return []
# Does the line have any content after splitting it
line_tokens = line.split()
if not line_tokens:
return []
return line_tokens
|
59eb48127eecc04c3678e928ce6786feb2e98215
| 263,429 |
from typing import List
def find_min_conquered(lst: List[int]) -> int:
"""
Return the smallest value in <lst>. Can also be designed with parameters
b, and e, rather than slicing the lists in the current algorithm. O(lg(n))
>>> find_min_conquered([3, 4, 5, 6, 7])
3
>>> find_min_conquered([5, 6, 7, 3, 4])
3
>>> find_min_conquered([7, 3, 4, 5, 6])
3
>>> find_min_conquered([1, 2, 3, 4, 5, 6, 7, 8, 9])
1
>>> find_min_conquered([9, 12, 15, 17, 20, 1, 2, 3, 4, 5, 6, 7, 8])
1
>>> find_min_conquered([9, 12, 15, 1, 3, 4, 5, 6, 7, 8])
1
"""
n = len(lst)
if n < 2:
# then there is 1 item in the list.
return lst[0]
elif lst[0] < lst[-1]:
# then the list is not rotated
return lst[0]
    elif lst[1] < lst[0]:
        # the rotation point is at index 1, so lst[1] is the minimum
        return lst[1]
else:
# divide the list in half. Note: n is at least 3.
m = n // 2
if lst[m - 1] < lst[-1]:
# then the smallest must be before m
return find_min_conquered(lst[:m])
else:
return find_min_conquered(lst[m:])
|
7d95a17a5867866508b8aeb049dab83ee20118e7
| 566,591 |
def _build_rpc_profiling_key(
exec_type, func_name, current_worker_name, dst_worker_name
):
"""
Builds the key that RPC calls are profiled with using the autograd profiler.
This will be the name of the corresponding Event recorded in the profiler.
Args:
exec_type (RPCExecMode): Type of RPC/RRef call
func_name (str): Name of function being profiled.
current_worker_name (str): Name of current worker.
dst_worker_name (str): Name of the destination worker.
Returns:
String representing profiling key
"""
profile_key = "rpc_{rpc_type}#{func_name}({current_worker} -> {dst_worker})".format(
rpc_type=exec_type.value,
func_name=func_name,
current_worker=current_worker_name,
dst_worker=dst_worker_name,
)
return profile_key
|
257a34b6499c60773e8dd9a28eace374dfd10f1c
| 376,481 |
import six
def get_exception_message(exc):
"""Method to get exception message independent from python version.
"""
return exc.message if six.PY2 else str(exc.args[0])
|
6b20a3d6cfd89673d285f8d40195c9b3e18a7cfa
| 369,252 |
def get(template_url, session):
"""Gets the needed capabilities for the CloudFormation stack """
cfn_client = session.client('cloudformation')
template_details = cfn_client.get_template_summary(
TemplateURL=template_url)
try:
stack_capabilities = template_details['Capabilities']
except KeyError:
# May not be needed since it's not required when creating or updating a
# stack
stack_capabilities = []
return stack_capabilities
|
0596f539787d3b91109a1885a5a403f9124564af
| 196,721 |
def prod(itr, start=1) :
"""
Compute product between all elements of an iterable.
"""
val = start
for el in itr :
val *= el
return val
|
54080c6dd3471cbbbd2efdac125ec309c68bbe06
| 67,005 |
def _is_positive(integer_string):
"""
Check if a string is a strictly positive integer.
"""
return int(integer_string) > 0
|
10d1f93b544031e75364d5fe74c48ab76c030a06
| 560,293 |
import collections
def stringify(metrics_headers=()):
"""Convert the provided metrics headers to a string.
Iterate over the metrics headers (a dictionary, usually ordered) and
return a properly-formatted space-separated string
(e.g. foo/1.2.3 bar/3.14.159).
"""
metrics_headers = collections.OrderedDict(metrics_headers)
return ' '.join(['%s/%s' % (k, v) for k, v in metrics_headers.items()])
|
8b6214a0cfa5540d3515bc1e1a00e0970b4246b4
| 188,737 |
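For example (a sketch; header names are illustrative):

headers = {'gl-python': '3.9.1', 'gapic': '0.90.4'}
assert stringify(headers) == 'gl-python/3.9.1 gapic/0.90.4'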
def is_js_registered(app, filename):
"""
Checks whether a given js file has been added to the Sphinx register.
"""
for js_file, _ in app.registry.js_files:
if filename == js_file:
return True
return False
|
dc80ae6852d16c8a75765fbd9bbfd8522b07ef31
| 147,796 |
def _after_name(method_name):
# type: (str) -> str
"""Return the name of after check method.
>>> _after_name('read')
'can_read__after'
"""
return 'can_' + method_name + '__after'
|
1515c535fd268d966f358b4b30b3325e744b8bba
| 431,292 |
def remove_list_redundancies(l):
"""
Used instead of list(set(l)) to maintain order
    Keeps the last occurrence of each element
"""
reversed_result = []
used = set()
for x in reversed(l):
if x not in used:
reversed_result.append(x)
used.add(x)
reversed_result.reverse()
return reversed_result
|
f36015841a6da69eca831535cdb86b7b1b73831d
| 169,537 |
def shift_entries(lst, idx, shift):
"""Shift select entries by [shift] """
return [int(x)+shift if i in idx else x for i, x in enumerate(lst)]
|
0308860aa4c33d9e6b65887a0e411cc0a7835b29
| 148,737 |
def extended_capwords(address: str, exceptions: list) -> str:
"""Capitalize words excluding indicated expressions.
Address points very often contain suffixes
or Roman numerals that shouldn't be capitalized.
Args:
address (str): A phrase to capitalize.
exceptions (list): A list of expressions that shouldn't be capitalized.
Returns:
str: An address with capitalized words.
"""
words = address.split()
capitalized = []
for word in words:
if word not in exceptions:
word = word.capitalize()
capitalized.append(word)
return ' '.join(capitalized)
|
f4d7cb9a98bfff0f5cf793b32a8ffffa2cc6439c
| 256,986 |
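For example, Roman numerals stay intact while ordinary words are capitalized (a sketch):

assert extended_capwords('jana pawla II', ['II', 'III']) == 'Jana Pawla II'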
def _unpad_data(data: bytes) -> bytes:
"""
Removes padding from the data according to PKCS7 standard and returns data such that
len(new_data) = len(data) - pad_len
:param data: the data to unpad
:return: the original data without padding
"""
return data[:-data[-1]]
|
f791643ecc3ca3384f21759a4e66615dd81edbcf
| 226,244 |
def _is_ingredient_heading_1(line):
"""Returns True for the first heading line of ingredients section."""
return line.strip().lower() == 'amount measure ingredient -- preparation method'
|
81c9a552087b20c6138e303ff833a96d1d01c3fc
| 639,298 |
def column(lexdata, lexpos):
""" Calculate the (1-indexed) column number given the input data and the current position. """
last_cr = lexdata.rfind('\n', 0, lexpos)
if last_cr < 0:
return lexpos + 1
else:
return lexpos - last_cr
|
3fc081d7fe984eb577f13d9457d96c65f784722d
| 569,207 |
def traceset2xy(tset, xpos=None, ignore_jump=False):
"""Convert from a trace set to an array of x,y positions.
Parameters
----------
tset : :class:`TraceSet`
A :class:`TraceSet` object.
xpos : array-like, optional
If provided, evaluate the trace set at these positions. Otherwise
the positions will be constructed from the trace set object iself.
ignore_jump : bool, optional
If ``True``, ignore any jump information in the `tset` object
Returns
-------
:func:`tuple` of array-like
The x, y positions.
"""
return tset.xy(xpos, ignore_jump)
|
ffc309e185dbe536e7574798cf2162aaaf24ecd1
| 317,154 |
import textwrap
def format_code(source_code):
"""
Format source code
:param source_code: Source code as str
:return: Formatted source code as str
"""
return textwrap.dedent(source_code)
|
6f058188d2551eb2888d831e6dffc19fcbf500c3
| 538,202 |
import math
def _rescale_read_counts_if_necessary(n_ref_reads, n_total_reads,
max_allowed_reads):
"""Ensures that n_total_reads <= max_allowed_reads, rescaling if necessary.
This function ensures that n_total_reads <= max_allowed_reads. If
n_total_reads is <= max_allowed_reads, n_ref_reads and n_total_reads are just
returned. However, if n_total_reads > max_allowed_reads, then n_ref_reads and
n_total_reads are rescaled to new values n_ref_reads' and n_total_reads' so
that n_total_reads' == max_allowed_reads and n_ref_reads' / n_total_reads' ~
n_ref_reads / n_total_reads.
Args:
n_ref_reads: int. Number of reference supporting reads.
n_total_reads: int. Total number of reads.
max_allowed_reads: int. The maximum value allowed for n_total after
rescaling, if necessary.
Returns:
New values for n_ref_reads and n_total_reads.
"""
if n_total_reads > max_allowed_reads:
ratio = n_ref_reads / (1.0 * n_total_reads)
n_ref_reads = int(math.ceil(ratio * max_allowed_reads))
n_total_reads = max_allowed_reads
return n_ref_reads, n_total_reads
|
d09b343cee12f77fa06ab467335a194cf69cccb4
| 6,521 |
def _from_rgb(rgb):
"""translates an rgb tuple of int to a tkinter friendly color code
"""
return "#%02x%02x%02x" % rgb
|
464cab5007935824b8b7af28e97cc882ec0653ee
| 33,016 |
from bs4 import BeautifulSoup
def add_blockquote_class(soup_body: object) -> object:
"""For each blockquote in the input bs4 object, change the class to 'blockquote text-muted' so that the bootstrap
framework blockquote styling will be applied.
Args:
soup_body: bs4 object input
Returns:
modified bs4 object
"""
if not isinstance(soup_body, BeautifulSoup):
raise TypeError('Input must be a bs4.BeautifulSoup object')
for bq in soup_body.find_all('blockquote'):
bq['class'] = 'blockquote'
for p in bq.find_all('p'):
for dash in ['-- ', '--- ', '– ', '— ', '– ', '— ']:
                if p.string and p.string[:len(dash)] == dash:
bqf = soup_body.new_tag('footer')
bqf['class'] = 'blockquote-footer'
bqf.string = p.string.replace(dash, '')
p.replace_with(bqf)
return soup_body
|
a80578fa735fb31a81d4a2e17fab2d56fa4b8c54
| 309,502 |
def tabState(state):
"""Returns css selector based on tab state"""
return 'active' if state else ''
|
2db0114c6e91936f880e3dfe2ca959ccdd5e9644
| 563,600 |
def qa(country, uv):
"""
Do qa on uv-index value:
Rule:
0 <= uv < 12 --> pass (P)
12 <= uv < 17 --> doubtful (D)
>= 17 --> error (E)
        non-float --> not applicable (NA); anything else (negative, NaN) --> error (E)
"""
if not isinstance(uv, float):
return 'NA'
if 0 <= uv < 12:
return 'P'
elif 12 <= uv < 17:
return 'D'
elif uv >= 17:
return 'E'
else:
return 'E'
|
680c5cf21afbf7ab54e10c0b0bb0f05daea2873c
| 523,589 |
def expected_workload(L,p):
"""
Return expected number of votes that need to be counted.
L = input list of precincts: (size,name) pairs.
p = input list of auditing probabilities.
"""
return sum([p[i]*L[i][0] for i in range(len(L))])
|
33434476c95780660ccef05d1de3f30ee429ffdd
| 116,045 |
def is_pixel_inside(dim, coord):
"""
Check if the pixel coordinate is inside the image
"""
if (len(dim)<2) or (len(coord)<2):
raise Exception("Dimensions should be >= 2! Check!")
    return (0 <= coord[0] <= dim[0]) and (0 <= coord[1] <= dim[1])
|
337de0143a35f06a0e86019901cb9a670b4b074d
| 90,682 |
def is_palindrome(input_string):
"""
Checks if a string is a palindrome.
:param input_string: str, any string
:return: boolean, True if palindrome else False
>>> is_palindrome("madam")
True
>>> is_palindrome("aabb")
False
>>> is_palindrome("race car")
False
>>> is_palindrome("")
True
"""
if input_string is None or len(input_string) == 0:
return True
if input_string[0] != input_string[-1]:
return False
return is_palindrome(input_string[1:-1])
|
165df98dd983a2d84ad30bafbb70168d9599bd8d
| 47,687 |