content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
def blueScaleForPPMsize( ppmSize ):
"""
Returns blueZone value for given PPM size,
up to which overshoots will be suppressed.
"""
return (float(ppmSize) - 2.04) / 1000.0
|
fa558d5a0e7e7c2066f1d3bc1eabf1b0fd1eea33
| 695,071 |
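A quick worked example of blueScaleForPPMsize above; the 42 ppm value is only illustrative, not from the original:

# Overshoot suppression for a 42 ppm size gives a blueScale of about 0.03996.
print(blueScaleForPPMsize(42))  # roughly 0.03996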
def UrlEscape(url):
"""Scapes XML entities.
Args:
url: potentially with XML invalid characters.
Returns:
same URL after replacing those with XML entities.
"""
return url.replace("'", "&apos;").replace('"', "&quot;")
|
f51933904238114bd6b5e135d1b27710aa3858c8
| 695,072 |
def format_orbit_notes(text: str) -> str:
"""Given orbit notes returns them as HTML"""
template = '<div class="orbit-notes"><p>{}</p></div>'
html_text = text.replace("\n", "</p><p>")
return template.format(html_text)
|
259ede244822cec07a3abff2f566643d8b0d117e
| 695,074 |
def remove_empty_keys(dirty_dict):
"""
Remove keys with empty (falsy) values from a dictionary. This is useful when passing JSON
payloads in which a null field would otherwise overwrite the existing value with null.
"""
clean_dict = {}
for k, v in dirty_dict.items():
if v:
clean_dict[k] = v
return clean_dict
|
53e6dbd89640ba3c3529a055871f11093af6897a
| 695,076 |
def _ValidateConfigFile(config_contents, required_parameters):
"""Validates the config file contents, checking whether all values are
non-empty.
Args:
config_contents: A config dictionary.
required_parameters: A list of parameters to check for.
Returns:
True if valid.
"""
for parameter in required_parameters:
if parameter not in config_contents:
return False
value = config_contents[parameter]
if not value or type(value) is not str:
return False
return True
|
a115792ea1b4087f50e78718496874ea24debbf6
| 695,078 |
def dictmerge(D, others):
"""
Merge a dictionary with other dictionaries.
**Parameters**\n
D: dict
Main dictionary.
others: list/tuple/dict
Other dictionary or composite dictionarized elements.
**Return**\n
D: dict
Merged dictionary.
"""
if type(others) in (list, tuple): # Merge D with a list or tuple of dictionaries
for oth in others:
D = {**D, **oth}
elif type(others) == dict: # Merge D with a single dictionary
D = {**D, **others}
return D
|
a082416be21c998021decec68edbc336fc8382cd
| 695,079 |
def _select_features(example, feature_list=None):
"""Select a subset of features from the example dict."""
feature_list = feature_list or ["inputs", "targets"]
return {f: example[f] for f in feature_list if f in example}
|
3c8c0363b45ca0d6f44642979c30fa1854371b41
| 695,098 |
import re
def parse_age(age):
"""
Convert a human-friendly duration string into an ISO 8601 duration
Parameters
----------
age : str
Returns
-------
str
"""
m = re.fullmatch(r"(\d+)\s*(y(ear)?|m(onth)?|w(eek)?|d(ay)?)s?", age, flags=re.I)
if m:
qty = int(m.group(1))
unit = m.group(2)[0].upper()
return f"P{qty}{unit}"
else:
raise ValueError(age)
|
d4df09570d080172db5388ae7e06c2f36c264e6e
| 695,099 |
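A few hypothetical calls to parse_age above, showing the ISO 8601 durations it produces:

print(parse_age("3 weeks"))    # -> "P3W"
print(parse_age("18 months"))  # -> "P18M"
print(parse_age("1y"))         # -> "P1Y"
# Strings that do not match the pattern raise ValueError, e.g. parse_age("soon").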
from typing import Dict
from typing import Tuple
def create_default_domain_param_map_omo() -> Dict[int, Tuple[str, str]]:
"""
Create the default mapping from indices to domain parameters (as used in the `BayRn` algorithm).
:return: `dict` where the key is the index and the value is a tuple of domain parameter and the associated domain
distribution parameter
"""
return {
0: ("m", "mean"),
1: ("m", "std"),
2: ("k", "mean"),
3: ("k", "std"),
5: ("d", "mean"),
6: ("d", "std"),
}
|
262124901fc40d54477235d8b75776ed27cf3108
| 695,101 |
import yaml
def print_yml(analysis: dict):
"""
Converts an analysis dictionary into YAML output and returns it as a string
"""
return yaml.dump(
analysis,
allow_unicode=True,
default_flow_style=False,
)
|
fc5e9bd6693fa3a3b637e0f472287e2423ff9678
| 695,104 |
def post(path):
"""
@post 装饰器
>>> from transwarp.web import post
>>> @post('/post/:id')
... def testpost():
... return '200'
...
>>> testpost.__web_route__
'/post/:id'
>>> testpost.__web_method__
'POST'
>>> testpost()
'200'
"""
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'POST'
return func
return _decorator
|
25b8c9e6795cceae753f4a3a96c214f0f3c23cad
| 695,110 |
def depth_first(start, children_func):
"""Return a depth-first traversal of a tree.
Args:
start: the root of the tree.
children_func: function taking a node to its sequence of children.
Returns:
a list of nodes in depth-first order
"""
seen = set()
result = []
def traversal(node):
if node in seen:
return
seen.add(node)
for i in children_func(node):
traversal(i)
result.append(node)
traversal(start)
return result
|
027577a6df00898b1349dc3a780ee730a7365e6c
| 695,113 |
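A small hypothetical tree shows that depth_first above appends each node after its children, i.e. a post-order traversal:

tree = {"root": ["a", "b"], "a": ["c"], "b": [], "c": []}
print(depth_first("root", lambda n: tree.get(n, [])))  # -> ['c', 'a', 'b', 'root']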
def indent(level):
"""For use as a mako filter.
Returns a function that indents a block of text to the provided level.
"""
def indent_text_to_level(text, level):
result = ""
indentation = level * " "
for line in text.splitlines(True):
result = result + indentation + line
return result
return lambda text: indent_text_to_level(text, level)
|
942785a20693a5bada69f775c3ecb6ce82decaf4
| 695,115 |
def getBBHeight(bb):
"""
**SUMMARY**
(Dev Zone)
Get height of the bounding box
**PARAMETERS**
bb - Bounding Box represented through 2 points (x1,y1,x2,y2)
**RETURNS**
height of the bounding box
"""
return bb[3]-bb[1]+1
|
250500f66b19c292e3f1a287ed08bb645539da57
| 695,116 |
def get_sentiment_emoji(sentiment):
"""Returns an emoji representing the sentiment score."""
if sentiment == 0:
return ":neutral_face:"
elif sentiment > 0:
return ":thumbsup:"
else: # sentiment < 0:
return ":thumbsdown:"
|
5e58199faf0fb846c40417e7b9ffa1a3701d82f6
| 695,122 |
from typing import Any
def useful_tree_predicate(
y: Any, raw_predictions: Any, previous_loss: float, current_loss: float
) -> bool:
"""This predicated tells whether `current_loss < previous_loss`.
This implies that only usefull trees (the ones that lower the
overall loss) will be added to the ensemble. Trees that increase the
loss will be discarded.
"""
return current_loss < previous_loss
|
2ff691951454413a9c446d3c06c3cf61fa2f92b4
| 695,123 |
def repr_args(args, kwargs):
"""Stringify a set of arguments.
Arguments:
args: tuple of arguments as a function would see it.
kwargs: dictionary of keyword arguments as a function would see it.
Returns:
String as you would write it in a script.
"""
args_s = ("{}, " if kwargs else "{}").format(", ".join(map(repr, args))) if args else "" # noqa
kws = ", ".join(["{}={!r}".format(it[0], it[1]) for it in kwargs.items()])
return str(args_s) + str(kws)
|
f1d6f1645bc0997c4f0d0360ab9e43da129b8415
| 695,124 |
import random
def create_random_list(size=10):
"""
Generates a random list of the given size, made of random integers
:param size: Size of the list to generate
:return: List of integers with random data (based on size of list)
"""
mainList = []
for i in range(size):
mainList.append(random.choice(range(size))) # append random integers based on 'size'
return mainList
|
853caee84236f2469673cd0d3899e49284f3154f
| 695,126 |
def checksums2dict(checksums: list) -> dict:
"""
Converts a list of checksums to a dict for easier look up of a block
:param checksums: list of checksums
:return: dictionary of {checksum: index}
"""
result = {}
for index, checksum in enumerate(checksums):
if checksum not in result:
result[checksum] = index
return result
|
e0d9fbcad5444a0f4cf4097a2ef2494828c53415
| 695,131 |
def get_extension(path):
"""Return the extension after the last '.' in a path. """
idx = path.rfind('.')
if idx >= 0:
return path[idx + 1:]
return ''
|
ece0a1dc19b8a2b91294bce95ac65e7507c6640e
| 695,134 |
def get_datetime(filename):
"""Reads and generates datetime
Youtube compatible datetime format"""
# Opening and reading the date file
file = open(filename, "r+")
date = file.read()
# Appending 0 to date if less than 10
if int(date) < 10:
date = "0" + date
# Youtube compatible datetime format
datetime = "2021-08-" + date + "T00:00:00Z"
# Closing the file
file.close()
return datetime
|
f7b299b98ee7a98714bb1a71187b2863c30aedca
| 695,139 |
def balanced ( expression , left = '([' , right = ')]' ) :
"""Simple utility to check balanced parenthesis/brackets, etc...
>>> expression = ' .... '
>>> ok = balanced ( expression )
"""
assert left and len(left) == len ( right ) ,\
'balanced: invalid left/right arguments!'
stack = []
for i in expression :
if i in left : stack.append ( i )
elif i in right :
pos = right.index ( i )
if stack and left[ pos ] == stack [ -1 ] :
stack.pop()
else :
return False
return True if not stack else False
|
79c3dc4ec41f063e96783501597009ce9b2f3d14
| 695,142 |
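Two small sanity checks for balanced above, using the default '([' / ')]' pairs:

assert balanced("(a[b]c)")      # properly nested -> True
assert not balanced("(a[b)c]")  # interleaved pairs -> False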
def parser_of_connection(data):
""" Parse the special measurement id for connection events
Args:
data (dict): see the following example
{u'msm_id': 7000, u'timestamp': 1470134587, u'prefix': u'80.100.0.0/15', u'event': u'disconnect',
u'controller': u'ctr-ams07', u'prb_id': 15093, u'type': u'connection', u'asn': 3265}
Returns:
dict, event name as key, either disconnect or connect, timestamp as value
"""
event = data.get('event', None)
tstp = data.get('timestamp', None)
return {str(event): tstp}
|
c05c7d77c38739b3593119f2fc3d6e8ff129645a
| 695,149 |
def auto_raytracing_grid_resolution(source_fwhm_parcsec, grid_resolution_scale=0.0002, ref=10., power=1.):
"""
This function returns a resolution factor in units arcsec/pixel appropriate for magnification computations with
finite-size background sources. This fit is calibrated for source sizes (interpreted as the FWHM of a Gaussian) in
the range 0.1-100 pc.
:param source_fwhm_parcsec: the full width at half max of a Gaussian background source
:param grid_resolution_scale: overall scale factor for the grid resolution (arcsec/pixel)
:param ref: reference source FWHM in parsec used to normalize the scaling relation
:param power: power-law index of the scaling with source size
:return: an appropriate grid resolution for finite-size background magnification computation
"""
grid_resolution = grid_resolution_scale * (source_fwhm_parcsec / ref) ** power
return grid_resolution
|
14891a9a79f4a8efe0d41ac15b4b8879016ba92f
| 695,150 |
def pad_bits(bit_list, desired_size):
"""
Adds 0's to the head of bit_list so that bit_list has a length equal to desired_size. In other
words, prepends [desired_size - len(bit_list)] 0's to the front of bit_list.
Args:
bit_list: a list of 0's and 1's
desired_size: an integer representing the desired size of the bit list
Returns:
The input bit_list with leading 0's inserted so that bit_list is of size desired_size.
Raises:
ValueError: The desired size of the padded list is smaller than the binary to be padded
"""
if len(bit_list) > desired_size:
raise ValueError("Binary number is larger than desired size!")
num_zeros_needed = desired_size-len(bit_list)
padded_list = [0] * (num_zeros_needed) + bit_list
return padded_list
|
3b886b2b5d3a91129d9b2b889bc588064b7f09c8
| 695,151 |
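A minimal example of pad_bits above (values are illustrative):

print(pad_bits([1, 0, 1], 8))  # -> [0, 0, 0, 0, 0, 1, 0, 1]
# pad_bits([1, 0, 1], 2) raises ValueError because the list is already longer than 2.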
def strip_paths(paths):
"""
Remove repeated edges
"""
res_all = []
for path in paths:
res = []
for node in path:
if len(res) < 2:
res.append(node)
continue
if node == res[-2]:
res.pop()
continue
else:
res.append(node)
res_all.append(res)
return res_all
|
cba6867eb337ccea57ec9ab49b42b394c45d07c0
| 695,152 |
def expand_boundaries(boundaries, start_boundary, expansion):
"""Push boundaries out by exapnsion from starting boundary"""
return boundaries[:start_boundary] + [b + expansion for b in boundaries[start_boundary:]]
|
aef59860765e58570f03396aa8c0916a8ff85ff0
| 695,153 |
import re
def parse_map_file(path):
"""
Parse libgccjit.map, returning the symbols in the API as a list of str.
"""
syms = []
with open(path) as f:
for line in f:
m = re.match(r'^\s+([a-z_]+);$', line)
if m:
syms.append(m.group(1))
return syms
|
b0f78cf1a7ebe45ae845fbacef3b7712a9d53fdc
| 695,155 |
def drop_outliers_quantile(df, upper=0.99, lower=0):
"""
Drop Outliers by Quantiles
Deletes observations classified as outliers. The classification method
analyzes if the observation is out of the established quantile bounds.
Parameters
----------
df : pandas DataFrame
DataFrame to be cleaned. `df` must contain only nummerical values.
upper : float
Upper quantile boundary. Float value between 0 and 1. Must be bigger
than `lower`. Default value is 0.99.
lower : float
Lower quantile boundary. Float value between 0 and 1. Must be smaller
than `upper`. Default value is 0.
Returns
-------
pandas DataFrame
"""
n_initial_rows = df.shape[0]
drop_list = set()
quant_upper = df.quantile(upper)
quant_lower = df.quantile(lower)
print('-' * 25)
print('OUTLIERS DELETION: QUANTILE METHOD\n')
for el in df.columns.values:
drop_list = drop_list | \
set(df[el][df[el] > quant_upper[el]].index.values) | \
set(df[el][df[el] < quant_lower[el]].index.values)
drop_list = list(set(drop_list))
count = len(drop_list)
df.drop(drop_list, inplace=True)
print('Lower quantile: {} | Upper quantile: {}'.format(lower, upper))
print('N of deleted rows: {} | % of deleted rows: {}%'.format(
count, round(100 * (count / n_initial_rows), 3)))
return df
|
cf2e9493790e879d43f1ace33ca25b981ff8a546
| 695,158 |
def get_my_env(app):
"""
Gets the env name of the currently running environment
:param app: handle to Pyramid app
:return: current env
"""
# Return value is presumably one of the above-declared environments
return app.registry.settings.get('env.name')
|
43570747b0592a1d4e0f9890ddfd55c265bccc25
| 695,167 |
import re
def re_contains(a, b):
"""Return True if a regex search with pattern b yields a match in a
Args:
a (str): Pattern to search
b (str): Regex pattern to use in search
Returns:
result (bool): Whether a contains a match of pattern b.
"""
try:
regexp = re.compile(b, flags=re.IGNORECASE)
except(TypeError):
raise TypeError('Value must be a string that can be compiled to regex expression')
return bool(re.search(regexp, a))
|
ca49393fab219c97d5c54190961b1973aca70582
| 695,172 |
def merge_resources(resource1, resource2):
"""
Updates a copy of resource1 with resource2 values and returns the merged dictionary.
Args:
resource1: original resource
resource2: resource to update resource1
Returns:
dict: merged resource
"""
merged = resource1.copy()
merged.update(resource2)
return merged
|
e3b2f27e29fb773b119ad22ab89b297e0425a65d
| 695,178 |
def personal_top_three(scores):
"""
Return the top three scores from scores.
If there are fewer than three scores, return the scores.
param: list of scores
return: highest three scores from scores.
"""
# Sort the scores in descending order
scores.sort(reverse=True)
if len(scores) < 3:
return scores
# Return the first three
return [scores[0], scores[1], scores[2]]
|
846e773d72c8fca14f6dca8cd11c4a96f1d31cb9
| 695,181 |
def simple_closure(s, implications):
"""
Input: A set of implications and an attribute set s
Output: The closure of s with respect to implications
Examples
========
>>> from fca.implication import Implication
>>> cd2a = Implication(set(('c', 'd')), set(('a')))
>>> ad2c = Implication(set(('a', 'd')), set(('c')))
>>> ab2cd = Implication(set(('a', 'b')), set(('c', 'd')))
>>> imps = [cd2a, ad2c, ab2cd]
>>> print simple_closure(set('a'), imps)
set(['a'])
>>> print simple_closure(set(), imps)
set([])
>>> simple_closure(set(['b', 'c', 'd']), imps) == set(['a', 'b', 'c', 'd'])
True
>>> a2bc = Implication(set(('a')), set(('b', 'c')))
>>> ce2abd = Implication(set(('c', 'e')), set(('a', 'b', 'd')))
>>> de2abc = Implication(set(('d', 'e')), set(('a', 'b', 'c')))
>>> cd2abe = Implication(set(('c', 'd')), set(('a', 'b', 'e')))
>>> imps = [a2bc, ce2abd, de2abc, cd2abe]
>>> simple_closure(set(['b', 'a']), imps) == set(['a', 'b', 'c'])
True
>>> simple_closure(set(['a', 'e']), imps) == set(['a', 'b', 'c', 'd', 'e'])
True
>>> imps = [ce2abd, a2bc, de2abc, cd2abe]
>>> simple_closure(set(['a', 'e']), imps) == set(['a', 'b', 'c', 'd', 'e'])
True
"""
unused_imps = implications[:]
new_closure = s.copy()
changed = True
while changed:
changed = False
for imp in list(unused_imps):  # iterate over a copy so removal below is safe
if imp.premise <= new_closure:
new_closure |= imp.conclusion
changed = True
unused_imps.remove(imp)
return new_closure
|
05ff32be462b5949bb1ff62917c28d32a05cde84
| 695,185 |
def _draw_mask_on_image(src_image, mask):
"""
Draw a mask on an image.
Parameters
----------
src_image : np.ndarray
Image.
mask : np.ndarray
Mask.
Returns
-------
np.ndarray
Image with mask.
"""
dst_image = src_image.copy()
dst_image_g = dst_image[:, :, 1]
dst_image_g[mask <= 127] = 255
dst_image_b = dst_image[:, :, 0]
dst_image_b[mask > 127] = 255
return dst_image
|
01994bc50219c548ab457c63033ef59beffef5e9
| 695,189 |
import math
def n(i,pv,fv,pmt):
"""Calculate the number of periods in an annuity"""
n = math.log((fv*i+pmt)/(pv*i+pmt)) / math.log(1+i)
return n
|
b7219ae4d69f059bf7c0b34e7101bcbcdb991914
| 695,192 |
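A worked example for n() above, with hypothetical figures: growing a zero balance toward a future value of 10,000 at 1% per period with payments of 100 per period takes roughly log(200/100)/log(1.01), about 69.66 periods.

periods = n(i=0.01, pv=0, fv=10000, pmt=100)
print(round(periods, 2))  # -> 69.66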
def _get_variables(formula, type_):
"""Finds all the variables in the formula of the specific pysmt type.
Args:
formula (FNode): The pysmt formula to examine.
type_: The pysmt type to find (e.g: REAL, BOOL).
Returns:
set(FNode): The set of all the variables in the formula of the specific type.
"""
return {a for a in formula.get_free_variables() if a.get_type() == type_}
|
493a8ac1cb47f1d854acf92dda92ef71986113da
| 695,195 |
def prompt(prompt_message: str = "Enter a string", default: str = "") -> str:
"""
Prints a message and waits for user input.
Args:
prompt_message: string to be printed
default: string to be returned if no value is entered
Returns:
string entered or default value
"""
default = str(default)
try:
if default != "":
in1 = input(prompt_message + " [" + default + "]: ")
else:
in1 = input(prompt_message + ": ")
except KeyboardInterrupt:
return ""
if in1 == "":
return default
else:
return in1
|
0b8de5d4c3886de947887525684ed40a952d200e
| 695,197 |
def detect_combiner(env):
"""Detect if mkdocscombine exe is detected on the system, or use user specified option"""
if 'Mkdocs_Combine' in env:
return env.Detect(env['Mkdocs_Combine'])
else:
return env.Detect('mkdocscombine')
|
9ab1237d835d5c03ab232eb66e9f99cb8b8b35b3
| 695,201 |
def escape_html(string: str, quote=True) -> str:
"""
Replace special characters "&", "<" and ">" to HTML-safe sequences.
:param string: the string
:param quote: If the optional flag quote is true (the default), the quotation mark characters, both double quote (") and single quote (') characters are also translated.
:return: the escaped string
"""
string = string.replace("&", "&") # Must be done first!
string = string.replace("<", "<")
string = string.replace(">", ">")
if quote:
string = string.replace('"', """)
string = string.replace("'", "'")
return string
|
a21a40e658cc09b4d24a94e00a54c97c6f135a80
| 695,205 |
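A quick check of escape_html above, assuming the standard HTML entity replacements shown:

print(escape_html('<a href="x">&</a>'))
# -> &lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;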
def blank_if_zero(n):
"""Return str(n) unless n is zero, in which case return ""."""
if n == 0:
return ""
return str(n)
|
d2249e29c43a05efa9bee6b380b5c8636f4c0dd9
| 695,206 |
def remove_null_values(data):
""" Removed null value
Arguments:
data {dict}: data
Returns:
data {dict}: update data
"""
return {k: v for k, v in data.items() if v is not None}
|
702c0c308aaf9b7926535121eea7de0383412634
| 695,209 |
def format_epoch_score(epoch: int, loss: float) -> str:
"""Formats the results obtained at the end of an epoch in
a string used for logging.
Args:
epoch (int): current epoch
loss (float): current associated loss
Returns:
str: formatted string
"""
return f"Epoch {epoch}: {loss}"
|
fc2c816ca32c7e6e9d899bc89757d9a5424e7f00
| 695,211 |
import unicodedata
def is_number(string: str) -> bool:
"""See if a given string is a number (int or float)"""
try:
float(string)
return True
except ValueError:
pass
try:
unicodedata.numeric(string)
return True
except (TypeError, ValueError):
pass
return False
|
e71ecaa10b619b6eac34001cc2f964d3b1d993f8
| 695,213 |
from typing import Optional
def validate_opex_per_capacity(opex_per_capacity: Optional[float]) -> Optional[float]:
"""
Validates the opex per capacity of an object.
Opex per capacity is always optional.
:param opex_per_capacity: The opex per capacity of the object.
:return: The validated opex per capacity.
"""
if opex_per_capacity is None:
# Skip validation if no value provided
return None
if opex_per_capacity < 0:
raise ValueError("Opex per capacity must be zero or positive.")
return opex_per_capacity
|
a9297cd0bb20a0ddcd37d946152b032d9938cf66
| 695,214 |
def get_structure_from_prev_run(vasprun, outcar=None):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i['tot']
for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l_val = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l_val.append(m[site.specie.symbol])
if len(l_val) == len(structure):
site_properties.update({k.lower(): l_val})
else:
raise ValueError("length of list {} not the same as"
"structure".format(l_val))
return structure.copy(site_properties=site_properties)
|
c6d272d2bff74b408e1a16e758db975837822e79
| 695,215 |
def newPage(doc, pno=-1, width=595, height=842):
"""Create and return a new page object.
"""
doc._newPage(pno, width=width, height=height)
return doc[pno]
|
f908615439b911805d98d45e164ebe41565b5a1a
| 695,217 |
from typing import List
from typing import Dict
def find_values(item, keys: List[str]) -> Dict:
"""Find values for keys in item, if present.
Parameters
----------
item : Any
Any item
keys : List[str]
Keys whose value to retrieve in item
Returns
-------
Dict
Mapping of key -> value for keys found in item.
Raises
------
ValueError
If one key is found whose value is a tuple, list or dict.
"""
if isinstance(item, (list, tuple)):
values = {}
for it in item:
values.update(find_values(it, keys))
return values
elif isinstance(item, dict):
values = dict()
for key, value in item.items():
if key in keys:
if isinstance(value, (list, tuple, dict)):
raise ValueError(f"Cannot find value for '{key}'. Type must be literal but got {type(value)}")
values[key] = value
else:
values.update(find_values(value, keys))
return values
else:
return {}
|
bab4c8a9695113390654e4eaf971559a98f4eb71
| 695,223 |
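A hypothetical nested structure illustrates how find_values above recurses through dicts and lists and only collects literal values:

item = {"a": 1, "nested": {"b": 2, "c": [3, 4]}, "ignored": "x"}
print(find_values(item, ["a", "b"]))  # -> {'a': 1, 'b': 2}
# find_values(item, ["c"]) raises ValueError because the value of "c" is a list.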
def _toList(qvar):
""" Make qvar a list if not."""
if type(qvar) != type([]): qvar = [qvar]
return qvar
|
96644b04ee791d901a44731daddcb636ed549b46
| 695,225 |
def create_members_api_bp_from_app(app):
"""Create members api blueprint."""
ext = app.extensions["invenio-communities"]
# control blueprint endpoints registration
return ext.members_resource.as_blueprint()
|
b6899ab54fe26419b5ca10f747719555aa50d2de
| 695,228 |
import re
def macs_species(genome):
"""Convert genome to macs2 species encoding"""
if re.match('^hg[0-9]+$', genome):
return 'hs'
elif re.match('^mm[0-9]+$', genome):
return 'mm'
raise Exception('Unknown species {}'.format(genome))
|
ecacca48f7bb7cd1eaa5e21a58ea2efba00627ae
| 695,229 |
import socket
def is_port_opened(port, hostname='127.0.0.1'):
""" Checks if the specified port is opened
:param port: The port to check
:param hostname: The hostname to check, defaults to '127.0.0.1'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((hostname, port))
sock.close()
return result == 0
|
5b9e1a80988c99289d71cdf616cc36800dcf4848
| 695,232 |
def parse_filename(args, start_date):
"""Parse filename from args and start_date
Arguments:
args: the arguments provided to the script
Returns:
filename: the filename to use for the csv data
"""
if args.filename:
filename = args.filename
# depending on date args either append the month or the start_date
elif args.last_month:
filename = (__file__.replace('.py', '_{}.csv').format(start_date.strftime('%B')))
else:
filename = __file__.replace('.py', '_{}.csv').format(start_date)
return filename
|
c68a3036ec43b4657b88c7f2f623c689c721cf43
| 695,236 |
def make_gauge(name, value, m_type='gauge'):
"""Return a dict for use as a gauge."""
return {
'name': name,
'value': value,
'type': m_type
}
|
f1c5b11a481ad17eaf23169f9401b08cdff674d3
| 695,238 |
def has_triple_string_quotes(string_contents: str) -> bool:
"""Tells whether string token is written as inside triple quotes."""
if string_contents.startswith('"""') and string_contents.endswith('"""'):
return True
elif string_contents.startswith("'''") and string_contents.endswith("'''"):
return True
return False
|
708d6848c8e7f58f8f93818f7f3a5095bf4e2fa3
| 695,239 |
def check_managability(user, note, action):
"""
Determine if user can edit or delete this note. This note can be edited or deleted if at least one of the
following criteria is met:
- user is an admin
- user is the author of the note
- user is a member of one of the note's groups AND the proper permission is set
- note is public and proper permission is set
Args:
user: django User object
note: Note object
action: (str) name of action to check for (edit or delete)
Returns (bool):
True if this note can be managed by the provided user
False if this note can not be managed by the provided user
"""
if action not in ('edit', 'delete'):
return False
if user.is_superuser:
return True
if note.author == user:
return True
if note.scope == 'group' and \
any(i in user.groups.all() for i in note.groups.all()) and \
action in note.permissions:
return True
if note.scope == 'public' and action in note.permissions:
return True
return False
|
2b7ab8dea97f0f0c4e67b3d3f212438a130e7b92
| 695,241 |
def ipow(a, b, N):
""" Returns a raised to the power b modulo N by squaring exponentiation """
res = 1
while b > 0:
if b % 2 == 1:
res = (res * a) % N
a = (a * a) % N
b >>= 1
return res % N
|
9b2047cb35c9b76db1597377a8bd2de23c37b0e8
| 695,242 |
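A worked example for ipow above: 7^13 mod 11 computed by repeated squaring agrees with Python's built-in three-argument pow:

assert ipow(7, 13, 11) == pow(7, 13, 11) == 2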
def _is_tcp_syn(tcp_flags):
"""
Passed a TCP flags object (hex) and return 1 if it
contains a TCP SYN and no other flags
"""
if tcp_flags == 2:
return 1
else:
return 0
|
7072b33e641099a97bdba850bfd11c9aeccc1223
| 695,247 |
def indexToWorld(flatmap_index, map_width, map_resolution, map_origin = [0,0]):
"""
Converts a flatmap index value to world coordinates (meters)
flatmap_index: a linear index value, specifying a cell/pixel in an 1-D array
map_width: number of columns in the occupancy grid
map_resolution: side length of each grid map cell in meters
map_origin: the x,y position in grid cell coordinates of the world's coordinate origin
Returns a list containing x,y coordinates in the world frame of reference
"""
# convert to x,y grid cell/pixel coordinates
grid_cell_map_x = flatmap_index % map_width
grid_cell_map_y = flatmap_index // map_width
# convert to world coordinates
x = map_resolution * grid_cell_map_x + map_origin[0]
y = map_resolution * grid_cell_map_y + map_origin[1]
return [x,y]
|
210a4c730072d855d825f08f2b41fbc05dbf04c2
| 695,249 |
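A worked example for indexToWorld above, with hypothetical grid parameters: in a 10-column grid at 0.5 m resolution, flat index 23 is column 3, row 2, i.e. x = 1.5 m and y = 1.0 m.

print(indexToWorld(23, map_width=10, map_resolution=0.5))  # -> [1.5, 1.0]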
def addext(name_str, ext_str):
"""Add a file extension to a filename
:param name_str: The filename that will get the extension
:param ext_str: The extension (no leading ``.`` required)
:returns: The filename with the extension
"""
if not ext_str:
return name_str
return name_str + "." + ext_str
|
834484ac16e979dd221eed08c0c62bd8ca2a716a
| 695,250 |
def roll(image, delta):
"""Roll an image sideways."""
xsize, ysize = image.size
delta = delta % xsize
if delta == 0: return image
part1 = image.crop((0, 0, delta, ysize))
part2 = image.crop((delta, 0, xsize, ysize))
image.paste(part1, (xsize - delta, 0, xsize, ysize))
image.paste(part2, (0, 0, xsize - delta, ysize))
return image
|
28e212e7309749f9a93d6d51587ae939c51ae082
| 695,252 |
def elt0(list, context):
"""return first member of reduction"""
return list[0]
|
43015dcad789f1e9f8bc0d87f42d947ce53505a4
| 695,253 |
def get_factorial(num: int):
""" Returns the factorial of `num` """
answer = 1
for i in range(num, 0, -1):
answer *= i
return answer
|
8dbdeb545c76fa4860240d80059a46c82a032202
| 695,255 |
def rflink_to_brightness(dim_level: int) -> int:
"""Convert RFLink dim level (0-15) to 0-255 brightness."""
return int(dim_level * 17)
|
8924a7de8892b0d1e339c5917ae25ff6b29e7f2b
| 695,260 |
import logging
def MergeDictionaryValues(old_dict, new_dict):
"""Attempts to merge the given dictionaries.
Warns if a key exists with different values in both dictionaries. In this
case, the new_dict value trumps the previous value.
Args:
old_dict: Existing dictionary.
new_dict: New dictionary.
Returns:
Result of merging the two dictionaries.
Raises:
ValueError: If the keys in each dictionary are not unique.
"""
common_keys = set(old_dict) & set(new_dict)
if common_keys:
conflicting_keys = set(key for key in common_keys
if old_dict[key] != new_dict[key])
if conflicting_keys:
def FormatKey(key):
return ('\'{key}\' has conflicting values \'{old}\' and \'{new}\'. '
'Using \'{new}\'.').format(key=key,
old=old_dict[key],
new=new_dict[key])
for conflicting_key in conflicting_keys:
logging.warning(FormatKey(conflicting_key))
result = old_dict.copy()
result.update(new_dict)
return result
|
2836386f78363a1a0cede442ac0d6e81e3cd71e4
| 695,263 |
def make_pounds(coins, bill):
"""
Find how many ways there are to make bill from the given list of coins
:param coins List of coins
:type coins list
:param bill Coin/note to make change for
:type bill int
:return: Number of ways to make change for the given currency
:rtype: int
"""
ways_to_make_bill = [0] * (bill + 1)
ways_to_make_bill[0] = 1
for x in range(len(coins)):
for n in range(coins[x], bill + 1):
ways_to_make_bill[n] += ways_to_make_bill[n - coins[x]]
return ways_to_make_bill[bill]
|
c1cb1e82bdb50e4ada3b4d02b3ff7b692bb91f5a
| 695,266 |
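A worked example for make_pounds above: there are four ways to make 5 from coins [1, 2, 5], namely 1+1+1+1+1, 1+1+1+2, 1+2+2, and 5.

assert make_pounds([1, 2, 5], 5) == 4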
def find_shablons(key_word, array):
"""
Receive `key_word` as a string pattern, find all words in `array`
which contain it, and return a list of those words
Parameters
----------
key_word : str
array: list
Returns
-------
list
"""
result_list = []
key_word = key_word.lower()
for comp in array:
comp = str(comp)
comp_lower = comp.lower()
if comp_lower.find(key_word) != -1:
result_list.append(comp)
return result_list
|
fe959369e204910723a9c32caeaa9f8712590bce
| 695,269 |
def iso_string_to_sql_date_mysql(x: str) -> str:
"""
Provides MySQL SQL to convert an ISO-8601-format string (with punctuation)
to a ``DATE``, just by taking the date fields (without any timezone
conversion). The argument ``x`` is the SQL expression to be converted (such
as a column name).
"""
return f"STR_TO_DATE(LEFT({x}, 10), '%Y-%m-%d')"
|
6798578e8633e819e7e55622e53b2cd2be583fc5
| 695,270 |
from typing import Dict
def get_config_input(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Get the input configuration
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: partial configuration
:rtype cfg: dict
"""
cfg = {}
if "input" in user_cfg:
cfg["input"] = user_cfg["input"]
return cfg
|
bbcaf21c3c337b14e33ac9ce6469368987eaac4b
| 695,275 |
from typing import Dict
from typing import List
def prepare_data_for_markdown_formatting(scans: Dict) -> List[List[str]]:
"""Method responsible for formatting the data into the correct form for the MarkdownTableWriter.
Args:
scans: Dictionary containing the scans, from the virustotal api response.
Returns:
data: List of lists, each containing the name of the antivirus, and the result of its scan.
"""
data = []
for antivirus, result in scans.items():
status = '_Malicious_' if result['detected'] is True else '_Safe_'
row = [antivirus, status]
data.append(row)
return data
|
89c1560b40579cc1944bde8c04be8fcdf28a64ad
| 695,277 |
def docstring(docstr, sep="\n"):
"""
Decorator: Append to a function's docstring.
"""
def _decorator(func):
if func.__doc__ is None:
func.__doc__ = docstr
else:
func.__doc__ = sep.join([func.__doc__, docstr])
return func
return _decorator
|
21bbd5e158a183ebda28e313e8b4180ba3928233
| 695,278 |
def link_easy(sid):
"""
Creates an html link to a dataset page in Easy.
:param sid: a dataset id
:return: link to the page for that dataset
"""
prefix = 'https://easy.dans.knaw.nl/ui/datasets/id/'
return '<a target="_blank" href="{}{}">{}</a>'.format(prefix, sid, sid)
|
f329ac256e459bf79572b8686a1b6be003efccae
| 695,279 |
import base64
def _get_signed_query_params(credentials, expiration, string_to_sign):
"""Gets query parameters for creating a signed URL.
:type credentials: :class:`oauth2client.client.AssertionCredentials`
:param credentials: The credentials used to create a private key
for signing text.
:type expiration: int or long
:param expiration: When the signed URL should expire.
:type string_to_sign: string
:param string_to_sign: The string to be signed by the credentials.
:raises AttributeError: If :meth: sign_blob is unavailable.
:rtype: dict
:returns: Query parameters matching the signing credentials with a
signed payload.
"""
if not hasattr(credentials, 'sign_blob'):
raise AttributeError('you need a private key to sign credentials.'
'the credentials you are currently using %s '
'just contains a token. see https://googlecloud'
'platform.github.io/gcloud-python/stable/gcloud-'
'auth.html#setting-up-a-service-account for more '
'details.' % type(credentials))
_, signature_bytes = credentials.sign_blob(string_to_sign)
signature = base64.b64encode(signature_bytes)
service_account_name = credentials.service_account_email
return {
'GoogleAccessId': service_account_name,
'Expires': str(expiration),
'Signature': signature,
}
|
6f7fd8d24240ee604fdfe3b5d63ff8638e601f77
| 695,280 |
def is_closed(request):
"""Check if the request is closed."""
return request.is_closed
|
a2949d8f185f56f52e5b637066353ce935df28c3
| 695,281 |
def get_dataset_params(dataset):
"""
Get the capacities to test for the particular dataset.
:param dataset: string of dataset
:return: list of capacities
"""
if dataset == 'cub200':
capacity_list = [2, 4, 8, 16]
else:
raise NotImplementedError('Dataset not supported.')
return capacity_list
|
a8e859da52085233426957397d8d0fc50628d5b8
| 695,283 |
import math
def sched_cos(start, end, pos):
"""Cosine scheduler."""
return start + (1 + math.cos(math.pi * (1 - pos))) * (end - start) / 2
|
d20afee0f5d11b792bc926b4f13d1353e0208619
| 695,284 |
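A quick check of sched_cos above: the schedule starts at `start` when pos = 0, reaches `end` at pos = 1, and passes through the midpoint at pos = 0.5.

assert abs(sched_cos(0.0, 1.0, 0.0) - 0.0) < 1e-9
assert abs(sched_cos(0.0, 1.0, 0.5) - 0.5) < 1e-9
assert abs(sched_cos(0.0, 1.0, 1.0) - 1.0) < 1e-9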
import re
def replace_aea_fetch_statements(
content: str, old_string: str, new_string: str, type_: str
) -> str:
"""Replace statements of the type: 'aea fetch <old_string>'."""
if type_ == "agents":
content = re.sub(
fr"aea +fetch +{old_string}", f"aea fetch {new_string}", content
)
return content
|
0bd3838d642c4a6120e2b1fcad4fe8995ab7425f
| 695,285 |
def bigquery_serialize_date(py_date):
"""
Convert a Python date object into a serialized format that BigQuery accepts.
Accurate to days.
BigQuery format: 'YYYY-[M]M-[D]D'
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
Args:
py_date (datetime.date): The date to convert.
Returns:
(str): The Serialized date.
"""
return py_date.strftime('%Y-%m-%d')
|
b730fdfe6696011601d466ad65010b5b566749e7
| 695,286 |
def get_model(obj):
"""
Finds model root element for the given object.
"""
p = obj
while hasattr(p, 'parent'):
p = p.parent
return p
|
d61bc104296db17fe178307503a8494e19c9718b
| 695,287 |
import torch
def focal_loss(input_values, gamma):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
|
7246eade4f4175065c845386dbeb0e26b4f63b81
| 695,289 |
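A small sanity check of focal_loss above: with gamma = 0 the modulating factor is 1, so the result reduces to the plain mean of the per-sample loss values (the values here are illustrative).

import torch

values = torch.tensor([0.2, 1.5, 0.7])
assert torch.isclose(focal_loss(values, gamma=0.0), values.mean())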
import click
def main(args=None):
"""Console script for flowsym."""
click.echo("Welcome to FlowSym, a Python API used to simulate flow "
"cytometry data!"
"\n\n"
"If you don't see any warning messages above then you're "
"all set!")
click.echo("See documentation at ")
return 0
|
46dbe0ebde8f896a7113bc4c8a415003b44e9553
| 695,291 |
from typing import Tuple
from typing import Union
def is_secret_key(key: str) -> Tuple[bool, Union[str, None]]:
"""Check if the Airflow Flask webserver secret key is valid.
:param key: Key to check.
:return: Validity, and an error message if not valid.
"""
key_bytes = bytes(key, "utf-8")
message = None
key_length = len(key_bytes)
if key_length < 16:
message = f"Secret key should be length >=16, but is length {key_length}."
return False, message
return True, message
|
77327659da0935da96be5597b296cce7173d7bf2
| 695,292 |
def correct_img_dimension(width, height, threshold_w, threshold_h):
"""
Return a new width and height that are as close as possible to the thresholds, while keeping
the aspect ratio the same
:param width:
:param height:
:param threshold_w:
:param threshold_h:
:return: tuple of new width and height
"""
isWidthGreater = False
if width > height:
isWidthGreater = True
ratio = height / width
if isWidthGreater:
return (threshold_w, ratio * threshold_h)
return (threshold_w * (1 / ratio), threshold_h)
|
30b3b30bff06004b3df19e036972be71c05e3ff1
| 695,294 |
def format_schema(schema):
"""Convert Schema to fixture format.
"""
return {
"fields": {
"name": schema.name,
"datastore_id": schema.datastore_id,
"workspace_id": str(schema.workspace_id),
"created_at": str(schema.created_at),
"updated_at": str(schema.updated_at),
"object_id": schema.object_id,
"tags": schema.tags,
},
"model": "definitions.schema",
"pk": schema.pk,
}
|
cbb310f7e6d8dfac43eecc153138fbbbe7c6f274
| 695,296 |
def grep(pat,list,case=1):
"""Simple minded grep-like function.
grep(pat,list) returns occurrences of pat in list, None on failure.
It only does simple string matching, with no support for regexps. Use the
option case=0 for case-insensitive matching."""
# This is pretty crude. At least it should implement copying only references
# to the original data in case it's big. Now it copies the data for output.
out=[]
if case:
for term in list:
if term.find(pat)>-1: out.append(term)
else:
lpat=pat.lower()
for term in list:
if term.lower().find(lpat)>-1: out.append(term)
if len(out): return out
else: return None
|
00e073adf85ed0f08fa76d09b61c82cc0cfaaf93
| 695,297 |
def file_to_list(filename):
"""
Read lines of text file to a list.
@param filename: path to file
@type filename: string
@return list of lines
"""
with open(filename) as fil:
meat = fil.readlines()
return [i.strip() for i in meat]
|
7d5db114798470e75e2f9e750a666cee57520820
| 695,298 |
def ssl_config(module):
"""
Creates a ssl config dictionary to be used for ssl auth.
"""
ctx = module.params['ssl_auth']
try:
return {
'cert': ctx['cert'],
'serverca': ctx['serverca'],
'no_ssl_verify': ctx.get('verify', True),
'authtype': 'ssl'
}
except KeyError as e:
module.fail_json(changed=False,
skipped=False,
failed=True,
error='Missing ssl_auth "%s" key.' % e.args[0])
|
2581d58acb25fb5d0c6bb1bfc877619c3c6ed52c
| 695,299 |
import unittest
def expected_failure_if(expect):
"""
Unit test decorator to expect failure under conditions.
@param expect: Flag to check if failure is expected
@type expect: bool
"""
if expect:
return unittest.expectedFailure
else:
return lambda orig: orig
|
4cf083167e44328811a8df1bb9b8fc55dd741661
| 695,304 |
def getDist(ind1,ind2,distMat):
"""
distMat is a distance matrix. distMat[i,j] == distance from i to j
"""
return distMat[ind1,ind2]
|
c9303fd806cd765295f437ee533952692f89702c
| 695,305 |
def sleep_onset_latency(predictions):
"""
Calculates sleep onset latency on an array of sleep/wake predictions in one minute epochs. This corresponds to the
total number of minutes awake before the first sleep period.
Parameters
----------
predictions : array-like
Binary sleep/wake predictions. Awake encoded as 1 and sleep as 0.
Returns
-------
sol : int
Total number of minutes spent awake before the first sleep period.
"""
first_sleep_epoch = predictions.argmin()
sol = predictions[0:first_sleep_epoch].sum()
return int(sol)
|
6595aba1d22d9555c8998c6bb940045951b2503c
| 695,306 |
def initialize_LIP_dict(LIP_feature_collection):
"""
Initialize the dictionary which contains the LIP fraction remaining for all LIPs.
Parameters
----------
LIP_feature_collection : feature collection
feature collection of LIPs
Returns
-------
LIP_fracs : dictionary
with keys = LIP Ids, values = LIP fraction remaining
"""
# get the unique ID associated with each LIP geometry
LIP_Ids = []
for feature in LIP_feature_collection:
LIP_Id = feature.get_feature_id().get_string()
LIP_Ids.append(LIP_Id)
# create a dictionary: key = LIP Id, value = LIP fraction remaining
ones = [1]*len(LIP_Ids)
LIP_fracs = dict(zip(LIP_Ids, ones))
return LIP_fracs
|
e105b84781105599bc92a6e1eece9b4f8ef2e4e9
| 695,307 |
def version_tuple_to_str(version):
"""Join version tuple to string."""
return '.'.join(map(str, version))
|
2567dd8481fe9dc6b1fc71cb7669aed204aaec9a
| 695,308 |
def _pat_mergeable(p1, p2):
"""
Compare two *AbstractionPattern* instances for equality regarding an
interpretation merging operation. Evidence and hypothesis comparison is
assumed to be positive, so only the automata and the initial and final
states are compared.
"""
if p1 is None or p2 is None:
return p1 is p2
return p1.automata is p2.automata and p1.istate == p2.istate and p1.fstate == p2.fstate
|
f0c418fd63784e5e6ea8cf02ac17ec315eba809d
| 695,309 |
def pytest_report_header(config):
""" return a string in test report header """
return "Hey this are the tests"
|
e64bf912f78e8524d99126569d0423c821158498
| 695,314 |
def valid_moves(board):
"""Returns a list of all valid moves in the position"""
moves = []
# Go through each space, if it's not X or O, append it
for space in board:
if space != "X" and space != "O":
moves.append(space)
# Return moves at the end
return moves
|
1447314b16ab611ab796fcbb1e5582b98d6ae88e
| 695,318 |
def _format_vendor_id(vendor_id: str) -> str:
"""Strips vendor name from vendor_id field.
Example:
>>> _format_vendor_id("0x1234 (Nice Vendor Inc.)") # "0x1234"
"""
return vendor_id.split(maxsplit=1)[0]
|
e181330ca164ea4fdbf6ea2e57e20b707351dcfc
| 695,325 |
def search_result(doc_info, index):
"""
The search_results function displays the results of the query performed by the
user into the search engine
Args:
doc_info (list): A list containing document information
index (int): An integer containing the ranking
Returns:
A string containing one result from the search
"""
return f"""
<div style="font-size: 125%; display: inline-flex; flex-direction: row">
<div>
{index + 1}.  
</div>
<div>
<div>
File Path: {doc_info[0]}
</div>
<div>
URL: <a href="{doc_info[1]}" target="_blank">{doc_info[1]}</a>
</div>
</div>
</div>
<br>
<br>
"""
|
56378bc310343ed104a3e507e5295952b2cbc62f
| 695,326 |
def user_mention(user_id: str) -> str:
"""
Return a mention of a user that can be sent over a Discord message. This is a convenience
method for cases where the user_id is known but you don't have or need the full discord.User
or discord.Member object.
"""
if not user_id.isnumeric():
raise ValueError("Discord ID must be numeric")
return '<@{}>'.format(user_id)
|
bcd093e3d49db48dd32765b477f4f7438230b4fc
| 695,328 |
def is_array(type_ir):
"""Returns true if type_ir is an array type."""
return type_ir.HasField("array_type")
|
ac51de921484113d56923cea74324470706883b7
| 695,330 |
def parse_input(event):
"""Parses all input required from step function."""
input_request = event["input"]
return {
"batch_id": input_request["transformation_step_output"]["batch_id"],
"output_sns_arn": input_request.get("destinationSnsArn"),
"execution_id": event["execution_id"],
}
|
ed85b61e7c9e68dbbee910d7d6c1eaf342255aa0
| 695,331 |