def _nw_score_(s1, s2, insert=lambda c: -2,
delete=lambda c: -2,
substitute=lambda c1, c2: 2 if c1 == c2 else -1):
"""Compute Needleman Wunsch score for aligning two strings.
This algorithm basically performs the same operations as Needleman Wunsch
alignment, but is made more memory efficient by storing only two columns of
the optimal alignment matrix. As a consequence, no reconstruction is
possible.
Args:
s1 (iterable): iterable to which we should align
s2 (iterable): iterable to be aligned
insert (lambda): function returning penalty for insertion (default -2)
delete (lambda): function returning penalty for deletion (default -2)
substitute (lambda): function returning penalty for substitution
(default -1)
Returns:
: last column of optimal matching matrix
"""
# lengths of two strings are further used for ranges, therefore 1 is added
# to every length
m = len(s1) + 1
n = len(s2) + 1
# score will be a two dimensional matrix
score = [[0 for i in xrange(n)], [0 for i in xrange(n)]]
# character of first and second string, respectively
c1 = c2 = ''
# iterator over the second string
s2_it = xrange(1, n)
# indices of current and previous column in the error matrix (will be
# swapped along the way)
crnt = 0
prev = 1
prev_j = 0
# base case when the first string is shorter than second
for j in s2_it:
prev_j = j - 1
score[crnt][j] = score[crnt][prev_j] + insert(s2[prev_j])
# iterate over the first string
for i in xrange(1, m):
# swap current and previous columns
prev, crnt = crnt, prev
# get current character of the first string
c1 = s1[i - 1]
# calculate the base case when len = 0
score[crnt][0] = score[prev][0] + delete(c1)
for j in s2_it:
prev_j = j - 1
c2 = s2[prev_j]
# current cell will be the maximum over insertions, deletions, and
# substitutions applied to adjacent cells
# substitution (covers cases when both chars are equal)
score[crnt][j] = max(score[prev][prev_j] + substitute(c1, c2),
# deletion
score[prev][j] + delete(c1),
# insertion
score[crnt][prev_j] + insert(c2))
# return last computed column of scores
return score[crnt]
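# A minimal usage sketch of _nw_score_ (note: the implementation above relies
# on Python 2's xrange; replace it with range to run under Python 3). With the
# default scoring (+2 match, -1 mismatch, -2 insert/delete), aligning "AB"
# with "AB" yields a final score of 4; the whole last column is returned.
last_column = _nw_score_("AB", "AB")
assert last_column == [-4, 0, 4]
assert last_column[-1] == 4  # optimal alignment score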
| 6,100 |
def load_gisaid_data(
*,
device="cpu",
min_region_size=50,
include={},
exclude={},
end_day=None,
columns_filename="results/usher.columns.pkl",
features_filename="results/usher.features.pt",
feature_type="aa",
) -> dict:
"""
Loads the two files columns_filename and features_filename,
converts the input to PyTorch tensors and truncates the data according to
``include`` and ``exclude``.
:param str device: torch device to use
:param dict include: filters of data to include
:param dict exclude: filters of data to exclude
:param end_day: last day to include
:param str columns_filename:
:param str features_filename:
:param str feature_type: Either "aa" for amino acid features or "nuc" for
nucleotide features.
:returns: A dataset dict
:rtype: dict
"""
logger.info("Loading data")
include = include.copy()
exclude = exclude.copy()
if end_day:
logger.info(f"Load gisaid data end_day: {end_day}")
# Load column data.
with open(columns_filename, "rb") as f:
columns = pickle.load(f)
# Clean up location ids (temporary; this should be done in preprocess_gisaid.py).
columns["location"] = list(map(pyrocov.geo.gisaid_normalize, columns["location"]))
logger.info(f"Training on {len(columns['day'])} rows with columns:")
logger.info(", ".join(columns.keys()))
# Aggregate regions smaller than min_region_size to country level.
fine_regions = get_fine_regions(columns, min_region_size)
# Filter features into numbers of mutations and possibly genes.
usher_features = torch.load(features_filename)
mutations = usher_features[f"{feature_type}_mutations"]
features = usher_features[f"{feature_type}_features"].to(
device=device, dtype=torch.get_default_dtype()
)
keep = [m.count(",") == 0 for m in mutations] # restrict to single mutations
if include.get("gene"):
re_gene = re.compile(include.pop("gene"))
keep = [k and bool(re_gene.search(m)) for k, m in zip(keep, mutations)]
if exclude.get("gene"):
re_gene = re.compile(exclude.pop("gene"))
keep = [k and not re_gene.search(m) for k, m in zip(keep, mutations)]
if include.get("region"):
gene, region = include.pop("region")
lb, ub = sarscov2.GENE_STRUCTURE[gene][region]
for i, m in enumerate(mutations):
g, m = m.split(":")
if g != gene:
keep[i] = False
continue
match = re.search("[0-9]+", m)
assert match is not None
pos = int(match.group())
if not (lb < pos <= ub):
keep[i] = False
mutations = [m for k, m in zip(keep, mutations) if k]
if mutations:
features = features[:, keep]
else:
warnings.warn("No mutations selected; using empty features")
mutations = ["S:D614G"] # bogus
features = features[:, :1] * 0
logger.info("Loaded {} feature matrix".format(" x ".join(map(str, features.shape))))
# Construct the list of clades.
clade_id_inv = usher_features["clades"]
clade_id = {k: i for i, k in enumerate(clade_id_inv)}
clades = columns["clade"]
# Generate sparse_data.
sparse_data: dict = Counter()
countries = set()
states = set()
state_to_country_dict = {}
location_id: dict = OrderedDict()
skipped_clades = set()
num_obs = 0
for day, location, clade in zip(columns["day"], columns["location"], clades):
if clade not in clade_id:
if clade not in skipped_clades:
skipped_clades.add(clade)
if not clade.startswith("fine"):
logger.warning(f"WARNING skipping unsampled clade {clade}")
continue
# Filter by include/exclude
row = {
"location": location,
"day": day,
"clade": clade,
}
if not all(re.search(v, row[k]) for k, v in include.items()):
continue
if any(re.search(v, row[k]) for k, v in exclude.items()):
continue
# Filter by day
if end_day is not None:
if day > end_day:
continue
# preprocess parts
parts = location.split("/")
if len(parts) < 2:
continue
parts = tuple(p.strip() for p in parts[:3])
if len(parts) == 3 and parts not in fine_regions:
parts = parts[:2]
location = " / ".join(parts)
# Populate countries on the left and states on the right.
if len(parts) == 2: # country only
countries.add(location)
p = location_id.setdefault(location, len(countries) - 1)
else: # state and country
country = " / ".join(parts[:2])
countries.add(country)
c = location_id.setdefault(country, len(countries) - 1)
states.add(location)
p = location_id.setdefault(location, -len(states))
state_to_country_dict[p] = c
# Save sparse data.
num_obs += 1
t = day // TIMESTEP
c = clade_id[clade]
sparse_data[t, p, c] += 1
logger.warning(f"WARNING skipped {len(skipped_clades)} unsampled clades")
state_to_country = torch.full((len(states),), 999999, dtype=torch.long)
for s, c in state_to_country_dict.items():
state_to_country[s] = c
logger.info(f"Found {len(states)} states in {len(countries)} countries")
location_id_inv = [None] * len(location_id)
for k, i in location_id.items():
location_id_inv[i] = k
assert all(location_id_inv)
# Generate weekly_clades tensor from sparse_data.
if end_day is not None:
T = 1 + end_day // TIMESTEP
else:
T = 1 + max(columns["day"]) // TIMESTEP
P = len(location_id)
C = len(clade_id)
weekly_clades = torch.zeros(T, P, C)
for tps, n in sparse_data.items():
weekly_clades[tps] = n
logger.info(f"Dataset size [T x P x C] {T} x {P} x {C}")
logger.info(
f"Keeping {num_obs}/{len(clades)} rows "
f"(dropped {len(clades) - int(num_obs)})"
)
# Construct sparse representation.
pc_index = weekly_clades.ne(0).any(0).reshape(-1).nonzero(as_tuple=True)[0]
sparse_counts = dense_to_sparse(weekly_clades)
# Construct time scales centered around observations.
time = torch.arange(float(T)) * TIMESTEP / GENERATION_TIME
time -= time.mean()
# Construct lineage <-> clade mappings.
lineage_to_clade = usher_features["lineage_to_clade"]
clade_to_lineage = usher_features["clade_to_lineage"]
lineage_id_inv = sorted(lineage_to_clade)
lineage_id = {k: i for i, k in enumerate(lineage_id_inv)}
clade_id_to_lineage_id = torch.zeros(len(clade_to_lineage), dtype=torch.long)
for c, l in clade_to_lineage.items():
clade_id_to_lineage_id[clade_id[c]] = lineage_id[l]
lineage_id_to_clade_id = torch.zeros(len(lineage_to_clade), dtype=torch.long)
for l, c in lineage_to_clade.items():
lineage_id_to_clade_id[lineage_id[l]] = clade_id[c]
dataset = {
"clade_id": clade_id,
"clade_id_inv": clade_id_inv,
"clade_id_to_lineage_id": clade_id_to_lineage_id,
"clade_to_lineage": usher_features["clade_to_lineage"],
"features": features,
"lineage_id": lineage_id,
"lineage_id_inv": lineage_id_inv,
"lineage_id_to_clade_id": lineage_id_to_clade_id,
"lineage_to_clade": usher_features["lineage_to_clade"],
"location_id": location_id,
"location_id_inv": location_id_inv,
"mutations": mutations,
"pc_index": pc_index,
"sparse_counts": sparse_counts,
"state_to_country": state_to_country,
"time": time,
"weekly_clades": weekly_clades,
}
return dataset
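# A hedged usage sketch for load_gisaid_data (assumes the default
# results/usher.columns.pkl and results/usher.features.pt files exist;
# include/exclude values are regexes matched against the "location" and
# "clade" fields, while the "gene" and "region" keys are handled specially
# above; the filter values shown are placeholders):
#
#   dataset = load_gisaid_data(
#       device="cpu",
#       include={"location": r"^Europe"},
#       end_day=700,
#   )
#   T, P, C = dataset["weekly_clades"].shape  # [time x place x clade]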
| 6,101 |
def _upload_blob_bucket(bucket_name, source_file_name, destination):
"""Uploads a file to the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination)
blob.upload_from_filename(source_file_name)
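# A hypothetical usage sketch for _upload_blob_bucket (requires
# google-cloud-storage credentials; the bucket and file names below are
# placeholders):
#
#   _upload_blob_bucket("my-bucket", "local/report.csv", "reports/report.csv")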
| 6,102 |
def perform_query(query, terms, nodes, names, back_prop, prefix="", lineage=False):
"""
Search database based on a name or the ontological id.
"""
# The query is an exact match, print info.
if names.get(query) or terms.get(query):
# Get the GO or SO id
uid = names.get(query) or query
if lineage:
show_lineage(start=uid, terms=terms, back_prop=back_prop, nodes=nodes)
return
# Filter for SO: or GO: ids
if prefix and not uid.startswith(prefix):
return
# Get the parents.
parents = back_prop.get(uid,[])
# Fetch the name and definition
name, definition = terms[uid]
formatted_printer(name=name, uid=uid,
definition=definition,
parents=parents, nodes=nodes,
terms=terms)
return
# Search for names containing query.
search(query=query, terms=terms, prefix=prefix)
| 6,103 |
def get_symbol_size(sym):
"""Get the size of a symbol"""
return sym["st_size"]
| 6,104 |
def stake_increase(nmr, model_id):
"""Increase your stake by `value` NMR."""
click.echo(napi.stake_increase(nmr, model_id))
| 6,105 |
def print_initial_mlperf_config(params, seed):
"""Prints MLPerf config."""
mlp_log.mlperf_print('cache_clear', value=True)
mlp_log.mlperf_print('init_start', value=None)
mlp_log.mlperf_print('global_batch_size', params['batch_size'])
mlp_log.mlperf_print('opt_name', value=FLAGS.optimizer)
mlp_log.mlperf_print('opt_base_learning_rate', params['learning_rate'])
mlp_log.mlperf_print('opt_learning_rate_warmup_epochs',
params['lr_warmup_epochs'])
mlp_log.mlperf_print('opt_learning_rate_decay_boundary_epochs',
params['lr_decay_epochs'])
mlp_log.mlperf_print('opt_learning_rate_decay_factor',
params['lr_decay_factor'])
mlp_log.mlperf_print('opt_weight_decay', params['weight_decay'])
mlp_log.mlperf_print('train_samples', FLAGS.num_train_images)
mlp_log.mlperf_print('eval_samples', FLAGS.num_eval_images)
mlp_log.mlperf_print('seed', int(seed))
mlp_log.mlperf_print('opt_momentum', params['momentum'])
mlp_log.mlperf_print('oversampling', params['oversampling'])
mlp_log.mlperf_print('training_input_shape', params['input_shape'])
mlp_log.mlperf_print('validation_input_shape', params['val_input_shape'])
mlp_log.mlperf_print('validation_overlap', params['overlap'])
mlp_log.mlperf_print('opt_learning_rate_warmup_factor', 1)
mlp_log.mlperf_print('opt_initial_learning_rate',
params['init_learning_rate'])
mlp_log.mlperf_print('submission_benchmark', 'unet3d')
mlp_log.mlperf_print('gradient_accumulation_steps', 1)
mlp_log.mlperf_print('samples_per_epoch', params['samples_per_epoch'])
| 6,106 |
def rotate(dst: ti.template(), src: ti.template(), t: float):
"""
rotate src to dst
"""
rot = ti.Matrix.rotation2d(2 * math.pi * t)
for i, j in ti.ndrange(N,N):
p = (ti.Vector([i,j]) / N - 0.5)
if p.norm() > 0.5:
dst[i,j] = [0.0, 0.0, 0.0]
continue
p = rot @ p + 0.5
ij = int(p * N)
dst[i, j] = src[ij[0], ij[1]]
| 6,107 |
def csv_to_table(db_engine_url: str, csv_path: str, table_name: str):
"""
Converting the csv to sql and updating the database with the values.
# Connecting to the database and using the pandas method to handle conversion
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html
:param db_engine_url:
:param csv_path:
:param table_name:
:return:
"""
logger.debug("Table name: {0}".format(table_name))
# Connecting to the database
connection = init_db_connection(db_engine_url)
# reads the csv from file
csv_pd = pd.read_csv(csv_path)
logger.debug("CSV File: {0}".format(csv_pd))
    # write the csv to sql, replacing the table if it already exists (if_exists='replace')
csv_pd.to_sql(name=table_name, con=connection, if_exists='replace')
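# A hypothetical usage sketch for csv_to_table (the engine URL, CSV path and
# table name below are placeholders; init_db_connection is assumed to return
# a SQLAlchemy connectable as used above):
#
#   csv_to_table("sqlite:///example.db", "data/users.csv", "users")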
| 6,108 |
def plot_lightcurve(catalog, source):
"""Print info for CATALOG and SOURCE"""
catalog = source_catalogs[catalog]
source = catalog[source]
print()
print(source)
print()
# Generic info dict
# source.pprint()
# Specific source info print-out
# if hasattr(source, 'print_info'):
# source.print_info()
| 6,109 |
def parse_args(argv):
"""Parse and validate command line flags"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--base-image',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=IMAGE_REGEX),
default='gcr.io/google-appengine/python:latest',
help='Name of Docker image to use as base')
# In some cases, gcloud sets an environment variable to indicate
# the location of the application configuration file, rather than
# using the --config flag. The order of precedence from highest
# to lowest is:
#
# 1) --config flag
# 2) $GAE_APPLICATION_YAML_PATH environment variable
# 3) a file named "app.yaml" in the current working directory
parser.add_argument(
'--config',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=PRINTABLE_REGEX),
default=(os.environ.get(GAE_APPLICATION_YAML_PATH) or 'app.yaml'),
help='Path to application configuration file'
)
parser.add_argument(
'--source-dir',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=PRINTABLE_REGEX),
default='.',
help=('Application source and output directory'))
args = parser.parse_args(argv[1:])
return args
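# A minimal usage sketch for parse_args (argv[0] is skipped, so a dummy
# program name is passed; the flag values shown are placeholders, and
# validation_utils.validate_arg_regex is assumed to pass valid strings
# through unchanged):
#
#   args = parse_args(["prog", "--source-dir", "./myapp", "--config", "app.yaml"])
#   print(args.base_image)  # gcr.io/google-appengine/python:latest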
| 6,110 |
def to_unnamed_recursive(sexpr, scheme):
"""Convert all named column references to unnamed column references."""
def convert(n):
if isinstance(n, NamedAttributeRef):
n = toUnnamed(n, scheme)
n.apply(convert)
return n
return convert(sexpr)
| 6,111 |
def SetupPyMOLObjectNamesForComplex(FileIndex, PyMOLObjectNames):
"""Stetup groups and objects for complex. """
PDBFileRoot = OptionsInfo["InfilesInfo"]["InfilesRoots"][FileIndex]
PDBGroupName = "%s" % PDBFileRoot
PyMOLObjectNames["PDBGroup"] = PDBGroupName
PyMOLObjectNames["PDBGroupMembers"] = []
ComplexGroupName = "%s.Complex" % PyMOLObjectNames["PDBGroup"]
PyMOLObjectNames["ComplexGroup"] = ComplexGroupName
PyMOLObjectNames["PDBGroupMembers"].append(ComplexGroupName)
PyMOLObjectNames["Complex"] = "%s.Complex" % ComplexGroupName
CompositeMeshGroupName = "%s.2Fo-Fc" % (ComplexGroupName)
CompositeMapName = "%s.Map" % (CompositeMeshGroupName)
CompositeMeshName = "%s.Mesh" % (CompositeMeshGroupName)
CompositeVolumeName = "%s.Volume" % (CompositeMeshGroupName)
CompositeSurfaceName = "%s.Surface" % (CompositeMeshGroupName)
PyMOLObjectNames["ComplexCompositeEDGroup"] = CompositeMeshGroupName
PyMOLObjectNames["ComplexCompositeEDMap"] = CompositeMapName
PyMOLObjectNames["ComplexCompositeEDMesh"] = CompositeMeshName
PyMOLObjectNames["ComplexCompositeEDVolume"] = CompositeVolumeName
PyMOLObjectNames["ComplexCompositeEDSurface"] = CompositeSurfaceName
PyMOLObjectNames["ComplexCompositeEDGroupMembers"] = []
PyMOLObjectNames["ComplexCompositeEDGroupMembers"].append(CompositeMapName)
if OptionsInfo["VolumeComplex"]:
PyMOLObjectNames["ComplexCompositeEDGroupMembers"].append(CompositeVolumeName)
if OptionsInfo["MeshComplex"]:
PyMOLObjectNames["ComplexCompositeEDGroupMembers"].append(CompositeMeshName)
if OptionsInfo["SurfaceComplex"]:
PyMOLObjectNames["ComplexCompositeEDGroupMembers"].append(CompositeSurfaceName)
if PyMOLObjectNames["SetupDiffEDMapObjects"]:
DiffMeshGroupName = "%s.Fo-Fc" % ComplexGroupName
DiffMapName = "%s.Map" % DiffMeshGroupName
DiffVolumeName = "%s.Volume" % DiffMeshGroupName
DiffMesh1Name = "%s.Mesh1" % DiffMeshGroupName
DiffSurface1Name = "%s.Surface1" % DiffMeshGroupName
DiffMesh2Name = "%s.Mesh2" % DiffMeshGroupName
DiffSurface2Name = "%s.Surface2" % DiffMeshGroupName
PyMOLObjectNames["ComplexDiffEDGroup"] = DiffMeshGroupName
PyMOLObjectNames["ComplexDiffEDMap"] = DiffMapName
PyMOLObjectNames["ComplexDiffEDVolume"] = DiffVolumeName
PyMOLObjectNames["ComplexDiffEDMesh1"] = DiffMesh1Name
PyMOLObjectNames["ComplexDiffEDSurface1"] = DiffSurface1Name
PyMOLObjectNames["ComplexDiffEDMesh2"] = DiffMesh2Name
PyMOLObjectNames["ComplexDiffEDSurface2"] = DiffSurface2Name
PyMOLObjectNames["ComplexDiffEDGroupMembers"] = []
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffMapName)
if OptionsInfo["VolumeComplex"]:
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffVolumeName)
if OptionsInfo["MeshComplex"]:
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffMesh1Name)
if OptionsInfo["SurfaceComplex"]:
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffSurface1Name)
if OptionsInfo["MeshComplex"]:
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffMesh2Name)
if OptionsInfo["SurfaceComplex"]:
PyMOLObjectNames["ComplexDiffEDGroupMembers"].append(DiffSurface2Name)
PyMOLObjectNames["ComplexGroupMembers"] = []
PyMOLObjectNames["ComplexGroupMembers"].append(PyMOLObjectNames["Complex"])
PyMOLObjectNames["ComplexGroupMembers"].append(PyMOLObjectNames["ComplexCompositeEDGroup"])
if PyMOLObjectNames["SetupDiffEDMapObjects"]:
PyMOLObjectNames["ComplexGroupMembers"].append(PyMOLObjectNames["ComplexDiffEDGroup"])
| 6,112 |
def setup(bot):
"""Setup
The function called by Discord.py when adding another file in a multi-file project.
"""
bot.add_cog(General(bot))
| 6,113 |
def login_process():
"""Process login."""
email_address = request.form.get("email")
password = request.form.get("password")
user = User.query.filter_by(email_address=email_address).first()
if not user:
flash("Please try again!")
return redirect('/')
if user.password != password:
flash("Incorrect password")
return redirect('/')
session["user_id"] = user.user_id
flash("Logged in")
return redirect('/dashboard')
| 6,114 |
def get_doc_translations(doctype, name):
"""
Returns a dict custom tailored for the document.
- Translations with the following contexts are handled:
- doctype:name:docfield
- doctype:name
- doctype:docfield (Select fields only)
- 'Select' docfields will have a values dict which will have
translations for each option
document(doctype, name) {
[lang_code_1]: {
title: lang_1_title,
status: {
value: lang_1_status,
values: {
option_1: lang_1_option_1,
...
}
}
},
[lang_code_2]: {
title: lang_2_title,
}
}
"""
context = f"{doctype}:"
translations = frappe.db.sql("""
SELECT
t.language,
t.source_text,
t.context,
t.translated_text
FROM `tabTranslation` t
WHERE
t.context LIKE %(context)s
""", {
"context": f"{context}%"
}, as_dict=1)
tr_dict = frappe._dict()
if not len(translations):
return tr_dict
doc = frappe.get_cached_doc(doctype, name)
value_fieldname_dict = None
def get_value_fieldname_dict():
nonlocal value_fieldname_dict
if value_fieldname_dict is not None:
return value_fieldname_dict
d = frappe._dict()
for fieldname in frappe.get_meta(doctype).get_valid_columns():
v = doc.get(fieldname)
if not v:
continue
if v not in d:
d[v] = []
d[v].append(fieldname)
value_fieldname_dict = d
return value_fieldname_dict
for t in translations:
if t.language not in tr_dict:
tr_dict[t.language] = frappe._dict()
ctx = t.context.split(":")
if len(ctx) == 3 and ctx[1] == name:
# Docfield translation
# doctype:name:docfield
fieldname = t.context.split(":")[2]
if t.source_text == "*" or doc.get(fieldname) == t.source_text:
tr_dict[t.language][fieldname] = t.translated_text
elif len(ctx) == 2 and ctx[1] != name:
# Select DocField
select_df = ctx[1]
if select_df not in [x.fieldname for x in frappe.get_meta(doctype).get_select_fields()]:
continue
select_tr = tr_dict[t.language].setdefault(
select_df, frappe._dict(value=None, values=frappe._dict()))
select_tr.get("values")[t.source_text] = t.translated_text
if doc.get(select_df) == t.source_text:
select_tr.value = t.translated_text
elif len(ctx) == 2:
# Document Translation
# doctype:name
d = get_value_fieldname_dict()
if t.source_text in d:
for fieldname in d[t.source_text]:
if tr_dict[t.language].get(fieldname, None):
continue
tr_dict[t.language][fieldname] = t.translated_text
return tr_dict
| 6,115 |
def test_account_created(requestbin, login, ui_account):
"""
Test:
- Create account
- Get webhook response for created
- Assert that webhook response is not None
- Assert that response xml body contains right account name
"""
webhook = requestbin.get_webhook("created", str(ui_account.entity_id))
assert webhook is not None
xml = Et.fromstring(webhook)
name = xml.find(".//org_name").text
assert name == ui_account.entity_name
| 6,116 |
def fetch_newer_version(
installed_version=scancode_version,
new_version_url='https://pypi.org/pypi/scancode-toolkit/json',
force=False,
):
"""
Return a version string if there is an updated version of scancode-toolkit
newer than the installed version and available on PyPI. Return None
otherwise.
Limit the frequency of update checks to once per week.
State is stored in the scancode_cache_dir.
If `force` is True, redo a PyPI remote check.
"""
installed_version = packaging_version.parse(installed_version)
try:
state = VersionCheckState()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if ('last_check' in state.state and 'latest_version' in state.state):
last_check = datetime.datetime.strptime(
state.state['last_check'],
SELFCHECK_DATE_FMT
)
seconds_since_last_check = total_seconds(current_time - last_check)
one_week = 7 * 24 * 60 * 60
if seconds_since_last_check < one_week:
latest_version = state.state['latest_version']
if force:
latest_version = None
# Refresh the version if we need to or just see if we need to warn
if latest_version is None:
try:
latest_version = fetch_latest_version(new_version_url)
state.save(latest_version, current_time)
except Exception:
# save an empty version to avoid checking more than once a week
state.save(None, current_time)
raise
latest_version = packaging_version.parse(latest_version)
# Our git version string is not PEP 440 compliant, and thus improperly
# parsed via most 3rd party version parsers. We handle this case by
# pulling out the "base" release version by split()-ting on "post".
#
# For example, "3.1.2.post351.850399ba3" becomes "3.1.2"
if isinstance(installed_version, packaging_version.LegacyVersion):
            installed_version = str(installed_version).split('post')
installed_version = installed_version[0]
installed_version = packaging_version.parse(installed_version)
        # Determine if the installed version is older than the latest release
if (installed_version < latest_version
and installed_version.base_version != latest_version.base_version):
return str(latest_version)
except Exception:
msg = 'There was an error while checking for the latest version of ScanCode'
logger.debug(msg, exc_info=True)
| 6,117 |
def app(request):
"""
Default view for Person Authority App
"""
return direct_to_template(request,
'person_authority/app.html',
{'app':APP})
| 6,118 |
def is_xbar(top, name):
"""Check if the given name is crossbar
"""
xbars = list(filter(lambda node: node["name"] == name, top["xbar"]))
if len(xbars) == 0:
return False, None
if len(xbars) > 1:
log.error("Matching crossbar {} is more than one.".format(name))
raise SystemExit()
return True, xbars[0]
| 6,119 |
def index():
""" Root URL response """
return "Reminder: return some useful information in json format about the service here", status.HTTP_200_OK
| 6,120 |
def backproject(depth, K):
"""Backproject a depth map to a cloud map
depth: depth
----
organized cloud map: (H,W,3)
"""
H, W = depth.shape
X, Y = np.meshgrid(np.asarray(range(W)) - K[0, 2], np.asarray(range(H)) - K[1, 2])
return np.stack((X * depth / K[0, 0], Y * depth / K[1, 1], depth), axis=2)
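# A minimal, self-contained sketch of backproject using a constant depth map
# and a made-up pinhole intrinsic matrix (fx = fy = 500, cx = 320, cy = 240):
import numpy as np
K_example = np.array([[500.0, 0.0, 320.0],
                      [0.0, 500.0, 240.0],
                      [0.0, 0.0, 1.0]])
depth_example = np.ones((480, 640), dtype=np.float32)  # 1 m everywhere
cloud = backproject(depth_example, K_example)
print(cloud.shape)  # (480, 640, 3): per-pixel (X, Y, Z) coordinates in meters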
| 6,121 |
async def test_refresh_codes(hass, lock_data, caplog):
"""Test refresh_codes"""
await setup_ozw(hass, fixture=lock_data)
state = hass.states.get("lock.smartcode_10_touchpad_electronic_deadbolt_locked")
assert state is not None
assert state.state == "locked"
assert state.attributes["node_id"] == 14
entry = MockConfigEntry(
domain=DOMAIN, title="frontdoor", data=CONFIG_DATA, version=2
)
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
servicedata = {"entity_id": "lock.kwikset_touchpad_electronic_deadbolt_frontdoor"}
await hass.services.async_call(DOMAIN, SERVICE_REFRESH_CODES, servicedata)
await hass.async_block_till_done()
assert (
"Problem retrieving node_id from entity lock.kwikset_touchpad_electronic_deadbolt_frontdoor because the entity doesn't exist."
in caplog.text
)
servicedata = {"entity_id": "lock.smartcode_10_touchpad_electronic_deadbolt_locked"}
await hass.services.async_call(DOMAIN, SERVICE_REFRESH_CODES, servicedata)
await hass.async_block_till_done()
assert "DEBUG: Index found valueIDKey: 71776119310303256" in caplog.text
| 6,122 |
def get_parent_dir(os_path: str) -> str:
"""
    Get the directory two levels up from ``os_path`` (i.e. ``Path(os_path).parents[1]``).
"""
return str(Path(os_path).parents[1])
| 6,123 |
def test_is_admin():
"""Returns True if the program is ran as administrator.
Returns False if not ran as administrator.
"""
try:
is_admin = (os.getuid() == 0)
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == 1:
return 1
else:
return 0
| 6,124 |
def parse_work_url(work_url):
"""Extract work id from work url
Args:
work_url (str): work url
Returns:
str: bdrc work id
"""
work_id = ""
if work_url:
work_url_parts = work_url.split("/")
work_id = work_url_parts[-1]
return work_id
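# A minimal usage sketch for parse_work_url (the URL below is a made-up
# example of the expected pattern, where the work id is the last path segment):
print(parse_work_url("https://purl.bdrc.io/resource/W22084"))  # W22084
print(parse_work_url(""))  # "" (empty input yields an empty work id)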
| 6,125 |
def room():
"""Create a Room instance for all tests to share."""
return Room({"x": 4, "y": 4, "z": 4}, savable=False)
| 6,126 |
def lab_pull(tag, bucket, project, force):
""" Pulls Lab Experiment from minio to current directory """
home_dir = os.path.expanduser('~')
lab_dir = os.path.join(home_dir, '.lab')
if not os.path.exists(lab_dir):
click.secho('Lab is not configured to connect to minio. '
'Run <lab config> to set up access points.',
fg='red')
raise click.Abort()
if project is not None:
if os.path.exists(project):
click.secho('Directory '+project+' already exists.', fg='red')
raise click.Abort()
_pull_from_minio(tag, bucket, project, force)
| 6,127 |
def searchLiteralLocation(a_string, patterns):
"""assumes a_string is a string, being searched in
assumes patterns is a list of strings, to be search for in a_string
returns a list of re span object, representing the found literal if it exists,
else returns an empty list"""
results = []
for pattern in patterns:
regex = pattern
match = re.search(regex, a_string)
if match:
results.append((match, match.span()))
return results
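# A minimal usage sketch for searchLiteralLocation: each hit is a
# (match object, span) tuple, so "cat" is found at span (4, 7) below and
# "dog" produces no entry.
import re  # already required by the function above
hits = searchLiteralLocation("the cat sat on the mat", ["cat", "dog"])
for match, span in hits:
    print(match.group(), span)  # cat (4, 7)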
| 6,128 |
def UIOSelector_Highlight(inUIOSelector):
"""
Highlight (draw outline) the element (in app) by the UIO selector.
:param inUIOSelector: UIOSelector - List of items, which contains condition attributes
:return:
"""
# Check the bitness
lSafeOtherProcess = UIOSelector_SafeOtherGet_Process(inUIOSelector)
if lSafeOtherProcess is None:
UIO_Highlight(UIOSelector_Get_UIO(inUIOSelector))
else:
# Run function from other process with help of PIPE
lPIPEResuestDict = {"ModuleName": "UIDesktop", "ActivityName": "UIOSelector_Highlight",
"ArgumentList": [inUIOSelector],
"ArgumentDict": {}}
        # Send the request to the child process that handles Windows UI interaction
ProcessCommunicator.ProcessChildSendObject(lSafeOtherProcess, lPIPEResuestDict)
# Get answer from child process
lPIPEResponseDict = ProcessCommunicator.ProcessChildReadWaitObject(lSafeOtherProcess)
if lPIPEResponseDict["ErrorFlag"]:
raise Exception(
f"Exception was occured in child process (message): {lPIPEResponseDict['ErrorMessage']}, (traceback): {lPIPEResponseDict['ErrorTraceback']}")
else:
return lPIPEResponseDict["Result"]
return True
| 6,129 |
def _calc_paths ():
"""
Essentially Floyd-Warshall algorithm
"""
def dump ():
for i in sws:
for j in sws:
a = path_map[i][j][0]
#a = adjacency[i][j]
if a is None: a = "*"
print a,
print
sws = switches.values()
path_map.clear()
for k in sws:
for j,port in adjacency[k].iteritems():
if port is None: continue
path_map[k][j] = (1,None)
path_map[k][k] = (0,None) # distance, intermediate
#dump()
for k in sws:
for i in sws:
for j in sws:
if path_map[i][k][0] is not None:
if path_map[k][j][0] is not None:
# i -> k -> j exists
ikj_dist = path_map[i][k][0]+path_map[k][j][0]
if path_map[i][j][0] is None or ikj_dist < path_map[i][j][0]:
# i -> k -> j is better than existing
path_map[i][j] = (ikj_dist, k)
#print "--------------------"
#dump()
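# The triple loop above is a plain Floyd-Warshall pass over
# (distance, intermediate) pairs. A self-contained sketch of the same
# recurrence on a tiny directed graph, independent of the module globals
# (switches, adjacency, path_map) used above:
def floyd_warshall(nodes, edges):
    # dist[i][j] = (hop count, intermediate node), or (None, None) if unreachable
    dist = {i: {j: (0, None) if i == j else (None, None) for j in nodes}
            for i in nodes}
    for i, j in edges:
        dist[i][j] = (1, None)
    for k in nodes:
        for i in nodes:
            for j in nodes:
                if dist[i][k][0] is None or dist[k][j][0] is None:
                    continue
                ikj = dist[i][k][0] + dist[k][j][0]
                if dist[i][j][0] is None or ikj < dist[i][j][0]:
                    dist[i][j] = (ikj, k)
    return dist
d = floyd_warshall(["s1", "s2", "s3"], [("s1", "s2"), ("s2", "s3")])
print(d["s1"]["s3"])  # (2, 's2'): two hops, via s2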
| 6,130 |
def clear_cts_counters(device):
""" Clear CTS credentials
Args:
device ('obj'): device to use
Returns:
None
Raises:
SubCommandFailure: Failed to clear cts counters
"""
try:
device.execute('clear cts role-based counters')
except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to clear cts counters. Error:\n{error}".format(error=e)
        )
| 6,131 |
def completeMessage_BERT(mod, tok, ind, max_length=50):
"""
Sentence Completion of the secret text from BERT
"""
tokens_tensor = torch.tensor([ind])
    outInd = mod.generate(tokens_tensor, max_length=max_length)
outText=tok.decode(outInd[0].tolist())
newText=outText[len(tok.decode(ind)):]
newText=newText.split(sep=".", maxsplit=1)[0]
newText="".join((newText, "."))
outInd=ind+tok.encode(newText)
return outInd
| 6,132 |
def fast_star(x, y, points=20, outer=100, inner=50, **kwargs):
""" Draws a star with the given points, outer radius and inner radius.
The current stroke, strokewidth and fill color are applied.
"""
scale = gcd(inner, outer)
iscale = inner / scale
oscale = outer / scale
cached = _stars.get((points, iscale, oscale), [])
if not cached:
radii = [oscale, iscale] * int(points+1); radii.pop() # which radius?
f = pi / points
v = [(r*sin(i*f), r*cos(i*f)) for i, r in enumerate(radii)]
cached.append(precompile(lambda:(
glBegin(GL_TRIANGLE_FAN),
glVertex2f(0, 0),
[glVertex2f(vx, vy) for (vx, vy) in v],
glEnd()
)))
cached.append(precompile(lambda:(
glBegin(GL_LINE_LOOP),
[glVertex2f(vx, vy) for (vx, vy) in v],
glEnd()
)))
_stars[(points, iscale, oscale)] = cached
fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
for i, clr in enumerate((fill, stroke)):
if clr is not None and (i == 0 or strokewidth > 0):
if i == 1:
glLineWidth(strokewidth)
if strokestyle != _strokestyle:
glLineDash(strokestyle)
glColor4f(clr[0], clr[1], clr[2], clr[3] * _alpha)
glPushMatrix()
glTranslatef(x, y, 0)
glScalef(scale, scale, 1)
glCallList(cached[i])
glPopMatrix()
| 6,133 |
def vshcoefs():
"""Test plot of a tangential vector function given by vsh
coefficients."""
theta = numpy.linspace(0.0, math.pi, num=32)
phi = numpy.linspace(0.0, 2*math.pi, num=32)
PHI, THETA = numpy.meshgrid(phi, theta)
cfl = [[[0.0,1.0,-1.0]],[[0.0,0*1.0j,0*1.0j]]]
cfs = vshCoefs(cfl)
E_th, E_ph = vsh.vsfun(cfs,THETA,PHI)
tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph,
vcoord='sph',projection='equirectangular')
#tvecfun.plotvfonsph3D(THETA, PHI, E_th, E_ph)
| 6,134 |
def kill(pyngrok_config=None):
"""
Terminate the ``ngrok`` processes, if running, for the given config's ``ngrok_path``. This method will not
block, it will just issue a kill request.
:param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,
overriding :func:`~pyngrok.conf.get_default()`.
:type pyngrok_config: PyngrokConfig, optional
"""
if pyngrok_config is None:
pyngrok_config = conf.get_default()
process.kill_process(pyngrok_config.ngrok_path)
_current_tunnels.clear()
| 6,135 |
def _select_train_and_seat_type(train_names, seat_types, query_trains):
"""
    Select the train and seat type to book.
    :param train_names: list of preferred train names, in booking priority order
    :param seat_types: list of preferred seat types
    :param query_trains: list of trains returned by the ticket query
    :return: select_train, select_seat_type
"""
def _select_trains(query_trains, train_names=None):
if train_names:
select_trains = []
            # Pick trains in the priority order given by train_names
for train_name in train_names:
for train in query_trains:
if train['train_name'] == train_name:
select_trains.append(copy.deepcopy(train))
return select_trains
else:
return query_trains
def _select_types(trains, seat_types):
select_train = None
select_seat_type = None
for train in trains:
for seat_type in seat_types:
seat_type_left_ticket = train.get(seat_type, '')
if _check_seat_type_is_booking(seat_type_left_ticket):
select_seat_type = seat_type
select_train = copy.deepcopy(train)
return select_train, select_seat_type
else:
return None, None
_logger.debug('train_names:%s seat_types:%s' % (json.dumps(train_names, ensure_ascii=False),
json.dumps(seat_types, ensure_ascii=False)))
trains = _select_trains(query_trains, train_names)
# debug trains
for i in range(min(len(trains), len(train_names or ['']))):
_logger.debug('query left tickets train info. %s' % json.dumps(trains[i], ensure_ascii=False))
return _select_types(trains, seat_types)
| 6,136 |
def load_real_tcs():
""" Load real timecourses after djICA preprocessing """
try:
return sio.loadmat(REAL_TC_DIR)['Shat'][0]
except KeyError:
try:
return sio.loadmat(REAL_TC_DIR)['Shat_'][0]
except KeyError:
print("Incorrect key")
pass
| 6,137 |
def create_fsaverage_forward(epoch, **kwargs):
"""
A forward model is an estimation of the potential or field distribution for a known source
and for a known model of the head. Returns EEG forward operator with a downloaded template
MRI (fsaverage).
Parameters:
epoch: mne.epochs.Epochs
MNE epoch object containing portions of raw EEG data built around specified timestamp(s).
kwargs: arguments
        Specify any of the following arguments for the mne.make_forward_solution() function. These include mindist=5.0, n_jobs=1.
Returns:
mne.forward.forward.Forward:
Forward operator built from the user_input epoch and the fsaverage brain.
"""
defaultKwargs = { 'n_jobs': 1, 'mindist': 5.0 }
kwargs = { **defaultKwargs, **kwargs }
# Download fsaverage brain files (to use as 3D MRI brain for model)
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
# Make forward
fwd = mne.make_forward_solution(epoch.info,
trans=trans,
src=src,
bem=bem,
eeg=True,
**kwargs)
return fwd
| 6,138 |
def attach_client_to_session(
session, project_name, dataset_name, client_state):
"""Attach the client state to the session, by saving its id.
Args:
session
project_name
dataset_name
client_state: nc_models.ClientState
"""
sel_id_name = client_id_name(project_name, dataset_name)
session[sel_id_name] = client_state.id
| 6,139 |
def make_file(path):
"""
Factory function for File strategies
:param str path: A local relative path or s3://, file:// protocol urls
:return:
"""
try:
if not is_valid_url(path):
return LocalFile(os.path.abspath(path))
url_obj = urlparse(path)
if url_obj.scheme == 'file':
return LocalFile(url_obj.path)
if url_obj.scheme == 's3':
return S3File(url_obj.path, url_obj.netloc, boto3.resource('s3'))
raise Exception()
except Exception:
raise ValueError('Path %s is not a valid file or s3 url' % path)
| 6,140 |
def sort(cfile):
"""
Sort the ExoMol .trans files by wavenumber for MARVELized .states files
Parameters
----------
cfile: String
A repack configuration file.
"""
banner = 70 * ":"
args = parser(cfile)
files, dbtype, outfile, tmin, tmax, dtemp, wnmin, wnmax, dwn, \
sthresh, pffile, chunksize, ncpu = args
if dbtype != "exomol":
sys.exit(0)
missing = [file for file in files if not os.path.exists(file)]
if len(missing) > 0:
miss_list = '\n '.join(missing)
print(f"\n{banner}\n"
" File(s) not Found Error: These files are missing:\n"
f" {miss_list}"
f"\n{banner}\n")
sys.exit(0)
# Parse input files:
nfiles = len(files)
suff, mol, isot, pf, states = [], [], [], [], []
for dfile in files:
s, m, iso, p, st = u.parse_file(dfile, dbtype)
suff.append(s)
mol.append(m)
isot.append(iso)
pf.append(p)
if st is not None:
states.append(st)
# Uncompress states:
allstates = np.unique(states)
sdelete, sproc = [], []
for state in allstates:
if state.endswith(".bz2"):
proc = subprocess.Popen(["bzip2", "-dk", state])
sproc.append(proc)
sdelete.append(os.path.realpath(state).replace(".bz2", ""))
isotopes = list(np.unique(isot))
niso = len(isotopes)
zbuffer = np.amin([2, nfiles])
tdelete, tproc = [], []
for idx in range(zbuffer):
if files[idx].endswith(".bz2"):
print(f"Unzipping: '{files[idx]}'.")
proc = subprocess.Popen(["bzip2", "-dk", files[idx]])
tproc.append(proc)
tdelete.append(files[idx].replace(".bz2", ""))
for proc in sproc:
proc.communicate()
iso = np.zeros(nfiles, int)
for i in range(nfiles):
iso[i] = isotopes.index(isot[i])
lblargs = []
for j in range(niso):
i = isot.index(isotopes[j])
elow, degen = u.read_states(states[i])
lblargs.append([elow, degen, j])
# Turn isotopes from string to integer data type:
isotopes = np.asarray(isotopes, int)
# Create queues and start worker processes:
task_queue = mp.Queue()
done_queue = mp.Queue()
for i in range(ncpu):
mp.Process(target=sort_worker, args=(task_queue, done_queue)).start()
zproc = []
for i in range(nfiles):
# Make sure current files are uncompressed:
tproc[i].communicate()
# Uncompress following set:
if zbuffer < nfiles and files[zbuffer].endswith(".bz2"):
print(f"Unzipping: '{files[zbuffer]}'.")
proc = subprocess.Popen(["bzip2", "-dk", files[zbuffer]])
tproc.append(proc)
tdelete.append(files[zbuffer].replace(".bz2", ""))
zbuffer += 1
# Initialize lbl object (not reading yet):
j = iso[i]
print(f"Reading: '{files[i]}'.")
lbl = u.lbl(files[i], dbtype, *lblargs[j])
nlines = lbl.nlines
chunksize = int(nlines/ncpu) + 1
chunks = np.linspace(0, nlines, ncpu+1, dtype=int)
for k in range(ncpu):
args = lbl.lblfile, lbl.llen, lbl.elow, chunks[k], chunks[k+1], k
task_queue.put(args)
wn = [None] * ncpu
for k in range(ncpu):
w, idx = done_queue.get()
wn[idx] = w
all_wn = np.concatenate(wn)
wn_sort = np.argsort(np.argsort(all_wn))
lines = np.zeros(nlines, f'U{lbl.llen}')
lbl.file.seek(0)
for k in range(nlines):
lines[wn_sort[k]] = lbl.file.readline()
sort_file = lbl.lblfile.replace('trans.bz2', 'trans.sort')
with open(sort_file, 'w') as f:
f.writelines(lines)
proc = subprocess.Popen(["bzip2", "-z", sort_file])
zproc.append(proc)
lbl.close()
os.remove(tdelete[i])
for k in range(ncpu):
task_queue.put('STOP')
# Delete unzipped set:
for state in sdelete:
os.remove(state)
for proc in zproc:
proc.communicate()
| 6,141 |
def midi_to_hz(notes):
"""Hello Part 6! You should add documentation to this function.
"""
return 440.0 * (2.0 ** ((np.asanyarray(notes) - 69.0) / 12.0))
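# A minimal usage sketch: MIDI note 69 is concert A (440 Hz), and moving down
# an octave (12 semitones) halves the frequency.
print(midi_to_hz(69))        # 440.0
print(midi_to_hz([57, 69]))  # [220. 440.]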
| 6,142 |
def test_eq_score():
"""Test the score function when the score found should be returned"""
not_reported_score = 1
score_function = ScoreFunction(match_type='integer', equal=True)
score_function.set_not_reported(not_reported_score)
assert score_function.get_score(3) == 3
assert score_function.get_score(12) == 12
assert score_function.get_score(None) == not_reported_score
| 6,143 |
def bond_stereo_parities(chi, one_indexed=False):
""" Parse the bond stereo parities from the stereochemistry layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping bond keys onto parities
:rtype: dict[frozenset[int]: bool]
"""
ste_lyr_dct = stereo_layers(chi)
bnd_ste_dct = _bond_stereo_parities(ste_lyr_dct, one_indexed=one_indexed)
return bnd_ste_dct
| 6,144 |
def filter_camera_angle(places):
"""Filter camera angles for KiTTI Datasets"""
bool_in = np.logical_and((places[:, 1] < places[:, 0] - 0.27), (-places[:, 1] < places[:, 0] - 0.27))
# bool_in = np.logical_and((places[:, 1] < places[:, 0]), (-places[:, 1] < places[:, 0]))
return places[bool_in]
| 6,145 |
def neural_log_literal_function(identifier):
"""
A decorator for NeuralLog literal functions.
:param identifier: the identifier of the function
:type identifier: str
:return: the decorated function
:rtype: function
"""
return lambda x: registry(x, identifier, literal_functions)
| 6,146 |
def get_factors(n: int) -> list:
"""Returns the factors of a given integer.
"""
return [i for i in range(1, n+1) if n % i == 0]
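# A minimal usage sketch for get_factors:
print(get_factors(12))  # [1, 2, 3, 4, 6, 12]
print(get_factors(7))   # [1, 7]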
| 6,147 |
def fetch_tables():
""" Used by the frontend, returns a JSON list of all the tables including metadata. """
return jsonify([
{
"tab": "animeTables",
"name": "Anime",
"tables": [
{
"id": "englishAnimeSites",
"title": "English Streaming Sites",
"type": "anime"
},
{
"id": "foreignAnimeSites",
"title": "Foreign Streaming Sites",
"type": "anime"
},
{
"id": "downloadSites",
"title": "Download Only Sites",
"type": "animeDownload"
}
]
},
{
"tab": "mangaTables",
"name": "Manga",
"tables": [
{
"id": "englishMangaAggregators",
"title": "Aggregators",
"type": "manga"
},
{
"id": "foreignMangaAggregators",
"title": "Non-English Aggregators",
"type": "manga"
},
{
"id": "englishMangaScans",
"title": "Scans",
"type": "manga"
},
{
"id": "foreignMangaScans",
"title": "Non-English Scans",
"type": "manga"
}
]
},
{
"tab": "lightNovelTables",
"name": "Novels",
"tables": [
{
"id": "lightNovels",
"title": "Light Novels",
"type": "novel"
},
{
"id": "visualNovels",
"title": "Visual Novels",
"type": "novel"
}
]
},
{
"tab": "applicationsTables",
"name": "Applications",
"tables": [
{
"id": "iosApplications",
"title": "iOS",
"type": "application"
},
{
"id": "androidApplications",
"title": "Android",
"type": "application"
},
{
"id": "windowsApplications",
"title": "Windows",
"type": "application"
},
{
"id": "macOSApplications",
"title": "macOS",
"type": "application"
},
{
"id": "browserExtensions",
"title": "Browser Extensions",
"type": "application"
}
]
},
{
"tab": "hentaiTables",
"name": "Hentai",
"tables": [
{
"id": "hentaiAnimeSites",
"title": "Hentai Anime Streaming Sites",
"type": "anime"
},
{
"id": "hentaiDoujinshiSites",
"title": "Hentai Manga/Image Boards/LN sites",
"type": "novel"
},
{
"id": "hentaiDownloadSites",
"title": "Hentai Download",
"type": "animeDownload"
},
{
"id": "hentaiApplications",
"title": "Hentai Applications",
"type": "application"
}
]
},
{
"tab": "vpnTables",
"name": "VPN",
"tables": [
{
"id": "vpnServices",
"title": "VPNs",
"type": "vpn"
}
]
}
])
| 6,148 |
def configure_logger(logger, logfile):
"""Configure logger"""
formatter = logging.Formatter(
"%(asctime)s :: %(levelname)s :: %(message)s")
file_handler = RotatingFileHandler(logfile, "a", 1000000, 1)
# Add logger to file
if (config.w.conf_file.get_w_debug().title() == 'True'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
| 6,149 |
def base_positive_warps():
"""
Get warp functions associated with domain (0,inf), scale 1.0
Warp function is defined as f(x) = log(exp(x)-1)
Returns
-------
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor]
Function from (0,inf) to R, from R to (0,inf),
and log of derivative of function from (0,inf) to R
"""
warpf = utils.invsoftplus
iwarpf = utils.softplus
logdwarpf = lambda x: x - utils.invsoftplus(x)
return warpf, iwarpf, logdwarpf
| 6,150 |
def dev_work_create():
"""
Create work order.
:return:
"""
db_ins = current_user.dbs
audits = User.query.filter(User.role == 'audit')
form = WorkForm()
if form.validate_on_submit():
sql_content = form.sql_content.data
db_ins = form.db_ins.data
shard = form.shard.data
if form.backup.data:
is_backup = True
else:
is_backup = False
sql_content = sql_content.rstrip().replace("\n", " ")
# Only Create and Alter can be used with table shard
        shard_create = re.search(r'\s*create\s+', sql_content, flags=re.IGNORECASE)
        shard_alter = re.search(r'\s*alter\s+', sql_content, flags=re.IGNORECASE)
shard_judge = shard_create or shard_alter
if shard != '0' and not shard_judge:
flash('Only Create and Alter sql can be used when using table shard!')
return redirect(url_for('.dev_work_create'))
# split joint sql with shard numbers
if shard != '0' and shard_judge:
split_sql = sqlparse.split(sql_content)
format_table = re.sub(" +", " ", split_sql[1])
sql_content = ''
for count in range(int(shard)):
format_table_list = format_table.split(' ')
shard_name = '`' + str(format_table_list[2].strip('`')) + '_' + str(count) + '`'
format_table_list[2] = shard_name
sql_content += ' '.join(format_table_list)
sql_content = split_sql[0] + sql_content
if sql_content[-1] == ';':
work = Work()
work.name = form.name.data
work.db_name = form.db_ins.data
work.shard = form.shard.data
work.backup = is_backup
work.dev_name = current_user.name
work.audit_name = form.audit.data
work.sql_content = sql_content
result = sql_auto_review(sql_content, db_ins)
if result or len(result) != 0:
json_result = json.dumps(result)
work.status = 1
for row in result:
if row[2] == 2:
work.status = 2
break
elif re.match(r"\w*comments\w*", row[4]):
work.status = 2
break
work.auto_review = json_result
work.create_time = datetime.now()
db.session.add(work)
db.session.commit()
if current_app.config['MAIL_ON_OFF'] == 'ON':
auditor = User.query.filter(User.name == work.audit_name).first()
mail_content = "<p>Proposer:" + work.dev_name + "</p>" + "<p>Sql Content:" + work.sql_content + \
"</p>" + "<p>A new work sheet.</p>"
send_mail.delay('【inception_mysql】New work sheet', mail_content, auditor.email)
return redirect(url_for('.dev_work'))
else:
                flash('The return of Inception is null. Something may be wrong with the SQL statement.')
return redirect(url_for('.dev_work_create'))
else:
            flash("SQL statement does not end with ';'. Please check!")
return redirect(url_for('.dev_work_create'))
return render_template('dev/work_create.html', form=form, db_ins=db_ins, audits=audits)
| 6,151 |
def blur(x, mean=0.0, stddev=1.0):
"""
Resize to smaller size (AREA) and then resize to original size (BILINEAR)
"""
size = tf.shape(x)[:2]
downsample_factor = 1 + tf.math.abs(tf.random.normal([], mean=mean, stddev=stddev))
small_size = tf.to_int32(tf.to_float(size)/downsample_factor)
x = tf.image.resize_images(x, small_size, method=tf.image.ResizeMethod.AREA)
x = tf.image.resize_images(x, size, method=tf.image.ResizeMethod.BILINEAR)
return x
| 6,152 |
def curve(ini_file,
cali_fits,
out_lc_png=None,
fig_set=None,
noplot=False,
overwrite=False,
log=None,
extra_config=None):
"""
plot light curve, calibration with giving data
:param ini_file:
:param cali_fits:
:param out_lc_png: if None, plot only
:param fig_set: figure settings
    :param noplot: if True, do not show the plot, only save it; if out_lc_png is also None, do nothing
:param overwrite:
:param log:
:param extra_config:
:return:
"""
ini = conf(ini_file, extra_config)
lf = logfile(log, level=ini["log_level"])
if out_lc_png and os.path.isfile(out_lc_png) and not overwrite:
lf.show("SKIP: " + out_lc_png + "")
return
if not out_lc_png and noplot:
lf.show("NO Plot and NO Save, Do Nothing!")
return
if not os.path.isfile(cali_fits):
lf.show("SKIP -- FILE NOT EXISTS: " + cali_fits, logfile.ERROR)
return
# init fig_set, if given key not provided, use default value
if fig_set is None:
fig_set = {}
def_fig_set = {
"step_tgt_chk": None, # steps between target curve and 1st checker
"step_chk_chk": None, # steps between checkers
"marker_tgt": "rs", # color and marker of target
"marker_chk": ("b*", "g*", "m*", "c*"), # color and marker of checker
"xlim": None, # if None, use default
"ylim": None, # if None, use reversed default
"bjd_0": 0, # subtract BJD with this, if 0, use original
"figsize": (20, 10), # figure size
}
for k in def_fig_set:
if k not in fig_set:
fig_set[k] = def_fig_set[k]
_curve_(ini, cali_fits, fig_set, noplot, out_lc_png, lf)
lf.close()
| 6,153 |
def test_regression_gch(sample_inputs_fixture):
"""
Tandem turbines with the upstream turbine yawed and yaw added recovery
correction enabled
"""
sample_inputs_fixture.floris["wake"]["properties"][
"velocity_model"
] = VELOCITY_MODEL
sample_inputs_fixture.floris["wake"]["properties"][
"deflection_model"
] = DEFLECTION_MODEL
floris = Floris(input_dict=sample_inputs_fixture.floris)
floris.farm.turbines[0].yaw_angle = 5.0
# With GCH off (via conftest), GCH should be same as Gauss
floris.farm.flow_field.calculate_wake()
test_results = turbines_to_array(floris.farm.turbine_map.turbines)
if DEBUG:
print_test_values(floris.farm.turbine_map.turbines)
for i in range(len(floris.farm.turbine_map.turbines)):
check = yawed_baseline
assert test_results[i][0] == approx(check[i][0])
assert test_results[i][1] == approx(check[i][1])
assert test_results[i][2] == approx(check[i][2])
assert test_results[i][3] == approx(check[i][3])
# With GCH on, the results should change
floris.farm.wake.deflection_model.use_secondary_steering = True
floris.farm.wake.velocity_model.use_yaw_added_recovery = True
floris.farm.wake.velocity_model.calculate_VW_velocities = True
floris.farm.flow_field.reinitialize_flow_field()
floris.farm.flow_field.calculate_wake()
test_results = turbines_to_array(floris.farm.turbine_map.turbines)
if DEBUG:
print_test_values(floris.farm.turbine_map.turbines)
for i in range(len(floris.farm.turbine_map.turbines)):
check = gch_baseline
assert test_results[i][0] == approx(check[i][0])
assert test_results[i][1] == approx(check[i][1])
assert test_results[i][2] == approx(check[i][2])
assert test_results[i][3] == approx(check[i][3])
| 6,154 |
def test_extend_dict_key_value(minion_opts, local_salt):
"""
Test the `extend_dict_key_value` Jinja filter.
"""
rendered = render_jinja_tmpl(
"{{ {} | extend_dict_key_value('foo:bar:baz', [42]) }}",
dict(opts=minion_opts, saltenv="test", salt=local_salt),
)
assert rendered == "{'foo': {'bar': {'baz': [42]}}}"
rendered = render_jinja_tmpl(
"{{ foo | extend_dict_key_value('bar:baz', [42, 43]) }}",
dict(
foo={"bar": {"baz": [1, 2]}},
opts=minion_opts,
saltenv="test",
salt=local_salt,
),
)
assert rendered == "{'bar': {'baz': [1, 2, 42, 43]}}"
# Edge cases
rendered = render_jinja_tmpl(
"{{ {} | extend_dict_key_value('foo:bar:baz', 'quux') }}",
dict(opts=minion_opts, saltenv="test", salt=local_salt),
)
assert rendered == "{'foo': {'bar': {'baz': ['q', 'u', 'u', 'x']}}}"
# Beware! When supplying a dict, the list gets extended with the dict coerced to a list,
# which will only contain the keys of the dict.
rendered = render_jinja_tmpl(
"{{ {} | extend_dict_key_value('foo:bar:baz', {'foo': 'bar'}) }}",
dict(opts=minion_opts, saltenv="test", salt=local_salt),
)
assert rendered == "{'foo': {'bar': {'baz': ['foo']}}}"
# Test incorrect usage
template = "{{ {} | extend_dict_key_value('bar:baz', 42) }}"
expected = r"Cannot extend {} with a {}.".format(type([]), int)
with pytest.raises(SaltRenderError, match=expected):
render_jinja_tmpl(
template, dict(opts=minion_opts, saltenv="test", salt=local_salt)
)
| 6,155 |
def improve(update, close, guess=1, max_updates=100):
"""Iteratively improve guess with update until close(guess) is true or
max_updates have been applied."""
k = 0
while not close(guess) and k < max_updates:
guess = update(guess)
k = k + 1
return guess
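# A minimal usage sketch: improve() with a Babylonian update step converges on
# sqrt(2). Both helpers below are illustrative and not part of the original code.
def sqrt2_update(guess):
    return (guess + 2.0 / guess) / 2.0
def sqrt2_close(guess):
    return abs(guess * guess - 2) < 1e-9
print(improve(sqrt2_update, sqrt2_close))  # 1.41421356...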
| 6,156 |
def images_to_sequence(tensor):
"""Convert a batch of images into a batch of sequences.
Args:
tensor: a (num_images, height, width, depth) tensor
Returns:
(width, num_images*height, depth) sequence tensor
"""
num_image_batches, height, width, depth = _shape(tensor)
transposed = tf.transpose(tensor, [2, 0, 1, 3])
return tf.reshape(transposed, [width, num_image_batches * height, depth])
| 6,157 |
def transform_regions(regions: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Transform aggregated region data for map
regions -- aggregated data from region pipeline
"""
records = []
for record in regions:
if "latitude" in record["_id"].keys():
if record["admin3"]:
id = record["admin3"]
search_term = "admin3"
elif record["admin2"]:
id = record["admin2"]
search_term = "admin2"
elif record["admin1"]:
id = record["admin1"]
search_term = "admin1"
else:
id = country_name(record["country"])
if id is None:
continue
search_term = "country"
new_record = {
"_id": id,
"casecount": record["casecount"],
"country": country_name(record["country"]),
"country_code": record["country"],
"admin1": record["admin1"],
"admin2": record["admin2"],
"admin3": record["admin3"],
"lat": record["_id"]["latitude"],
"long": record["_id"]["longitude"],
"search": search_term,
}
logging.info(new_record)
records.append(new_record)
return records
| 6,158 |
def _file_to_import_exists(storage_client: storage.client.Client,
bucket_name: str, filename: str) -> bool:
"""Helper function that returns whether the given GCS file exists or not."""
storage_bucket = storage_client.get_bucket(bucket_name)
return storage.Blob(
bucket=storage_bucket, name=filename).exists(storage_client)
| 6,159 |
def run(args, options):
""" Compile a file and output a Program object.
If options.merge_opens is set to True, will attempt to merge any
parallelisable open instructions. """
prog = Program(args, options)
VARS['program'] = prog
if options.binary:
VARS['sint'] = GC_types.sbitintvec.get_type(int(options.binary))
VARS['sfix'] = GC_types.sbitfixvec
for i in 'cint', 'cfix', 'cgf2n', 'sintbit', 'sgf2n', 'sgf2nint', \
'sgf2nuint', 'sgf2nuint32', 'sgf2nfloat', 'sfloat', 'cfloat', \
'squant':
del VARS[i]
print('Compiling file', prog.infile)
# make compiler modules directly accessible
sys.path.insert(0, 'Compiler')
# create the tapes
exec(compile(open(prog.infile).read(), prog.infile, 'exec'), VARS)
prog.finalize()
if prog.req_num:
print('Program requires:')
for x in prog.req_num.pretty():
print(x)
if prog.verbose:
print('Program requires:', repr(prog.req_num))
print('Cost:', 0 if prog.req_num is None else prog.req_num.cost())
print('Memory size:', dict(prog.allocated_mem))
return prog
| 6,160 |
def get_relative_poses(
num_frames: int,
frames: np.ndarray,
selected_track_id: Optional[int],
agents: List[np.ndarray],
agent_from_world: np.ndarray,
current_agent_yaw: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Internal function that creates the targets and availability masks for deep prediction-type models.
The futures/history offset (in meters) are computed. When no info is available (e.g. agent not in frame)
a 0 is set in the availability array (1 otherwise).
Note: return dtype is float32, even if the provided args are float64. Still, the transformation
between space is performed in float64 to ensure precision
Args:
num_frames (int): number of offset we want in the future/history
frames (np.ndarray): available frames. This may be less than num_frames
selected_track_id (Optional[int]): agent track_id or AV (None)
agents (List[np.ndarray]): list of agents arrays (same len of frames)
agent_from_world (np.ndarray): local from world matrix
current_agent_yaw (float): angle of the agent at timestep 0
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: position offsets, angle offsets, extent, availabilities
"""
# How much the coordinates differ from the current state in meters.
positions_m = np.zeros((num_frames, 2), dtype=agent_from_world.dtype)
yaws_rad = np.zeros((num_frames, 1), dtype=np.float32)
extents_m = np.zeros((num_frames, 2), dtype=np.float32)
availabilities = np.zeros((num_frames,), dtype=np.float32)
for i, (frame, frame_agents) in enumerate(zip(frames, agents)):
if selected_track_id is None:
agent_centroid_m = frame["ego_translation"][:2]
agent_yaw_rad = rotation33_as_yaw(frame["ego_rotation"])
agent_extent = (EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH)
else:
# it's not guaranteed the target will be in every frame
try:
agent = filter_agents_by_track_id(frame_agents, selected_track_id)[0]
agent_centroid_m = agent["centroid"]
agent_yaw_rad = agent["yaw"]
agent_extent = agent["extent"][:2]
except IndexError:
availabilities[i] = 0.0 # keep track of invalid futures/history
continue
positions_m[i] = agent_centroid_m
yaws_rad[i] = agent_yaw_rad
extents_m[i] = agent_extent
availabilities[i] = 1.0
# batch transform to speed up
positions_m = transform_points(positions_m, agent_from_world) * availabilities[:, np.newaxis]
yaws_rad = angular_distance(yaws_rad, current_agent_yaw) * availabilities[:, np.newaxis]
return positions_m.astype(np.float32), yaws_rad, extents_m, availabilities
| 6,161 |
def read_mat_cplx_bin(fname):
"""
Reads a .bin file containing floating-point values (complex) saved by Koala
Parameters
----------
fname : string
Path to the file
Returns
-------
buffer : ndarray
An array containing the complex floating-point values read from the file
See Also
--------
write_mat_cplx_bin
Example
-------
>>> buf = read_mat_cplx_bin('test/file_cplx.bin')
>>> buf
array([[ 0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j],
[ 0.00000000e+00 +0.00000000e+00j,
4.97599517e-09 +9.14632536e-10j,
5.99623329e-09 -1.52811275e-08j, ...,
1.17636354e-07 -1.01500063e-07j,
6.33714581e-10 +5.61812996e-09j,
0.00000000e+00 +0.00000000e+00j],
...,
[ 0.00000000e+00 +0.00000000e+00j,
-1.26479121e-08 -2.92324431e-09j,
-4.59448168e-09 +9.28236474e-08j, ...,
-4.15031316e-08 +1.48466597e-07j,
4.41099779e-08 -1.27046489e-08j,
0.00000000e+00 +0.00000000e+00j],
[ -0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j]], dtype=complex64)
"""
kcplx_header_dtype = numpy.dtype([
("width", "i4"),
("height", "i4")
])
f = open(fname, 'rb')
kcplx_header = numpy.fromfile(f, dtype=kcplx_header_dtype, count=1)
shape = (kcplx_header['height'], kcplx_header['width'])
#print kcplx_header
tmp = numpy.fromfile(f, dtype='float32')
f.close()
real_tmp = (tmp[0:kcplx_header['height']*kcplx_header['width']]).reshape(shape)
imag_tmp = (tmp[kcplx_header['height']*kcplx_header['width']:]).reshape(shape)
#print tmp
#print 'array = {}'.format(len(tmp))
return real_tmp + 1j*imag_tmp
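
# A minimal sketch of the companion writer referenced in "See Also" above. It is
# not part of the original module; the header layout (int32 width/height) and the
# float32 "all real parts, then all imaginary parts" ordering are inferred from
# read_mat_cplx_bin, so treat this as an assumption rather than Koala's spec.
def write_mat_cplx_bin(fname, buffer):
    """Write a 2D complex array in the layout expected by read_mat_cplx_bin."""
    header = numpy.zeros(1, dtype=numpy.dtype([("width", "i4"), ("height", "i4")]))
    header["height"], header["width"] = buffer.shape
    with open(fname, 'wb') as f:
        header.tofile(f)
        buffer.real.astype('float32').tofile(f)
        buffer.imag.astype('float32').tofile(f)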
| 6,162 |
def test_init_subclass_attrs():
"""
`__init_subclass__` works with attrs classes as long as slots=False.
"""
@attr.s(slots=False)
class Base:
def __init_subclass__(cls, param, **kw):
super().__init_subclass__(**kw)
cls.param = param
@attr.s
class Attrs(Base, param="foo"):
pass
assert "foo" == Attrs().param
| 6,163 |
def DeleteDataBundle(**kwargs):
"""
Deletes a Data Bundle by ID.
:param kwargs:
:return:
"""
data_bundle_id = kwargs['data_bundle_id']
del data_bundles[data_bundle_id]
return(kwargs, 200)
| 6,164 |
def determineactions(repo, deficiencies, sourcereqs, destreqs):
"""Determine upgrade actions that will be performed.
Given a list of improvements as returned by ``finddeficiencies`` and
``findoptimizations``, determine the list of upgrade actions that
will be performed.
The role of this function is to filter improvements if needed, apply
recommended optimizations from the improvements list that make sense,
etc.
    Returns a list of the improvements (actions) that will be performed.
"""
newactions = []
knownreqs = supporteddestrequirements(repo)
for d in deficiencies:
name = d.name
# If the action is a requirement that doesn't show up in the
# destination requirements, prune the action.
if name in knownreqs and name not in destreqs:
continue
newactions.append(d)
# FUTURE consider adding some optimizations here for certain transitions.
# e.g. adding generaldelta could schedule parent redeltas.
return newactions
| 6,165 |
def filter_ccfs(ccfs, sc_thresh, min_ccf):
"""
Remove noisy ccfs from irrelevant experiments
:param ccfs: 2d array
:param sc_thresh: int
number of sign changes expected
:param min_ccf: float
cutoff value for a ccf to be above the noise threshold
    :return: 2d array containing only the ccfs that pass both filters
"""
if sc_thresh is None:
sc_thresh = np.inf
asign = np.sign(ccfs)
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
signchange[:, 0] = 0
# (np.sum(signchange, axis=1) <= sc_thresh) &
### Do not cross correlate with a lag greater than 1/2 of the dataset when the timeseries is short
### throw out these cross correlations in filtered time-series
max_lag = ccfs.shape[1]
# if max_lag < 10:
# max_lag = int(np.ceil(ccfs.shape[1]/2.0))
filtered_ccf = ccfs[(np.sum(signchange, axis=1) <= sc_thresh) & (np.max(np.abs(ccfs), axis=1) > min_ccf),
:max_lag + 1]
return filtered_ccf
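
# Hedged usage sketch with synthetic data, assuming filter_ccfs and numpy (np)
# are in scope as above: a smooth, high-amplitude ccf survives, while low-amplitude
# noise rows with many sign changes are dropped.
rng = np.random.default_rng(0)
clean_ccf = np.abs(np.sin(np.linspace(0, np.pi, 20)))[None, :]   # at most one sign change, peak 1.0
noise_ccfs = rng.normal(scale=0.05, size=(5, 20))                # many sign changes, peak ~0.15
kept = filter_ccfs(np.vstack([clean_ccf, noise_ccfs]), sc_thresh=2, min_ccf=0.5)
# kept.shape == (1, 20)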
| 6,166 |
def market_data(symbol, expirationDate, strike, optionType, info=None):
"""Gets option market data from information. Takes time to load pages."""
assert all(isinstance(i, str) for i in [symbol, expirationDate, strike, optionType])
    return robin_stocks.options.get_option_market_data(symbol, expirationDate, strike, optionType, info=info)
| 6,167 |
def close_logger(logger):
"""Filehandles etc are not closed automatically, so close them here"""
if logger is not None:
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
| 6,168 |
def get_documents_embeddings (y, embedder, column):
"""
    Given a DataFrame containing study_id and a text column, return a numpy array of embeddings.
    This function avoids embedding the same text twice (for computational efficiency).
    Parameters:
    -----------
    y: DataFrame containing study_id and a text column
    embedder: embedding object exposing a fit_transform function
    column: name of the column containing the text to embed
Output:
-------
Numpy array of size (n, embedding_size)
"""
# Getting reports DF
reports_df = y[["study_id", column]].fillna("").drop_duplicates("study_id").reset_index(drop=True)
reports_list = reports_df[column].astype(str).values
# Getting BERT embeddings
reports_embeddings = embedder.fit_transform(reports_list)
output = pd.merge(
y[["study_id"]],
reports_df[["study_id"]].join(
pd.DataFrame(reports_embeddings)
),
left_on="study_id",
right_on="study_id",
how="left"
).iloc[:,1:].values
return output
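
# Hedged usage sketch with a toy embedder: the real embedder (e.g. a BERT wrapper)
# only needs to expose fit_transform(list_of_str) -> ndarray. Assumes pandas (pd)
# and numpy (np) are imported as in this module; ToyEmbedder is hypothetical.
class ToyEmbedder:
    def fit_transform(self, texts):
        return np.array([[float(len(t))] for t in texts])  # 1-dim "embedding": text length

y_demo = pd.DataFrame({"study_id": [1, 1, 2],
                       "report": ["short text", "short text", "a longer report"]})
emb = get_documents_embeddings(y_demo, ToyEmbedder(), "report")
# emb.shape == (3, 1); the duplicated study_id is embedded only once, then re-expanded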
| 6,169 |
def index(request):
"""view fonction de la page d'accueil
Render templates de la page d'accueil
"""
return render(request, "t_myapp/index.html")
| 6,170 |
def cidr_mask_to_subnet_mask(mask_num):
"""
    Convert a CIDR prefix length to a dotted-decimal subnet mask.
    :param mask_num: prefix length, e.g. 16
    :return: dotted-decimal IPv4 address string
"""
return convert_to_ipv4(cidr_mask_to_ip_int(mask_num), stype='int')
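
# A stdlib-only sketch producing the same kind of result, useful because the
# helpers cidr_mask_to_ip_int / convert_to_ipv4 are not shown here; this is an
# alternative, not the module's own implementation.
import ipaddress

def cidr_to_netmask_stdlib(mask_num):
    """e.g. 16 -> '255.255.0.0'"""
    return str(ipaddress.ip_network(f"0.0.0.0/{mask_num}").netmask)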
| 6,171 |
def correlate(A,B,
rows=None,columns=None, mode_row='zero', mode_column='zero'):
"""Correlate A and B.
Input:
------
A,B : array
Input data.
columns : int
Do correlation at columns 0..columns, defaults to the number of columns in A.
rows : int
        Do correlation at rows 0..rows, defaults to the number of rows in A.
mode_row, mode_column : string
How values outside boundaries are handled ('zero' or 'mirror').
Output:
-------
Y : array
Rows-by-columns array of correlation values.
"""
A,B = atype([A,B],[np.double,np.double])
assert A.ndim == 2 and B.ndim == 2, "Input arrays must be two dimensional"
A_r,A_c = A.shape
B_r,B_c = B.shape
columns = columns or A_c
rows = rows or A_r
assert rows <= A_r and columns <= A_c, \
"columns and rows cannot be larger than dimensions of A"
modes = {'zero': 0,
'mirror': 1}
output = np.empty((rows,columns),dtype=np.double)
_lib.correlate(A_r, A_c, A,
B_r, B_c, B,
rows, columns,
modes[mode_row], modes[mode_column],
output)
return output
| 6,172 |
def question_print( instr ) :
"""
function question_print
by Charles Stanier charles-stanier@uiowa.edu Aug 9, 2019
purpose: this takes a string and prints it with added question marks
modification history: none
input arguments: instr is intended to be a variable of type str (string)
"""
newstr = '? ' + instr + ' ?' # this concatenates three strings together
# and assigns the result to a new variable newstr
print(newstr)
| 6,173 |
def plot_two_series(A, B, variable, title):
"""Plot two series using the same `date` index.
Parameters
----------
A, B: pd.DataFrame
Dataframe with a `date` key and a variable
passed in the `variable` parameter. Parameter A
represents the "Observed" series and B the "Predicted"
series. These will be labelled respectively.
variable: str
Variable to use in plot.
title: str
Plot title.
"""
plt.figure(figsize=(14, 4))
plt.xlabel('Observed and predicted')
ax1 = A.set_index('date')[variable].plot(
color='#d35400', grid=True, label='Observed', title=title)
ax2 = B.set_index('date')[variable].plot(
color='grey', grid=True, label='Predicted')
ax1.set_xlabel("Predicted Week")
ax1.set_ylabel("Predicted Values")
plt.legend()
plt.show()
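
# Hedged usage sketch with synthetic weekly data; assumes pandas (pd), numpy (np)
# and matplotlib.pyplot (plt) are imported as this function requires.
dates = pd.date_range("2017-01-01", periods=12, freq="W")
observed = pd.DataFrame({"date": dates, "cases": np.linspace(10, 30, 12)})
predicted = pd.DataFrame({"date": dates, "cases": np.linspace(12, 28, 12)})
plot_two_series(observed, predicted, variable="cases", title="Observed vs. predicted cases")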
| 6,174 |
def getChrLenList(chrLenDict, c):
""" Given a chromosome length dictionary keyed on chromosome names and
a chromosome name (c) this returns a list of all the runtimes for a given
chromosome across all Step names.
"""
l = []
if c not in chrLenDict:
return l
for n in chrLenDict[c]:
l.append(chrLenDict[c][n])
return l
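
# Hedged usage sketch: a tiny chromosome dictionary keyed on chromosome name and
# then on step name, as the docstring describes; the values here are made up.
chr_len_demo = {"chr1": {"align": 12.5, "call_variants": 30.1}}
getChrLenList(chr_len_demo, "chr1")   # -> [12.5, 30.1] (insertion order of the inner dict)
getChrLenList(chr_len_demo, "chrX")   # -> []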
| 6,175 |
def build_arg_parser():
"""
Build an argument parser using argparse. Use it when python version is 2.7 or later.
"""
parser = argparse.ArgumentParser(description="Smatch table calculator -- arguments")
parser.add_argument("--fl", type=argparse.FileType('r'), help='AMR ID list file')
parser.add_argument('-f', nargs='+', help='AMR IDs (at least one)')
parser.add_argument("-p", nargs='*', help="User list (can be none)")
parser.add_argument("--fd", default=isi_dir_pre, help="AMR File directory. Default=location on isi machine")
parser.add_argument('-r', type=int, default=4, help='Restart number (Default:4)')
parser.add_argument('-v', action='store_true', help='Verbose output (Default:False)')
return parser
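
# Hedged usage sketch; note that the --fd default references isi_dir_pre, which
# must be defined in the module before build_arg_parser() is called. The AMR IDs
# below are made-up examples.
parser = build_arg_parser()
args = parser.parse_args(["-f", "amr_0001", "amr_0002", "-r", "6", "-v"])
# args.f == ["amr_0001", "amr_0002"], args.r == 6, args.v is True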
| 6,176 |
def query_sessions(user_id: Optional[int]) -> TList[Session]:
"""
Return all user's sessions
:param user_id: current user ID (None if user auth is disabled)
:return: list of session objects
"""
adb = get_data_file_db(user_id)
return [Session(db_session) for db_session in adb.query(DbSession)]
| 6,177 |
def positionalencoding3d(d_model, dx, dy, dz):
"""
    :param d_model: dimension of the model (ideally a multiple of 6)
    :param dx: size of the positions along the first spatial axis
    :param dy: size of the positions along the second spatial axis
    :param dz: size of the positions along the third spatial axis
    :return: d_model*dx*dy*dz position matrix
"""
# if d_model % 6 != 0:
# raise ValueError("Cannot use sin/cos positional encoding with "
# "odd dimension (got dim={:d})".format(d_model))
pe = torch.zeros(d_model, dx, dy, dz)
# Each dimension use half of d_model
interval = int(d_model // 6) * 2
div_term = torch.exp(torch.arange(0., interval, 2) * -(math.log(10000.0) / interval))
pos_x = torch.arange(0., dx).unsqueeze(1) * div_term
pos_y = torch.arange(0., dy).unsqueeze(1) * div_term
pos_z = torch.arange(0., dz).unsqueeze(1) * div_term
pe[0:interval:2, :, :, :] = torch.sin(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[1:interval:2, :, :, :] = torch.cos(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[interval:int(interval * 2):2, :, :] = torch.sin(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[interval + 1:int(interval * 2):2, :, :] = torch.cos(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[int(interval * 2):int(interval * 3):2, :, :] = torch.sin(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
pe[int(interval * 2) + 1:int(interval * 3):2, :, :] = torch.cos(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
return pe
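
# Hedged usage sketch; assumes torch and math are imported as this function
# requires. Picking d_model as a multiple of 6 fills every channel (otherwise the
# trailing channels stay zero).
pe = positionalencoding3d(d_model=48, dx=4, dy=5, dz=6)
# pe.shape == torch.Size([48, 4, 5, 6])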
| 6,178 |
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers
| 6,179 |
def test_2k7campeonatojuizforano(caplog):
"""Very hard to parse comments (line braks, few markers)"""
TWDA = twda._TWDA()
with open(
os.path.join(os.path.dirname(__file__), "2k7campeonatojuizforano.html")
) as f:
TWDA.load_html(f)
assert len(TWDA) == 1
assert TWDA["2k7campeonatojuizforano"].to_json() == {
"id": "2k7campeonatojuizforano",
"event": "Campeonato Juizforano 2007",
"place": "Juiz de Fora, Brazil",
"date": "2007-12-16",
"players_count": 23,
"player": "Pedro Paulo de Sousa Mendes",
"name": "Imbued at Last",
"comments": textwrap.dedent(
"""
Description: The deck's goal is to setup as fast as you can by
depleting your library and to use Unity/Anthelios to cycle back
whatever master you need the most at the time.
It was enough. I never needed more than this.
Heart is no good when it shows up late, but this is a small price
to pay when compared to how good it is when I draw it early (it was
decisive in the final table).
I only packed the extremely necessary events, so I wouldn't draw
any extra table hate, and was lucky enough to put all 3 in play in
every game. The decks runs wonderfully with those 3 on the table.
"""
)[1:],
"crypt": {
"cards": [
{"count": 4, "id": 201386, "name": 'Travis "Traveler72" Miller'},
{"count": 3, "id": 200689, "name": 'Jennie "Cassie247" Orne'},
{"count": 2, "id": 201103, "name": 'Paul "Sixofswords29" Moreton'},
{"count": 2, "id": 200479, "name": 'François "Warden" Loehr'},
{"count": 1, "id": 200656, "name": 'Jack "Hannibal137" Harmon'},
],
"count": 12,
},
"library": {
"cards": [
{
"cards": [
{"count": 2, "id": 100067, "name": "Angel of Berlin"},
{"count": 1, "id": 100135, "name": "The Barrens"},
{
"count": 1,
"id": 100348,
"name": "The Church of Vindicated Faith",
},
{
"comments": "saved me a lot of times, "
"unfortunately I couldn't pack "
"more than one.",
"count": 1,
"id": 100545,
"name": "Direct Intervention",
},
{"count": 1, "id": 100775, "name": "Fortschritt Library"},
{"count": 6, "id": 101198, "name": "Memories of Mortality"},
{
"comments": "no comments needed.",
"count": 1,
"id": 101210,
"name": "Millicent Smith, Puritan Vampire " "Hunter",
},
{"count": 3, "id": 101355, "name": "The Parthenon"},
{"count": 1, "id": 101654, "name": "Rötschreck"},
{
"comments": "useful either to speed deck "
"depletion or to trade for "
"something useful under "
"Anthelios.",
"count": 4,
"id": 101800,
"name": "The Slaughterhouse",
},
{
"comments": "crucial contest in the final " "table.",
"count": 1,
"id": 101811,
"name": "Smiling Jack, The Anarch",
},
{"count": 1, "id": 101958, "name": "Tension in the Ranks"},
{"count": 1, "id": 102074, "name": "Unity"},
{
"comments": "not as effective as I "
"expected, but also not a "
"hassle because it's trifle.",
"count": 1,
"id": 102151,
"name": "Wash",
},
],
"count": 25,
"type": "Master",
},
{
"cards": [
{"count": 4, "id": 101557, "name": "React with Conviction"},
{"count": 5, "id": 101705, "name": "Second Sight"},
{"count": 5, "id": 101886, "name": "Strike with Conviction"},
],
"count": 14,
"type": "Conviction",
},
{
"cards": [
{"count": 1, "id": 100079, "name": "Aranthebes, The Immortal"}
],
"count": 1,
"type": "Action",
},
{
"cards": [
{"count": 1, "id": 100298, "name": "Carlton Van Wyk"},
{"count": 1, "id": 101333, "name": "Ossian"},
{"count": 1, "id": 102173, "name": "Wendell Delburton"},
],
"count": 3,
"type": "Ally",
},
{
"cards": [
{"count": 1, "id": 100474, "name": "The Crusader Sword"},
{"count": 1, "id": 100903, "name": "Heart of Nizchetus"},
{"count": 1, "id": 101014, "name": "Ivory Bow"},
],
"count": 3,
"type": "Equipment",
},
{
"cards": [
{"count": 2, "id": 100322, "name": "Champion"},
{"count": 2, "id": 100551, "name": "Discern"},
{"count": 1, "id": 101592, "name": "Rejuvenate"},
{
"comments": "I started to win a game when I "
"had those three in play.",
"count": 3,
"id": 102120,
"name": "Vigilance",
},
],
"count": 8,
"type": "Power",
},
{
"cards": [{"count": 3, "id": 100534, "name": "Determine"}],
"count": 3,
"type": "Reaction",
},
{
"cards": [
{"count": 1, "id": 100074, "name": "Anthelios, The Red Star"},
{"count": 1, "id": 100612, "name": "Edge Explosion"},
{"count": 1, "id": 102079, "name": "The Unmasking"},
],
"count": 3,
"type": "Event",
},
],
"count": 60,
},
}
assert caplog.record_tuples == [
# original file errors - the fixed version has none
# (
# "krcg",
# logging.WARNING,
# "[ 58][2k7campeonatojuizforano] failed to parse \"I couldn't pack more "
# 'than one."',
# ),
# (
# "krcg",
# logging.WARNING,
# '[ 65][2k7campeonatojuizforano] failed to parse "or to trade for '
# 'something useful under Anthelios."',
# ),
# (
# "krcg",
# logging.WARNING,
# '[ 70][2k7campeonatojuizforano] failed to parse "because it\'s trifle."',
# ),
]
| 6,180 |
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
| 6,181 |
def scalar_sub(x: Number, y: Number) -> Number:
"""Implement `scalar_sub`."""
_assert_scalar(x, y)
return x - y
| 6,182 |
def predict_attack(h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13):
"""
Parameters:
-name:h1
in:query
type:number
         required:True
-name:h5
in:query
type:number
required:True
-name:h4
in:query
type:number
required:True
-name:h8
in:query
type:number
required:True
-name:h9
in:query
type:number
required:True
-name:h10
in:query
type:number
required:True
-name:h11
in:query
type:number
required:True
-name:h12
in:query
type:number
required:True
    Description: output variable
"""
if h2=='male':
h2=0
else:
h2=1
if h3=='angina':
h3=0
elif h3=='atypical anigna':
h3=1
elif h3=='non-anignal pain':
h3=2
else:
h3=3
if h6=='greater than 120':
h6=1
else:
h6=0
if h7=='normal':
h7=0
elif h7=='ST-t normal':
h7=1
else:
h7=2
if h13=='yes':
h13=1
else:
h13=0
res=classifier.predict([[h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13]])
return res
| 6,183 |
def _check_varrlist_integrity(vlist):
"""Return true if shapes and datatypes are the same"""
shape = vlist[0].data.shape
datatype = vlist[0].data.dtype
for v in vlist:
if v.data.shape != shape:
raise(Exception("Data shapes don't match"))
if v.data.dtype != datatype:
raise(Exception("Data types don't match"))
return True
| 6,184 |
def subclassfactory(fact_method):
"""fact_method takes the same args as init and returns the subclass appropriate to those args
that subclass may in turn override the same factory method and choose amoung it's subclasses.
If this factory method isn't overridden in the subclass an object of that class is inited.
fact_method is made into a cls method and must take at least a cls argument
"""
@wraps(fact_method)
@classmethod
def wrapper(cls, *args, **kwargs):
subclass = fact_method(cls, *args, **kwargs)
submeth = getattr(subclass, fact_method.__name__)
curmeth = getattr(cls, fact_method.__name__)
if (submeth.__func__ == curmeth.__func__):
return subclass(*args, **kwargs)
else:
return submeth(*args, **kwargs)
return wrapper
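
# Hedged usage sketch, assuming subclassfactory (and the functools.wraps it uses)
# is in scope: the decorated factory only returns the subclass to instantiate, and
# the wrapper takes care of constructing it. Shape/Circle/Square are hypothetical.
class Shape:
    def __init__(self, kind, size):
        self.kind, self.size = kind, size

    @subclassfactory
    def create(cls, kind, size):
        return Circle if kind == "circle" else Square

class Circle(Shape):
    pass

class Square(Shape):
    pass

shape = Shape.create("circle", 2.0)
# isinstance(shape, Circle) is True and shape.size == 2.0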
| 6,185 |
def test_get_partitions():
"""Test getting the partitions property."""
assert_true(type(m.partitions) is tuple)
| 6,186 |
def simulate_var1(x_tnow, b, mu, sigma2, m_, *, j_=1000, nu=10**9,
init_value=True):
"""For details, see here.
Parameters
----------
x_tnow : array, shape(n_, )
b : array, shape(n_,n_)
mu : array, shape(n_, )
sigma2 : array, shape(n_,n_)
m_ : int
nu: int
j_ : int
init_value : boolean
Returns
-------
x_tnow_thor : array, shape(j_, m_+1, n_)
"""
n_ = np.shape(sigma2)[0]
# Step 1: Monte Carlo scenarios of projected paths of the risk drivers
x_tnow_thor = np.zeros((j_, m_, n_))
for m in range(0, m_):
epsi = simulate_t(mu, sigma2, nu, j_).reshape((j_, -1))
if m > 0:
x_prec = x_tnow_thor[:, m-1, :]
else:
x_prec = np.tile(x_tnow, (j_, 1))
x_tnow_thor[:, m, :] = x_prec @ b.T + epsi
# Step 2: Include the initial value as starting node, if selected
if init_value:
x_tnow = np.tile(x_tnow, (j_, 1))
x_tnow = np.expand_dims(x_tnow, axis=1)
x_tnow_thor = np.concatenate((x_tnow, x_tnow_thor), axis=1)
return x_tnow_thor
| 6,187 |
def is_drom(insee_city: Optional[str] = None, insee_region: Optional[str] = None) -> bool:
"""
    Does the INSEE code of the city or the region correspond to a DROM (French overseas department/region)?
    Args:
        insee_city: INSEE code of the city
        insee_region: INSEE code of the region
    Returns:
        True iff the INSEE code is a DROM
"""
if insee_city is not None:
return insee_city[:2] in {'97', '98'}
    elif insee_region is not None:  # Region codes do not follow the department code numbering
return insee_region in {'01', '02', '03', '04', '06'}
| 6,188 |
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks)
| 6,189 |
def reset_user_pwd(username: str) -> int:
"""
    :param username: user name
    :return: result code: 1 on success, 0 on failure
"""
return update_user_info(username=username, args={
'password': '12345678'
})
| 6,190 |
def configurationChanged(options, jsonFile):
"""
We received a new JSON configuration file
"""
audit("configurationChanged " + jsonFile)
if options["ignorefile"] == "yes":
trace("skipping database reconfiguration because skip_configuration_file exists")
return
if not os.path.isfile(jsonFile):
die("json file %s does not exist" % jsonFile)
try:
inp = json.load(open(jsonFile,"r"))
except Exception as e:
die("Cannot open jsonFile '%s': %s" % (jsonFile, e))
if verbose:
dumpJSON(inp, "incoming JSON")
jsonTop = options["jsontop"]
    if jsonTop is not None:
e = "inp" + jsonTop
trace("eval(%s)" % e)
inp = eval(e,{"__builtins__":None},{"inp":inp})
if verbose:
dumpJSON(inp, "modified JSON")
setupDictionaryDatabases(options, inp)
| 6,191 |
def location_engineering(df: pd.DataFrame) -> pd.DataFrame:
"""Call the `location_dict()` function to get the location dictionary and the
`location_dataframe()` one to add the location dictionary info to the DataFrame.
Parameters
----------
df :
The dataframe to work with.
Returns
-------
The DataFrame with location info added.
"""
# Call `location_dict` function to get a dictionary with location info
location_dictionary = location_dict(df)
# Call `location_dataframe` function to add the `location_dict` to a df
df = location_dataframe(df, location_dictionary)
return df
| 6,192 |
def extract_energyxtb(logfile=None):
"""
Extracts xtb energies from xtb logfile using regex matching.
Args:
        logfile (pathlib.Path): Logfile to pull the energies from
Returns:
energy (list[float]): List of floats containing the energy in each step
"""
re_energy = re.compile("energy: (-\\d+\\.\\d+)")
energy = []
with logfile.open() as f:
for line in f:
if "energy" in line:
energy.append(float(re_energy.search(line).groups()[0]))
return energy
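
# Hedged usage sketch against a synthetic log written to a temp file; the real
# input is an xtb optimization log. A pathlib.Path is passed because the function
# calls logfile.open().
import tempfile
from pathlib import Path

demo_log = Path(tempfile.gettempdir()) / "xtb_demo.log"
demo_log.write_text(" energy: -10.123456 gnorm: 0.1\n energy: -10.234567 gnorm: 0.05\n")
extract_energyxtb(demo_log)   # -> [-10.123456, -10.234567]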
| 6,193 |
def GetPID():
"""Returns the PID of the shell."""
return os.getppid()
| 6,194 |
def render(history_lines, out_file):
"""Read historical data and save to out_file as img."""
dts = []
prs = []
queued = []
daily_happiness = [] # Percentage of last day queue was not blocked
merge_rate = [] # Merge rate for the past 24 active hours
real_merge_rate = [] # Merge rate including when queue is empty
merges = []
blocked_intervals = []
offline_intervals = []
active_merges = Sampler()
real_merges = Sampler()
happy_moments = Sampler()
daily_merged = collections.deque()
actually_merged = collections.deque()
dt = None
start_blocked = None
start_offline = None
last_merge = 0 # Number of merges last sample, resets on queue restart
for line in history_lines:
try:
dt, online, pr, queue, _, blocked, merged = parse_line(
*line.strip().split(' '))
except TypeError: # line does not fit expected criteria
continue
if dt < datetime.datetime.now() - datetime.timedelta(days=30):
continue
if not pr and not queue and not merged: # Bad sample
continue
if merged >= last_merge:
did_merge = merged - last_merge
elif online: # Restarts reset the number to 0
did_merge = merged
else:
did_merge = 0
last_merge = merged
happy_moments += int(bool(online and not blocked))
real_merges += did_merge
if queue or did_merge: # Only add samples when queue is busy.
active_merges += did_merge
if not start_offline and not online:
start_offline = dt
if start_offline and online:
offline_intervals.append((start_offline, dt))
start_offline = None
if not online: # Skip offline entries
continue
# Make them steps instead of slopes.
if dts:
dts.append(dt)
# Append the previous value at the current time
# which makes all changes move at right angles.
daily_happiness.append(daily_happiness[-1])
merge_rate.append(merge_rate[-1])
merges.append(did_merge)
prs.append(prs[-1])
queued.append(queued[-1])
real_merge_rate.append(real_merge_rate[-1])
dts.append(dt)
daily_happiness.append(happy_moments.mean)
merge_rate.append(active_merges.total)
merges.append(did_merge)
prs.append(pr)
queued.append(queue)
real_merge_rate.append(real_merges.total)
if not start_blocked and blocked:
start_blocked = dt
if start_blocked and not blocked:
blocked_intervals.append((start_blocked, dt))
start_blocked = None
if start_blocked:
blocked_intervals.append((start_blocked, dt))
if start_offline:
offline_intervals.append((start_offline, dt))
fig, (ax_open, ax_merged, ax_health) = plt.subplots(
3, sharex=True, figsize=(16, 8), dpi=100)
ax_queued = ax_open.twinx()
ax_merged.yaxis.tick_right()
ax_merged.yaxis.set_label_position('right')
ax_health.yaxis.tick_right()
ax_health.yaxis.set_label_position('right')
ax_open.plot(dts, prs, 'b-')
merge_color = merges_color(merge_rate[-1])
p_merge, = ax_merged.plot(dts, merge_rate, '%s-' % merge_color)
p_real_merge, = ax_merged.plot(dts, real_merge_rate, '%s:' % merge_color, alpha=0.5)
health_color = happy_color(daily_happiness[-1])
health_line = '%s-' % health_color
ax_health.plot(dts, daily_happiness, health_line)
ax_queued.plot(dts, queued, '%s-' % depth_color(queued[-1]))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
ax_open.set_ylabel('Open PRs: %d' % prs[-1], color='b')
ax_queued.set_ylabel(
'Queued PRs: %d' % queued[-1],
color=depth_color(queued[-1]))
ax_health.set_ylabel(
'Queue health: %.2f' % daily_happiness[-1],
color=health_color)
ax_merged.set_ylabel('Merge capacity: %d/d' % merge_rate[-1], color=merge_color)
ax_health.set_ylim([0.0, 1.0])
ax_health.set_xlim(left=datetime.datetime.now() - datetime.timedelta(days=21))
fig.autofmt_xdate()
for start, end in offline_intervals:
ax_merged.axvspan(start, end, alpha=0.2, color='black', linewidth=0)
ax_health.axvspan(start, end, alpha=0.2, color='black', linewidth=0)
for start, end in blocked_intervals:
ax_health.axvspan(start, end, alpha=0.2, color='brown', linewidth=0)
p_blocked = mpatches.Patch(color='brown', label='blocked', alpha=0.2)
p_offline = mpatches.Patch(color='black', label='offline', alpha=0.2)
ax_health.legend([p_offline, p_blocked], ['offline', 'blocked'], 'lower left', fontsize='x-small')
ax_merged.legend([p_merge, p_real_merge, p_offline], ['capacity', 'actual', 'offline'], 'lower left', fontsize='x-small')
last_week = datetime.datetime.now() - datetime.timedelta(days=6)
halign = 'center'
xpos = 0.5
fig.text(
xpos, 0.08, 'Weekly statistics', horizontalalignment=halign)
weekly_merge_rate = numpy.mean([
m for (d, m) in zip(dts, merge_rate) if d >= last_week])
weekly_merges = sum(
m for (d, m) in zip(dts, merges) if d >= last_week)
weekly_merges /= 2 # Due to steps
fig.text(
xpos, .00,
'Merge capacity: %.1f PRs/day (merged %d)' % (
weekly_merge_rate, weekly_merges),
color=merges_color(weekly_merge_rate),
horizontalalignment=halign,
)
week_happiness = numpy.mean(
[h for (d, h) in zip(dts, daily_happiness) if d >= last_week])
fig.text(
xpos, .04,
'Unblocked %.1f%% of this week' % (100 * week_happiness),
color=happy_color(week_happiness),
horizontalalignment=halign,
)
if not queued[-1]:
delta = datetime.timedelta(0)
wait = 'clear'
elif not merge_rate[-1]:
delta = datetime.timedelta(days=90)
wait = 'forever'
else:
delta = datetime.timedelta(float(queued[-1]) / merge_rate[-1])
wait = format_timedelta(delta)
fig.text(
xpos, -0.04,
'Queue backlog: %s' % wait,
color=wait_color(delta),
horizontalalignment=halign,
)
if dt:
fig.text(
0.1, -0.04,
'image: %s, sample: %s' % (
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M'),
dt.strftime('%Y-%m-%d %H:%M'),
),
horizontalalignment='left',
fontsize='x-small',
color=fresh_color(dt),
)
plt.savefig(out_file, bbox_inches='tight', format='svg')
plt.close()
| 6,195 |
def make_transaction_frame(transactions):
"""
Formats a transaction DataFrame.
Parameters
----------
transactions : pd.DataFrame
Contains improperly formatted transactional data.
Returns
-------
df : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
"""
transaction_list = []
for dt in transactions.index:
txns = transactions.loc[dt]
if len(txns) == 0:
continue
for txn in txns:
txn = map_transaction(txn)
transaction_list.append(txn)
df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))
df['txn_dollars'] = -df['amount'] * df['price']
df.index = list(map(pd.Timestamp, df.dt.values))
return df
| 6,196 |
def find_or_create_role(name, desc):
""" Find existing role or create new role """
role = Role.query.filter(Role.name == name).first()
if not role:
role = Role(name=name, desc=desc)
    return role
| 6,197 |
def enumerate_shapefile_fields(shapefile_uri):
"""Enumerate all the fielfd in a shapefile.
Inputs:
-shapefile_uri: uri to the shapefile which fields have to be
enumerated
Returns a nested list of the field names in the order they are stored
in the layer, and groupped per layer in the order the layers appear.
"""
message = shapefile_uri + "' doesn't point to a file."
assert os.path.isfile(shapefile_uri), message
shapefile = ogr.Open(shapefile_uri)
message = "OGR can't open " + shapefile_uri
assert shapefile is not None, message
layer_count = shapefile.GetLayerCount()
names = [] # names are organized by layer
for l in range(layer_count):
names.append([])
layer = shapefile.GetLayer(l)
feature = layer.GetFeature(0)
field_count = feature.GetFieldCount()
for f in range(field_count):
field_defn = feature.GetFieldDefnRef(f)
names[l].append(field_defn.GetNameRef())
return names
| 6,198 |
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_to_reproducible_float(x) for x in inp[1:-1].split(',')]
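
# Hedged usage sketch, assuming the module's _to_reproducible_float helper (not
# shown here) is in scope; it parses each component, so only the call shape and
# the expected list result are illustrated.
parse_nrrdvector("(1.0,0.0,2.5)")   # -> [1.0, 0.0, 2.5] (as produced by the helper)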
| 6,199 |