| content | id |
|---|---|
| string, lengths 22 to 815k | int64, 0 to 4.91M |
def test_initialize_with_displacements_and_force_sets_input(
generate_workchain,
generate_structure,
generate_displacements,
generate_settings,
generate_force_sets,
):
"""Test of PhonopyWorkChain.initialize() using NaCl data.
`displacements` (random displacements) is given as an input.
"""
from aiida.orm import ArrayData, Dict, StructureData
structure = generate_structure()
settings = generate_settings()
displacements = generate_displacements()
force_sets = generate_force_sets()
inputs = {
"structure": structure,
"settings": settings,
"displacements": displacements,
"force_sets": force_sets,
}
wc = generate_workchain("phonoxpy.phonopy", inputs)
wc.initialize()
assert "displacements" in wc.inputs
np.testing.assert_almost_equal(
wc.inputs.displacements.get_array("displacements"),
wc.ctx.displacements.get_array("displacements"),
)
np.testing.assert_almost_equal(
displacements.get_array("displacements"),
wc.ctx.displacements.get_array("displacements"),
)
phonon_setting_info_keys = [
"version",
"symmetry",
"symmetry_tolerance",
"primitive_matrix",
"supercell_matrix",
]
assert set(wc.ctx.phonon_setting_info.keys()) == set(phonon_setting_info_keys)
ctx = {
"phonon_setting_info": Dict,
"primitive": StructureData,
"supercell": StructureData,
"displacements": ArrayData,
"supercells": dict,
}
for key in wc.ctx:
assert key in ctx
assert isinstance(wc.ctx[key], ctx[key])
assert "supercell_" not in key
_assert_cells(wc)
| 6,500 |
def rasterize(
vectors,
layer=0,
output=None,
nodata=None,
pixel_size=None,
bounds=None,
affine=None,
shape=None,
attribute=None,
fill=0,
default_value=1,
):
"""Rasterize features
Options for defining the boundary and pixel size of rasterization.
User may provide either
1) pixel_size and bounds - rasterizes features within the given boundary
2) affine and shape - both required to determine the boundary
Providing an output path is optional. It is only needed if you want to save
the rasterized feature(s) to a GeoTiff
rasterio features rasterization function:
https://rasterio.readthedocs.io/en/latest/topics/features.html
https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html
TODO:
could also use lookup dict with attribute arg for non-binary rasters
where attribute value is not numeric
Args
vectors:
features input, see rasterstats for acceptable inputs
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vectors layer to use either by name or number.
defaults to 0
output (str): (optional)
output path for raster of rasterized features
nodata: (optional)
nodata value used if output argument is provided
pixel_size (float):
resolution at which to rasterize features
bounds (tuple):
boundary tuple (xmin, ymin, xmax, ymax)
affine (Affine):
affine transformation used for rasterization
shape (tuple):
shape for rasterization which corresponds with affine (nrows, ncols)
attribute (str):
field to use for assigning cell values instead of `default_value`
fill (int, float):
same as rasterio's features.rasterize `fill`
default_value (int, float):
same as rasterio's features.rasterize `default_value`
Returns
array representing rasterized features
affine of resulting raster
"""
if (
affine is not None
and isinstance(affine, Affine)
and shape is not None
and isinstance(shape, tuple)
and len(shape) == 2
):
if pixel_size is not None and pixel_size != affine[0]:
warn("Ignoring `pixel_size` provided due to valid affine and shape input.")
if pixel_size is not None and bounds is not None:
alt_affine, alt_shape = get_affine_and_shape(
bounds=bounds, pixel_size=pixel_size
)
if alt_affine != affine or alt_shape != shape:
warn("Ignoring `bounds` due to valid affine and shape input")
elif pixel_size is not None and bounds is not None:
affine, shape = get_affine_and_shape(bounds=bounds, pixel_size=pixel_size)
else:
raise Exception("Must provide either pixel_size and bounds or affine and shape")
features_iter = read_features(vectors, layer)
if attribute is None:
feats = [
(feat["geometry"], default_value)
for feat in features_iter
if feat["geometry"] is not None
]
else:
feats = [
(feat["geometry"], feat["properties"][str(attribute)])
for feat in features_iter
if feat["geometry"] is not None
]
rv_array = features.rasterize(
feats,
out_shape=shape,
transform=affine,
fill=fill,
default_value=default_value,
all_touched=True,
dtype=None,
)
if output is not None:
export_raster(rv_array, affine, output, nodata=nodata)
return rv_array, affine
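# A self-contained sketch of the rasterio call that `rasterize` wraps above
# (the square polygon, bounds and 0.5-unit pixel size are made up for
# illustration; rasterio must be installed): burn one geometry into a grid
# defined by bounds and pixel size, as in option 1 of the docstring.
from rasterio import features as rio_features
from rasterio.transform import from_bounds

geom = {"type": "Polygon",
        "coordinates": [[(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)]]}
pixel_size = 0.5
xmin, ymin, xmax, ymax = 0.0, 0.0, 4.0, 4.0
grid_shape = (int((ymax - ymin) / pixel_size), int((xmax - xmin) / pixel_size))  # (nrows, ncols)
grid_affine = from_bounds(xmin, ymin, xmax, ymax, grid_shape[1], grid_shape[0])
mask = rio_features.rasterize([(geom, 1)], out_shape=grid_shape, transform=grid_affine,
                              fill=0, all_touched=True)
assert mask.shape == grid_shape and mask.max() == 1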
| 6,501 |
def blockchain_key_seed(request):
""" Private key template for the nodes in the private blockchain, allows
different keys to be used for each test to avoid collisions.
"""
# Using the test name as part of the template to force the keys to be
# different across tests, otherwise the data directories would be the same
# and collisions would happen
return escape_for_format(request.node.name) + "cluster:{}"
| 6,502 |
def wls_simple(X, y, yerr):
"""
weighted least squares: (X.T*W*X)*beta = X.T*W*y
solution: beta = (X.T*W*X)^-1 * X.T*W*y
Note
----
wls solves single problems (n_problems=1)
BUT! is able to solve multiple-template (same error) problems
Parameters
----------
X: predictors
(n_obs, n_term) array
yerr: error of response
(n_obs, ) error array used to build the weights
y: response
(n_obs, n_problems) array
Return
------
beta: coefficients
(n_term, n_problems) array
"""
yerr = yerr.reshape(-1, 1) # column vector
yerr = np.where((yerr > 0) & np.isfinite(yerr), yerr, 1e5)
X_ = X / yerr
y_ = y / yerr
beta = ols(np.matmul(X_.T, X_), np.matmul(X_.T, y_))
return beta
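# A small worked check of the weighting used above. np.linalg.solve stands in
# for the module's `ols` helper, which is assumed to solve A @ beta = b; with
# a noise-free response the true coefficients are recovered exactly.
import numpy as np

n_obs = 50
X_demo = np.column_stack([np.ones(n_obs), np.linspace(0.0, 1.0, n_obs)])
true_beta = np.array([[1.0], [2.0]])           # intercept, slope
y_demo = X_demo @ true_beta                    # (n_obs, 1) response
yerr_demo = np.linspace(0.05, 0.5, n_obs)      # heteroscedastic errors

yerr_col = yerr_demo.reshape(-1, 1)
Xw, yw = X_demo / yerr_col, y_demo / yerr_col
beta_demo = np.linalg.solve(Xw.T @ Xw, Xw.T @ yw)   # shape (n_term, n_problems)
assert np.allclose(beta_demo, true_beta)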
| 6,503 |
def decorated1(debugger, args, exe_ctx, result, dict):
"""
Python command defined by @lldb.command
"""
print("hello from decorated1", file=result)
| 6,504 |
def assert_metadata_equal(this, other):
"""Assert metadata `this` are equal to metadata `other`."""
assert this.standard_name == other.standard_name
assert this.long_name == other.long_name
assert this.var_name == other.var_name
assert this.units == other.units
| 6,505 |
def _filter(dict_packages, expression):
"""Filter the dict_packages with expression.
Returns:
dict: Filtered dict containing only the keys that match the expression.
"""
expression_list = ['(' + item + ')' for item in expression.split(',')]
expression_str = '|'.join(expression_list)
compiled_exp = re.compile('(?i:^(' + expression_str + ')$)')
cp_dict_packages = copy.deepcopy(dict_packages)
for key in dict_packages.keys():
match = re.search(compiled_exp, key)
if not match:
del cp_dict_packages[key]
return cp_dict_packages
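# A quick usage illustration (assumes `_filter` above has `re` and `copy`
# available in its module): "numpy,scip.*" is turned into the case-insensitive
# anchored regex (?i:^((numpy)|(scip.*))$), so only matching keys survive.
packages = {"NumPy": "1.21", "scipy": "1.7", "pandas": "1.3"}
kept = _filter(packages, "numpy,scip.*")
assert set(kept) == {"NumPy", "scipy"}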
| 6,506 |
def multi_index_tsv_to_dataframe(filepath, sep="\t", header_rows=None):
"""
Loads a multi-header tsv file into a :py:class:`pd.DataFrame`.
Parameters
----------
filepath : `str`
Path pointing to the tsv file.
sep : `str`, optional, default: '\t'
Character to use as the delimiter.
header_rows : `list`, optional, default: None
0-based indices corresponding to the row locations to use as the
multi-index column names in the dataframe. Example:

    condition     E3          E3
    value         pvalue_raw  z
    _sy           8.6e-05     3.92
    p.Ala16Arg    0.0         3.76

The *header_rows* for this instance will be [0, 1].
If not supplied, `header_rows` will be inferred from the file.
Returns
-------
:py:class:`~pd.DataFrame`
A :py:class:`pd.MultiIndex` dataframe.
"""
if header_rows is None:
header_rows = infer_multiindex_header_rows(filepath)
if header_rows == [0] or not header_rows:
return pd.read_table(filepath, index_col=0, sep=sep)
else:
try:
return pd.read_table(filepath, index_col=0, sep=sep, header=header_rows)
except IndexError:
return pd.read_table(filepath, index_col=0, sep=sep)
| 6,507 |
def generate_summoner_tab_summoner(db, profile, ss):
"""
:type db: darkarisulolstats.lolstats.database.Database
"""
summoner = {}
for s in ss:
raw_summoner = db.summoners.get(s)
if "profileIconPath" not in summoner:
summoner["profileIconPath"] = data.DataDragon.get_profile_icon_path(raw_summoner["profileIconId"])
summoner["name"] = profile
if "level" not in summoner:
summoner["level"] = raw_summoner["summonerLevel"]
else:
summoner["level"] += raw_summoner["summonerLevel"]
summoner["Playtime"] = 0
raw_matches = db.preprocessed.get(profile, "matchlists")
for raw_match in raw_matches:
summoner["Playtime"] += raw_match["duration"]
return summoner
| 6,508 |
def enable_ini():
"""
Switch seapy to use all fields from ROMS hydrodynamics and COBALT ini fields
"""
enable()
seapy.roms.fields.update(ini_fields)
| 6,509 |
def preserve_quotes (s):
"""
Removes HTML tags around greentext.
"""
return quot_pattern.sub(get_first_group, s)
| 6,510 |
def test_init_logger(monkeypatch):
"""
Tests `init_logger()`.
"""
test_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'test_config')
def mock_get_conf_path():
"""
Replaces the conf path with the one for mock confs in unit tests.
"""
return test_config_dir
monkeypatch.setattr(dirs, 'get_conf_path', mock_get_conf_path)
config.init_logger()
logger_conf_file = os.path.join(test_config_dir, 'logger.conf')
logger_cp = configparser.RawConfigParser()
logger_cp.read(logger_conf_file)
assert config.find_existing_handler_from_config(
logger_cp, 'fileHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'stdoutHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'stderrHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'disabledHandler') is not None
stdout_handler = config.find_existing_handler_from_config(
logger_cp, 'stdoutHandler')
assert stdout_handler is not None
assert stdout_handler.filters[0]._max_inc_levelno \
== logging.INFO # pylint: disable=protected-access
root_logger = logging.getLogger()
def clear_handlers():
"""
Clears all handlers from the root logger.
"""
existing_handlers = root_logger.handlers
for h_existing in existing_handlers:
root_logger.removeHandler(h_existing)
clear_handlers()
config.init_logger('VeRBoSe')
# Since level changed, cannot use existing function
# Only matching on format -- expected to be unique in this mock conf
for h_existing in root_logger.handlers:
h_conf = logger_cp['handler_stdoutHandler']
h_conf_fmt = logger_cp[ \
f'formatter_{h_conf["formatter"]}']['format'].strip()
if h_existing.formatter._fmt \
!= h_conf_fmt: # pylint: disable=protected-access
continue
stdout_handler = h_existing
break
assert stdout_handler.level == logging.NOTSET
clear_handlers()
config.init_logger(40)
# Since level changed, cannot use existing function
# Only matching on format -- expected to be unique in this mock conf
for h_existing in root_logger.handlers:
h_conf = logger_cp['handler_stderrHandler']
h_conf_fmt = logger_cp[ \
f'formatter_{h_conf["formatter"]}']['format'].strip()
if h_existing.formatter._fmt \
!= h_conf_fmt: # pylint: disable=protected-access
continue
stderr_handler = h_existing
break
assert stderr_handler.level == 40
| 6,511 |
def with_whitespace_inside(expr):
""" Returns an expression that allows for whitespace inside, but not
outside the expression.
"""
return Combine(OneOrMore(expr | White(' ', max=1) + expr))
| 6,512 |
def save_plot(workdir, filename, create_workdir=False):
"""Accepts workdir specified as argument to main script and filename
- saves plot to disk."""
if create_workdir:
if workdir == os.getcwd():
os.makedirs(f'{workdir}/figures', exist_ok=True)
plt.savefig(f'{workdir}/figures/{filename}', format='png')
else:
os.makedirs(f'{workdir}', exist_ok=True)
plt.savefig(f'{workdir}/{filename}', format='png')
else:
if workdir == os.getcwd():
plt.savefig(f'{workdir}/figures/{filename}', format='png')
else:
plt.savefig(f'{workdir}/{filename}', format='png')
| 6,513 |
def similarity(item, user, sim_dict):
"""
similarity between an item and a user (a set of items)
"""
if user not in sim_dict or item not in sim_dict[user]:
return 0
else:
return sim_dict[user][item]
| 6,514 |
def get_totalt_result(req_url):
"""[summary]
This gets all the results in INT from the specified query
Args:
req_url ([STR]): [The request query that decides the data]
"""
r = requests.get(req_url, headers=headers)
json = r.json()
return json['result']['totalHits']
| 6,515 |
def lorentz(x, FWHM, x0=0):
"""
Returns Lorentzian lineshape.
"""
return FWHM/2/np.pi*((x-x0)**2+(FWHM/2)**2)**-1
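# Two quick sanity checks on the lineshape above (assumes numpy is imported as
# np in this module): the peak value at x0 is 2/(pi*FWHM), and the value at
# x0 + FWHM/2 is half of the peak, as expected for a Lorentzian.
import numpy as np

FWHM_demo, x0_demo = 2.0, 1.0
assert np.isclose(lorentz(x0_demo, FWHM_demo, x0_demo), 2 / (np.pi * FWHM_demo))
assert np.isclose(lorentz(x0_demo + FWHM_demo / 2, FWHM_demo, x0_demo),
                  lorentz(x0_demo, FWHM_demo, x0_demo) / 2)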
| 6,516 |
def _test_create_pd_detection(infos, tracking=False):
"""Creates a prediction objects file."""
assert tracking is False, "Not Supported Yet"
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset import label_pb2
from waymo_open_dataset.protos import metrics_pb2
from waymo_open_dataset.utils import box_utils
objects = metrics_pb2.Objects()
for idx in tqdm(range(len(infos))):
info = infos[idx]
obj = get_obj(info['path'])
annos = obj['objects']
num_points_in_gt = np.array([ann['num_points'] for ann in annos])
box3d = np.array([ann['box'] for ann in annos])
if len(box3d) == 0:
continue
names = np.array([TYPE_LIST[ann['label']] for ann in annos])
if box3d.shape[1] > 7:
# drop velocity
box3d = box3d[:, [0, 1, 2, 3, 4, 5, -1]]
for i in range(box3d.shape[0]):
if num_points_in_gt[i] == 0:
continue
if names[i] == 'UNKNOWN':
continue
det = box3d[i]
score = 1.0
label = names[i]
o = metrics_pb2.Object()
o.context_name = obj['scene_name']
o.frame_timestamp_micros = int(obj['frame_name'].split("_")[-1])
# Populating box and score.
box = label_pb2.Label.Box()
box.center_x = det[0]
box.center_y = det[1]
box.center_z = det[2]
box.length = det[3]
box.width = det[4]
box.height = det[5]
box.heading = det[-1]
o.object.box.CopyFrom(box)
o.score = score
# Use correct type.
o.object.type = CAT_NAME_TO_ID[label]
o.object.num_lidar_points_in_box = num_points_in_gt[i]
objects.objects.append(o)
# Write objects to a file.
f = open(os.path.join(args.result_path, 'gt_preds.bin'), 'wb')
f.write(objects.SerializeToString())
f.close()
| 6,517 |
def equal_axes(axes, xlim=True, ylim=True):
"""
adjust xlim and ylim to the min/ max of all axes
Parameters
----------
axes: list
axes to adjust
xlim : bool, optional
If true, adjust xlim.
ylim : bool, optional
If true, adjust ylim.
"""
axes = np.array(axes).flatten()
if xlim:
mn = min([ax.get_xlim()[0] for ax in axes])
mx = max([ax.get_xlim()[1] for ax in axes])
[ax.set_xlim(mn, mx) for ax in axes]
if ylim:
mn = min([ax.get_ylim()[0] for ax in axes])
mx = max([ax.get_ylim()[1] for ax in axes])
[ax.set_ylim(mn, mx) for ax in axes]
| 6,518 |
def count_collision(strMap: list[str], right: int, down: int) -> int:
"""Read the map and count how many tree would be encountered if someone start from the top left corner"""
mapWidth = len(strMap[0]) # All lines are assumed to have same width
xCoord, yCoord = right % mapWidth, down
count = 0
while yCoord < len(strMap):
if strMap[yCoord][xCoord] == TREE:
count += 1
xCoord = (xCoord + right) % mapWidth
yCoord += down
return count
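# A tiny worked example (an assumption: the module-level TREE marker is '#',
# as in the usual Advent of Code setup): moving right 1 / down 1 over this
# map hits a tree on every row below the start, giving a count of 3.
TREE = '#'  # assumed tree marker for this sketch
tiny_map = [
    "....",
    ".#..",
    "..#.",
    "...#",
]
assert count_collision(tiny_map, right=1, down=1) == 3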
| 6,519 |
def svn_config_find_group(*args):
"""svn_config_find_group(svn_config_t cfg, char key, char master_section, apr_pool_t pool) -> char"""
return _core.svn_config_find_group(*args)
| 6,520 |
def _reduce_ticks(fig):
"""Reduce number of ticks by factor 1.5 if more than 4."""
# TODO: replace this by mpl built-in class
tick_reduc = 1.5
for axes in fig.get_axes():
if len(axes.get_xticks()) > 4:
axes.locator_params(
tight=False,
axis='x',
nbins=len(axes.get_xticks()) / tick_reduc,
)
if len(axes.get_yticks()) > 4:
axes.locator_params(
tight=False,
axis='y',
nbins=len(axes.get_yticks()) / tick_reduc,
)
| 6,521 |
def loading():
"""Loader"""
spinner = spinning_cursor()
while True:
sys.stdout.write(next(spinner))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b')
| 6,522 |
def download_file(url: str, target_dir: Union[str, Path]) -> None:
""" """
target_path = Path(target_dir)
r = requests.get(url, stream=True)
http_status = r.status_code
if http_status != 200:
raise Exception("Request was not successfull. Got response {0}".format(http_status))
total = int(r.headers.get("content-length", 0))
with open(target_path, "wb") as file, tqdm(
desc=str(target_path.absolute()),
total=total,
unit="iB",
unit_scale=True,
unit_divisor=1024,
) as bar:
for data in r.iter_content(chunk_size=1024):
size = file.write(data)
bar.update(size)
| 6,523 |
def run_delay():
"""Run the updater to call a function delayed."""
import time
import threading
from qtpy import QtWidgets
from qt_thread_updater import get_updater
app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
text_edit = QtWidgets.QTextEdit()
text_edit.resize(300, 300)
text_edit.setReadOnly(True)
text_edit.show()
now = time.time()
def update_text(set_time):
text_edit.append('Requested {:.04f} Updated {:.04f}'.format(set_time, time.time() - now))
# Lower the timeout so it runs at a faster rate.
get_updater().timeout = 0 # 0.0001 # Qt runs in milliseconds
get_updater().delay(0.5, update_text, 0.5)
get_updater().delay(1, update_text, 1)
get_updater().delay(1.5, update_text, 1.5)
get_updater().delay(2, update_text, 2)
get_updater().delay(2.5, update_text, 2.5)
get_updater().delay(3, update_text, 3)
get_updater().delay(5, app.quit) # Quit after 5 seconds
app.exec_()
cleanup_app()
| 6,524 |
def _database_exists():
"""Checks for existence of database"""
_require_environment()
database = _get_database_name()
with settings(hide('warnings'), warn_only=True):
result = run(MYSQL_PREFIX % "\"SHOW DATABASES LIKE '%(NAME)s';\"" % database)
if database['NAME'] in result:
return True
else:
print('Database %(NAME)s does not exist' % database)
return False
| 6,525 |
def EncoderDecoder(d_model, d_ff, n_heads, dropout, layer_idx, mode,
ff_activation):
"""Transformer encoder-decoder layer.
The input is a triple (decoder_input, mask, encoder) where the mask is
created from the original source to prevent attending to the padding part
of the encoder.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
layer_idx: which layer are we at (for bookkeeping)
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
the layer, returning a triple (decoder_activations, mask, encoder).
"""
decoder_self_attention = [ # vecs_d pmask vecs_e
tl.LayerNorm(), # vecs_d ..... ......
tl.BasicCausalAttention(
d_model, n_heads=n_heads, dropout=dropout, mode=mode),
tl.Dropout(rate=dropout, mode=mode), # vecs_d ..... ......
]
decoder_to_encoder_attention = [ # vecs_d masks vecs_e
tl.LayerNorm(), # vecs_d masks vecs_e
tl.Parallel([], [], tl.Dup()), # ______ _____ vecs_e vecs_e
tl.Parallel([], tl.Swap()), # ______ vecs_e masks ......
tl.Parallel([], tl.Dup()), # ______ vecs_e vecs_e ..... ......
tl.AttentionQKV( # (q k v masks ... --> vecs_d masks ...)
d_model, n_heads=n_heads, dropout=dropout, mode=mode),
tl.Dropout(rate=dropout, mode=mode), # vecs_d mask vecs_e
]
feed_forward = [
FeedForward(d_model, d_ff, dropout, layer_idx, mode, ff_activation),
]
return tl.Serial( # vecs_d masks vecs_e
tl.Residual(decoder_self_attention), # vecs_d masks vecs_e
tl.Residual(decoder_to_encoder_attention), # vecs_d masks vecs_e
tl.Residual(feed_forward), # vecs_d masks vecs_e
)
| 6,526 |
def group_normalize(strokes):
""" normilize a multistroke drawing """
long_stroke = concat(strokes)
x_min = min(long_stroke.x)
x_max = max(long_stroke.x)
y_min = min(long_stroke.y)
y_max = max(long_stroke.y)
x_range = float(x_max-x_min)
y_range = float(y_max-y_min)
normalized_strokes = []
for stroke in strokes:
x = ((np.array(stroke.x) - x_min)/x_range).tolist()
y = ((np.array(stroke.y) - y_min)/y_range).tolist()
normalized_strokes.append(Stroke(x,y))
return normalized_strokes
| 6,527 |
def _get_sa_bracket(myimt, saset):
"""
For a given SA IMT, look through the input SAs and return a tuple of
a) a pair of IMT strings representing the periods bracketing the given
period; or b) the single IMT representing the first or last period in
the input list if the given period is off the end of the list.
Args:
myimt (str): The SA IMT whose period is searched for in the input list.
saset (list): A list of SA IMTs.
Returns:
tuple: One or two strings representing the IMTs closest to or
bracketing the input IMT.
"""
if not len(saset):
return ()
#
# Stick the target IMT into a copy of the list of SAs, then sort
# the list by period.
#
ss = saset.copy()
ss.append(myimt)
tmplist = sorted(ss, key=_get_period_from_imt)
nimt = len(tmplist)
#
# Get the index of the target IMT in the sorted list
#
myix = tmplist.index(myimt)
#
# If the target IMT is off the end of the list, return the
# appropriate endpoint; else return the pair of IMTs that
# bracket the target.
#
if myix == 0:
return (tmplist[1], )
elif myix == nimt - 1:
return (tmplist[-2], )
else:
return (tmplist[myix - 1], tmplist[myix + 1])
| 6,528 |
def test_ceil_special_cases_one_arg_equal(arg1):
"""
Special case test for `ceil(x, /)`:
- If `x_i` is already integer-valued, the result is `x_i`.
"""
res = ceil(arg1)
mask = isintegral(arg1)
assert_exactly_equal(res[mask], (arg1)[mask])
| 6,529 |
def fb83(A, B, eta=1., nu=None):
"""
Generates the FB8 distribution using the orthogonal vectors A and B
where A = gamma1*kappa and B = gamma2*beta (gamma3 is inferred)
A may not have length zero but may be arbitrarily close to zero
B may have length zero however. If so, then an arbitrary value for gamma2
(orthogonal to gamma1) is chosen
"""
kappa = norm(A)
beta = norm(B)
gamma1 = A / kappa
if beta == 0.0:
gamma2 = __generate_arbitrary_orthogonal_unit_vector(gamma1)
else:
gamma2 = B / beta
theta, phi, psi = FB8Distribution.gammas_to_spherical_coordinates(
gamma1, gamma2)
gamma1, gamma2, gamma3 = FB8Distribution.spherical_coordinates_to_gammas(
theta, phi, psi)
return FB8Distribution(gamma1, gamma2, gamma3, kappa, beta, eta, nu)
| 6,530 |
def replicate(pta, ptac, p0, coefficients=False):
"""Create a replicated residuals conditioned on the data.
Here pta is standard marginalized-likelihood PTA, and
ptac is a hierarchical-likelihood version of pta with
coefficients=True for all GPs. This function:
- calls utils.get_coefficients(pta, p0) to get a realization
of the GP coefficients conditioned on the data and on the
hyperparameters in p0;
- calls ptac.get_delay() to compute the resulting realized
GPs at the toas;
- adds measurement noise (including ECORR) consistent with
the hyperparameters.
To use this (pending further development), you need to set
combine=False on the pta/ptac GPs, and method='sparse' on
the ptac EcorrKernelNoise.
Returns a list of replicated residuals, one list element
per pulsar."""
# GP delays
if not coefficients:
p0 = get_coefficients(pta, p0)
ds = ptac.get_delay(params=p0)
# note: the proper way to cache the Nmat computation is to give
# a `sample` method to csc_matrix_alt and ndarray_alt, which
# would then save the factorization in the instance
nmats = ptac.get_ndiag(params=p0)
for d, nmat in zip(ds, nmats):
if isinstance(nmat, sps.csc_matrix):
# add EFAC/EQUAD/ECORR noise
# use xx' = I => (Lx)(Lx)' = LL' with LL' = PNP'
# hence N[P[:, np.newaxis], P[np.newaxis, :]] = LL'
# see https://scikit-sparse.readthedocs.io/en/latest/cholmod.html
ch = cholesky(nmat)
d[ch.P()] += ch.L() @ np.random.randn(len(d))
elif isinstance(nmat, np.ndarray):
# diagonal case, nmat will be ndarray_alt instance
d += np.sqrt(nmat) * np.random.randn(len(d))
else:
raise NotImplementedError(
"Cannot take Nmat factor; " "you may need to set the EcorrKernelNoise to 'sparse'."
)
return ds
| 6,531 |
def screenshot_progressbar_horizontal(screenshot_on=True, theme='flatly'):
"""
Get screenshot for horizontal progressbars
"""
style = Style(theme)
window = style.master
window.title('ttkbootstrap')
window.geometry('600x400')
ss = Screenshot(window, '../images/progressbar_horizontal.png')
# headers
f1 = ttk.Frame(window, padding=5)
f1.pack(fill='both', side='left', expand='yes')
ttk.Label(f1, text='Horizontal.TProgressbar', font='Helvetica 10 bold', anchor='center').pack(fill='x', pady=5)
ttk.Separator(f1).pack(fill='x', padx=5, pady=(5, 10))
# widgets
for c in style.colors:
ttk.Label(f1, text=f'{c}.Horizontal.TProgressbar').pack(fill='x', padx=10)
ttk.Progressbar(f1, value=75, style=f'{c}.Horizontal.TProgressbar').pack(fill='x', expand='yes', padx=10,
pady=(0, 10))
if screenshot_on:
window.after(1000, lambda: ss.get_bounding_box(None))
window.after(1500, window.quit)
window.mainloop()
| 6,532 |
def load_danube() -> pd.DataFrame:
"""
The danube dataset contains ranks of base flow observations from the Global River Discharge
project of the Oak Ridge National Laboratory Distributed Active Archive Center (ORNL DAAC),
a NASA data center. The measurements are monthly average flow rate for two stations situated
at Scharding (Austria) on the Inn river and at Nagymaros (Hungary) on the Danube.
The data have been pre-processed to remove any time trend. Specifically, Bacigal et al. (2011)
extracted the raw data, and obtain the fast Fourier transformed centered observations. The
negative spectrum is retained and a linear time series model with 12 seasonal components is
fitted. Residuals are then extracted and AR model fitted to the series, the selection being
done based on the AIC criterion with imposed maximum order of 3 and the number of autoregressive
components may differ for each series.
This data frame contains the following columns:
inn:
A numeric vector containing the rank of pre-whitened level observations of the Inn river
at Scharding.
donau:
A numeric vector containing the rank of prewhitened level observations of the Donau river
at Nagymaros.
"""
return _load_file('danube.csv')
| 6,533 |
def get_index(grid_mids, values):
"""get the index of a value in an array
Args:
grid_mids: array of grid centers
value: array of values
Returns:
indices
"""
diff = np.diff(grid_mids)
diff = np.concatenate((diff, diff[-1:]))
edges = np.concatenate((grid_mids-diff/2, grid_mids[-1:]+diff[-1:]/2))
#print(edges)
ind = np.digitize(np.array(values), edges)-1
ind[ind > grid_mids.shape[0]-1] = grid_mids.shape[0]-1
return ind
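# A small usage check (assumes numpy imported as np in this module): with
# centers at 0, 1, 2, 3 the implied bin edges are -0.5, 0.5, 1.5, 2.5, 3.5,
# so 0.4 maps to index 0, 2.7 to index 3, and out-of-range values are clipped.
import numpy as np

centers = np.array([0.0, 1.0, 2.0, 3.0])
assert list(get_index(centers, [0.4, 2.7, 99.0])) == [0, 3, 3]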
| 6,534 |
def validate_incoming_edges(graphs, param=None):
"""
In case a node of a certain type has more than a threshold of incoming
edges, mark the possible stitch as a bad stitch.
"""
param = param or {}
res = {}
i = 0
for candidate in graphs:
res[i] = 'ok'
for node, values in candidate.nodes(data=True):
if values[stitcher.TYPE_ATTR] not in list(param.keys()):
continue
tmp = param[values[stitcher.TYPE_ATTR]]
if len(candidate.in_edges(node)) >= tmp:
res[i] = 'node ' + str(node) + ' has too many edges: ' + \
str(len(candidate.in_edges(node)))
i += 1
return res
| 6,535 |
def test_calculate_part_stress_lambda_b_no_insulation():
"""calculate_part_stress_lambda_b() should raise an KeyError when passed an unknown
insulation ID."""
with pytest.raises(KeyError):
inductor.calculate_part_stress_lambda_b(1, 41, 85.77)
| 6,536 |
def face_area(bounding_box, correction):
"""
Increase the face area to a square format; face detectors crop very close,
which is not useful when you want to get the whole head.
Arguments: original bounding box, correction value
Returns: 4-element list - bounding box for expanded area (ints)
"""
x_1, y_1, x_2, y_2 = bounding_box
x_1 = x_1 + correction
x_2 = x_2 + correction
x_center = int(x_1 + (x_2 - x_1) / 2)
y_center = int(y_1 + (y_2 - y_1) / 2)
factor = 2
square_factor = int(max(x_2 - x_1, y_2 - y_1) * factor / 2)
x_1p = x_center - square_factor
y_1p = y_center - square_factor
x_2p = x_1p + square_factor * 2
y_2p = y_1p + square_factor * 2
return [x_1p, y_1p, x_2p, y_2p]
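# A worked example: the box (10, 20, 30, 60) with no x-correction has its
# centre at (20, 40) and longest side 40, so the squared, doubled crop is an
# 80 x 80 box around that centre.
assert face_area([10, 20, 30, 60], 0) == [-20, 0, 60, 80]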
| 6,537 |
def session_scope():
"""Provide a transactional scope around a series of operations."""
global ENGINE
global SESSION
if SESSION is None:
SESSION = sessionmaker(bind=ENGINE)
session = SESSION()
try:
yield session
except:
session.rollback()
raise
finally:
session.close()
| 6,538 |
def ut_to_dt(ut):
"""Converts a universal time in days to a dynamical time in days."""
# As at July 2020, TAI is 37 sec ahead of UTC, TDT is 32.184 seconds ahead of TAI.
return ut + 69.184/SEC_IN_DAY
| 6,539 |
def _read_group_h5(filename: Path, groupname: str) -> ndarray:
"""Return group content.
Args:
filename: path of hdf5 file.
groupname: name of group to read.
Returns:
content of group.
"""
try:
with h5py.File(filename, 'r') as h5f:
data = h5f[groupname][()]
except OSError as err:
# h5py doesn't always include the filename in its error messages
err.args += (filename,)
raise
return data
| 6,540 |
def test_attach_file(request, browser_name):
"""Should provide a way to change file field value"""
browser = get_browser(browser_name)
request.addfinalizer(browser.quit)
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
browser.visit(EXAMPLE_APP)
browser.attach_file("file", file_path)
browser.find_by_name("upload").click()
html = browser.html
assert "text/plain" in html
with open(file_path, "r") as f:
assert str(f.read().encode("utf-8")) in html
| 6,541 |
def check_genotype_data():
"""Checks if the genotype data is fully indexed"""
GENE_COUNT_TO_CHECK = 33341
SNP_COUNT_TO_CHECK = 10700998
gene_count = es.count('genotype',doc_type='genes')['count']
snps_count = es.count('genotype',doc_type='snps')['count']
if gene_count != GENE_COUNT_TO_CHECK:
raise Exception('Only %s instead of %s genes found' % (gene_count,GENE_COUNT_TO_CHECK))
if snps_count != SNP_COUNT_TO_CHECK:
raise Exception('Only %s instead of %s SNPs found' % (snps_count,SNP_COUNT_TO_CHECK))
| 6,542 |
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
| 6,543 |
def hot(df, hot_maps, drop_cold=True, ret_hots_only=False, verbose=False):
"""
df: pd.DataFrame
hot_maps: list(dict)
hot_map: dict
key: str column in df
value: one_hot vector for unique row value
---
returns dataframe
"""
if verbose:
print(f"hot_df cols: {df.columns}")
ret = []
for i, (col_name, hot_map) in enumerate(hot_maps.items()):
ret.append(hot_col(df[col_name], hot_map))
if ret_hots_only:
return ret
ret = pd.concat([df] + ret, axis=1)
if drop_cold:
ret = ret.drop(list(hot_maps.keys()), axis=1)
return ret
| 6,544 |
def menu(function_text):
"""
Decorator for plain-text handler
:param function_text: function which is set as a handler in the bot class
:return:
"""
def wrapper(self, bot, update):
self.text_menu(bot, update)
function_text(self, bot, update)
return wrapper
| 6,545 |
def create_delete_classes(system_id_or_identifier, **kwargs):
"""Create classes for a classification system.
:param system_id_or_identifier: The id or identifier of a classification system
"""
if request.method == "DELETE":
data.delete_classes(system_id_or_identifier)
return {'message': f'Classes of {system_id_or_identifier} deleted'}, 204
if request.method == "POST":
args = request.get_json()
errors = ClassMetadataForm().validate(args)
if errors:
return abort(400, str(errors))
classes = data.insert_classes(system_id_or_identifier=system_id_or_identifier, classes_files_json=args['classes'])
result = ClassesSchema(exclude=['classification_system_id']).dump(classes, many=True)
return jsonify(result), 201
| 6,546 |
def ajax_upload_key():
"""Ajax upload a functionary key. Key files are stored to the db in their
dictionary representation. """
functionary_key = request.files.get("functionary_key", None)
functionary_name = request.form.get("functionary_name", None)
if not functionary_name:
flash("Something went wrong: We don't know which functionary,"
" this key belongs to", "alert-danger")
return jsonify({"error": True})
if not functionary_key:
flash("Something went wrong: No file uploaded", "alert-danger")
return jsonify({"error": True})
if functionary_key.filename == "":
flash("Something went wrong: No file selected", "alert-danger")
return jsonify({"error": True})
try:
# We try to load the public key to check the format
key = securesystemslib.keys.import_rsakey_from_public_pem(
functionary_key.read())
securesystemslib.formats.PUBLIC_KEY_SCHEMA.check_match(key)
file_name = functionary_key.filename
functionary_db_item = {
"functionary_name": functionary_name,
"file_name": file_name,
"key_dict": key
}
# Clumsy update or insert for functionary array embedded subdocument
# NOTE: Unfortunately we can't "upsert" on arrays but must first try to
# update and if that does not work insert.
# https://docs.mongodb.com/manual/reference/operator/update/positional/#upsert
# https://stackoverflow.com/questions/23470658/mongodb-upsert-sub-document
query_result = mongo.db.session_collection.update_one(
{
"_id": session["id"],
"functionaries.items.functionary_name": functionary_name
},
{
"$set": {"functionaries.items.$": functionary_db_item}
})
if not query_result.matched_count:
query_result = mongo.db.session_collection.update_one(
{
"_id": session["id"],
# This query part should deal with concurrent requests
"functionaries.items.functionary_name": {"$ne": functionary_name}
},
{
"$push": {"functionaries.items": functionary_db_item}
}, upsert=True)
flash("Added key '{fn}' for functionary '{functionary}'"
.format(fn=file_name, functionary=functionary_name),
"alert-success")
else:
flash("Updated key '{fn}' for functionary ""'{functionary}'"
.format(fn=file_name, functionary=functionary_name),
"alert-success")
# TODO: Throw more rocks at query_result
except Exception as e:
flash("Could not store uploaded file. Error: {}".format(e),
"alert-danger")
return jsonify({"error": True})
return jsonify({"error": False})
| 6,547 |
def test_reward_valid(env_name, reward_type, tmpdir):
"""Test output of reward function is appropriate shape and type."""
venv = util.make_vec_env(env_name, n_envs=1, parallel=False)
venv, tmppath = _make_env_and_save_reward_net(env_name, reward_type, tmpdir)
TRAJECTORY_LEN = 10
obs = _sample(venv.observation_space, TRAJECTORY_LEN)
actions = _sample(venv.action_space, TRAJECTORY_LEN)
next_obs = _sample(venv.observation_space, TRAJECTORY_LEN)
steps = np.arange(0, TRAJECTORY_LEN)
reward_fn = serialize.load_reward(reward_type, tmppath, venv)
pred_reward = reward_fn(obs, actions, next_obs, steps)
assert pred_reward.shape == (TRAJECTORY_LEN,)
assert isinstance(pred_reward[0], numbers.Number)
| 6,548 |
def add_latents_to_dataset_using_tensors(args, sess, tensors, data):
""" Get latent representations from model.
Args:
args: Arguments from parser in train_grocerystore.py.
sess: Tensorflow session.
tensors: Tensors used for extracting latent representations.
data: Data used during epoch.
Returns:
Data dictionary filled with latent representations.
"""
latents = sess.run(tensors['latents'], feed_dict={tensors['x']: data['features']} )
data['latents'] = latents
if args.use_private:
latents_ux = sess.run(tensors['latents_ux'], feed_dict={tensors['x']: data['features']} )
data['latents_ux'] = latents_ux
if args.use_text:
all_captions = load_captions(data['captions'], data['labels'])
latents_uw = sess.run(tensors['latents_uw'],
feed_dict={tensors['captions']: all_captions})
data['latents_uw'] = latents_uw
if args.use_iconic:
batch_size = args.batch_size
n_examples = len(data['iconic_image_paths'])
n_batches = int(np.ceil(n_examples/batch_size))
latents_ui = np.zeros([n_examples, args.z_dim])
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
if end > n_examples:
end = n_examples
iconic_images = load_iconic_images(data['iconic_image_paths'][start:end])
latents_ui[start:end] = sess.run(tensors['latents_ui'],
feed_dict={tensors['iconic_images']: iconic_images})
data['latents_ui'] = latents_ui
return data
| 6,549 |
def normal_distribution_parameter_estimation(data):
"""
Notice: unbiased estimation (division by n-1) is used for sigma below.
:param data: a list, each element is a real number, the value of some attribute
eg: [0.46, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211]
:return miu: the estimation of miu of the normal distribution based on 'data'
eg: 0.27875
:return sigma: the estimation of sigma of the normal distribution based on 'data'
eg: 0.10092394590553255
"""
miu = np.mean(data) # estimate miu of the normal distribution
sigma = 0 # initial sigma
data_num = len(data) # the number of data
# estimate sigma of the normal distribution
for each_data in data:
sigma = sigma + (each_data-miu) ** 2
sigma = sigma/(data_num-1) # unbiased estimation adopted!!
sigma = sigma ** 0.5
return miu, sigma
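# Reproducing the example values given in the docstring above (assumes numpy
# is imported as np in this module): the sample mean is 0.27875 and the
# unbiased standard deviation is about 0.1009.
sample = [0.46, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211]
miu_demo, sigma_demo = normal_distribution_parameter_estimation(sample)
assert abs(miu_demo - 0.27875) < 1e-6
assert abs(sigma_demo - 0.10092394590553255) < 1e-6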
| 6,550 |
def map_icd_codes_to_categories(df, icd_version):
"""Append a column 'category' to df containing disease categories"""
# default label
df["category"] = OTHER_LABEL
# From ICD6 on we have numerical-only four-digit codes, categorization works
# on 3-digit codes only. Drop the last digit before left-padding.
if icd_version >= 6:
lp_code_map = {c: c[:-1] for c in df["code"].unique()}
else:
# Generate left-padded codes for lexsorted selection in table.
lp_code_map = {c: left_pad_code(c) for c in df["code"].unique()}
df["lp_code"] = df["code"].map(lp_code_map)
for category, mappings in ICD_CATEGORIES.items():
codes = mappings[f"ICD-{icd_version}"]
for code in [c.strip().strip(",") for c in codes.split()]:
if "-" in code:
start_code, end_code = code.split("-")
else:
start_code = end_code = code
# make sure there are no category overlaps
row_sel = (df["lp_code"] >= left_pad_code(start_code)) & (
df["lp_code"] <= left_pad_code(end_code) + "z"
)
assert (df.loc[row_sel, "category"].isin([OTHER_LABEL, category])).all()
# set category
df.loc[
row_sel,
"category",
] = category
| 6,551 |
def reverse_complement(seq):
"""
ARGS:
seq : sequence with _only_ A, T, C or G (case sensitive)
RETURN:
rcSeq : reverse complement of sequenced passed to it.
DESCRIPTION:
DEBUG:
Compared several sequences. Is working.
FUTURE:
"""
rcSeq = "" # Reverse Complement sequence
# Complement
for char in seq:
if(char == 'A' ):
rcSeq += 'T'
continue
if(char == 'T' ):
rcSeq += 'A'
continue
if(char == 'G' ):
rcSeq += 'C'
continue
if(char == 'C' ):
rcSeq += 'G'
continue
if(char == 'N' ):
rcSeq += 'N'
continue
if(char not in "ATCGN"):
exit_with_error("ERROR! char %s is not a valid sequencing character!\n"%(char))
# Revese
rcSeq = rcSeq[::-1]
return rcSeq
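# A quick usage check: complement each base, then reverse the string.
assert reverse_complement("ATCGN") == "NCGAT"
assert reverse_complement("AAAA") == "TTTT"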
| 6,552 |
def compose(chosung, joongsung, jongsung=u''):
"""This function returns a Hangul letter by composing the specified chosung, joongsung, and jongsung.
@param chosung
@param joongsung
@param jongsung the terminal Hangul letter. This is optional if you do not need a jongsung."""
if jongsung is None: jongsung = u''
try:
chosung_index = CHOSUNGS.index(chosung)
joongsung_index = JOONGSUNGS.index(joongsung)
jongsung_index = JONGSUNGS.index(jongsung)
except Exception as e:
raise NotHangulException('No valid Hangul character can be generated using given combination of chosung, joongsung, and jongsung.')
return chr(0xAC00 + chosung_index * NUM_JOONGSUNGS * NUM_JONGSUNGS + joongsung_index * NUM_JONGSUNGS + jongsung_index)
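# A self-contained check of the index arithmetic used above (an assumption:
# the module's CHOSUNGS/JOONGSUNGS/JONGSUNGS follow the standard Unicode jamo
# ordering, with 21 joongsungs and 28 jongsungs including the empty one):
# chosung 18 (ㅎ), joongsung 0 (ㅏ) and jongsung 4 (ㄴ) compose to '한' (U+D55C).
NUM_JOONGSUNGS_CHECK, NUM_JONGSUNGS_CHECK = 21, 28
assert chr(0xAC00 + 18 * NUM_JOONGSUNGS_CHECK * NUM_JONGSUNGS_CHECK
           + 0 * NUM_JONGSUNGS_CHECK + 4) == '한'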
| 6,553 |
def generate_markdown_metadata(metadata_obj: Dict[str, str]) -> List[str]:
"""generate_markdown_metadata
Add some basic metadata to the top of the file
in HTML tags.
"""
metadata: List[str] = ["<!---"]
passed_metadata: List[str] = [
f" {key}: {value}" for key, value in metadata_obj.items()
]
metadata.extend(passed_metadata)
metadata.append(f" Tags:")
metadata.append("--->")
metadata.append(f"# Diary for {metadata_obj['Date']}")
metadata.append("")
return metadata
| 6,554 |
def fftshift(x:np.ndarray):
"""平移FFT频谱
FFT默认频谱不是关于零频率对称的,使用fftshift可以对调左右频谱。
:Parameters:
- x: 频谱序列
:Returns: 平移后的频谱
"""
N = x.size
return np.append(x[N//2:], x[:N//2])
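# A quick comparison against numpy's built-in (assumes numpy imported as np):
# for an even-length spectrum the two shifts agree.
import numpy as np

spec = np.fft.fft(np.arange(8.0))
assert np.allclose(fftshift(spec), np.fft.fftshift(spec))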
| 6,555 |
def store_feature_vectors(dfs, output_dir):
"""
Write out all feature vector information to a csv file, to be read
later by the feature vector plotting script.
Parameters
----------
dfs : dict of DataFrame
Time series data for multiple sub-image locations.
output_dir : str
Path to directory to save the csv.
"""
# loop over collections
for col_name, veg_df in dfs.items():
# if vegetation data
if "COPERNICUS/S2" in col_name or "LANDSAT" in col_name:
# check the feature vectors are available
if "feature_vec" not in veg_df.columns:
print("Could not find feature vectors.")
continue
# sort by date
veg_df = veg_df.sort_values(by="date").dropna()
# create a df to store feature vectors
df = pd.DataFrame()
[
print(value)
for value in veg_df.feature_vec
if not isinstance(value, list)
]
# add feature vectors to dataframe
df = pd.DataFrame(value for value in veg_df.feature_vec)
# rename percentile columns
df = df.rename(columns={n: f"{(n+1)*5}th_percentile" for n in df.columns})
# reindex
df.index = veg_df.index
# add information
df.insert(0, "date", veg_df["date"])
df.insert(1, "latitude", veg_df["latitude"])
df.insert(2, "longitude", veg_df["longitude"])
# save csv
if col_name == "COPERNICUS/S2":
s = "S2"
elif "LANDSAT" in col_name:
s = "L" + col_name.split("/")[1][-1] + "_"
else:
s = col_name
filename = os.path.join(output_dir, s + "_feature_vectors.csv")
df.to_csv(filename, index=False)
| 6,556 |
def get_20newsgroups_data(
train_test,
categories=None,
max_text_len: int = None,
min_num_tokens=0,
random_state=42,
) -> List[Tuple[str, str]]:
"""
'alt.atheism',
'comp.graphics',
'comp.os.ms-windows.misc',
'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware',
'comp.windows.x',
'misc.forsale',
'rec.autos',
'rec.motorcycles',
'rec.sport.baseball',
'rec.sport.hockey',
'sci.crypt',
'sci.electronics',
'sci.med',
'sci.space',
'soc.religion.christian',
'talk.politics.guns',
'talk.politics.mideast',
'talk.politics.misc',
'talk.religion.misc'
"""
data = fetch_20newsgroups(
subset=train_test,
shuffle=True,
remove=("headers", "footers", "quotes"),
categories=categories,
random_state=random_state,
)
target_names = data.target_names
def truncate_to_maxlen(text):
if max_text_len is not None:
return text[0 : min(len(text), max_text_len)]
else:
return text
text_target_tuples = [
(truncate_to_maxlen(d), target_names[target])
for d, target in zip(data.data, data.target)
if len(d.split(" ")) > min_num_tokens
]
return text_target_tuples
| 6,557 |
def crossdomain(allowed_origins=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True, credentials=False):
"""
http://flask.pocoo.org/snippets/56/
"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if isinstance(allowed_origins, str):
# always have allowed_origins as a list of strings.
allowed_origins = [allowed_origins]
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
# Get a hold of the request origin
origin = request.environ.get('HTTP_ORIGIN')
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
# if the origin matches any of our allowed origins set the
# access control header appropriately
allow_origin = (origin if origin is not None and
allowed_origins is not None and
origin in allowed_origins else None)
h['Access-Control-Allow-Origin'] = allow_origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if credentials:
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
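# A minimal usage sketch (the route path and origin are made up; Flask must be
# installed and the decorator's own imports - request, current_app,
# make_response, update_wrapper, timedelta - are assumed to exist in its
# module): allow a single origin with credentials on one endpoint. Note that
# @crossdomain sits below @app.route so the CORS headers wrap the view.
from flask import Flask

app = Flask(__name__)

@app.route("/api/data")
@crossdomain(allowed_origins=["https://example.com"], methods=["GET"],
             credentials=True)
def api_data():
    return "ok"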
| 6,558 |
def describe_my_user_profile():
"""
Describes a user\'s SSH information.
See also: AWS API Documentation
:example: response = client.describe_my_user_profile()
:rtype: dict
ReturnsResponse Syntax{
'UserProfile': {
'IamUserArn': 'string',
'Name': 'string',
'SshUsername': 'string',
'SshPublicKey': 'string'
}
}
Response Structure
(dict) --Contains the response to a DescribeMyUserProfile request.
UserProfile (dict) --A UserProfile object that describes the user\'s SSH information.
IamUserArn (string) --The user\'s IAM ARN.
Name (string) --The user\'s name.
SshUsername (string) --The user\'s SSH user name.
SshPublicKey (string) --The user\'s SSH public key.
:return: {
'UserProfile': {
'IamUserArn': 'string',
'Name': 'string',
'SshUsername': 'string',
'SshPublicKey': 'string'
}
}
"""
pass
| 6,559 |
def get_gaussian_xyz(lines, optimized=True):
"""
Input orientation:
---------------------------------------------------------------------
Center Atomic Atomic Coordinates (Angstroms)
Number Number Type X Y Z
---------------------------------------------------------------------
1 1 0 0.000000 0.000000 0.122819
2 1 0 0.000000 0.000000 0.877181
---------------------------------------------------------------------
"""
from . import iotools as io
if isinstance(lines, str):
lines = lines.splitlines()
natom = get_gaussian_natom(lines)
keyword = 'Input orientation:'
n = io.get_line_number(keyword, lines=lines, getlastone=optimized)
# Sketch of the remaining parse (an assumption: get_line_number returns the
# 0-based index of the keyword line, and the coordinate rows start 5 lines
# below it, after the column headers and dashed separator in the docstring).
xyz = []
for i in range(n + 5, n + 5 + natom):
    cols = lines[i].split()
    xyz.append([float(cols[3]), float(cols[4]), float(cols[5])])
return xyz
| 6,560 |
def make_vector_gradient(bcs: Boundaries) -> Callable:
""" make a discretized vector gradient operator for a cylindrical grid
|Description_cylindrical|
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
|Arg_boundary_conditions|
Returns:
A function that can be applied to an array of values
"""
assert isinstance(bcs.grid, CylindricalGrid)
bcs.check_value_rank(1)
# calculate preliminary quantities
gradient_r = make_gradient(bcs.extract_component(0))
gradient_z = make_gradient(bcs.extract_component(1))
gradient_phi = make_gradient(bcs.extract_component(2))
@jit_allocate_out(out_shape=(3, 3) + bcs.grid.shape)
def vector_gradient(arr, out=None):
""" apply gradient operator to array `arr` """
gradient_r(arr[0], out=out[:, 0])
gradient_z(arr[1], out=out[:, 1])
gradient_phi(arr[2], out=out[:, 2])
return out
return vector_gradient
| 6,561 |
def exec_in_terminal(command):
"""Run a command in the terminal and get the
output stripping the last newline.
Args:
command: a string or list of strings
"""
return check_output(command).strip().decode("utf8")
| 6,562 |
def replace_string(original, start, end, replacement):
"""Replaces the specified range of |original| with |replacement|"""
return original[0:start] + replacement + original[end:]
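# Quick check: replace characters 0..5 ("hello") of the original string.
assert replace_string("hello world", 0, 5, "goodbye") == "goodbye world"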
| 6,563 |
def edit_mod():
""" Admin endpoint used for sub transfers. """
if not current_user.is_admin():
abort(403)
form = EditModForm()
try:
sub = Sub.get(fn.Lower(Sub.name) == form.sub.data.lower())
except Sub.DoesNotExist:
return jsonify(status='error', error=[_("Sub does not exist")])
try:
user = User.get(fn.Lower(User.name) == form.user.data.lower())
except User.DoesNotExist:
return jsonify(status='error', error=[_("User does not exist")])
if form.validate():
try:
sm = SubMod.get((SubMod.sid == sub.sid) & (SubMod.uid == user.uid))
sm.power_level = 0
sm.invite = False
sm.save()
except SubMod.DoesNotExist:
SubMod.create(sid=sub.sid, uid=user.uid, power_level=0)
misc.create_sublog(misc.LOG_TYPE_SUB_TRANSFER, current_user.uid, sub.sid,
comment=user.name, admin=True)
return jsonify(status='ok')
return jsonify(status="error", error=get_errors(form))
| 6,564 |
def get_current_info(symbol_list, columns='*'):
"""Retrieves the latest data (15 minute delay) for the
provided symbols."""
columns = ','.join(columns)
symbols = __format_symbol_list(symbol_list)
yql = ('select %s from %s where symbol in (%s)'
% (columns, FINANCE_TABLES['quotes'], symbols))
response = execute_yql_query(yql)
return __validate_response(response, 'quote')
| 6,565 |
def reavail_fulfillment_lines(fulfillment):
"""Return fulfilled skills to corresponding availabilitys."""
for line in fulfillment:
if line.task_line.variant and line.task_line.variant.track_inventory:
increase_availability(
line.task_line.variant, line.quantity, allocate=True)
| 6,566 |
def dictionarify_recpat_data(recpat_data):
"""
Convert a list of flat dictionaries (single-record dicts) into a dictionary.
If the given data structure is already a dictionary, it is left unchanged.
"""
return {track_id[0]: patterns[0] for track_id, patterns in \
[zip(*item.items()) for item in recpat_data]} \
if not isinstance(recpat_data, dict) else recpat_data
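# A small usage check: a list of single-record dicts collapses into one dict,
# while an input that is already a dict is returned unchanged.
flat = [{"track1": "pat_a"}, {"track2": "pat_b"}]
assert dictionarify_recpat_data(flat) == {"track1": "pat_a", "track2": "pat_b"}
assert dictionarify_recpat_data({"track1": "pat_a"}) == {"track1": "pat_a"}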
| 6,567 |
def copy_assemble_template(files, distfolder, headersize, configfile, mainfile, examplefile):
"""
Copy and assemble the template.
:param files: List of files
:param distfolder: Distribution folder
:param headersize: Header size
:param configfile: Config file
:param mainfile: Main file
:param examplefile: Example file
:return: None
"""
for f in files.keys():
fl = open(distfolder + f, 'w')
# Write the header
if '.tex' in f:
data = files[f]
kline = 0
for d in data:
if kline < headersize:
fl.write(d)
else:
break
kline += 1
# Strip
dostrip = False
if f == configfile or f == mainfile or f == examplefile or '-config' in f:
dostrip = False
# Write the document body
paste_external_tex_into_file(fl, f, files, headersize, STRIP_ALL_GENERATED_FILES and dostrip, dostrip,
True, configfile, False, dist=True, add_ending_line=False and dostrip)
# Remove the last blank line if it is duplicated
fl.close()
# Move the configuration file
copyfile(distfolder + configfile, distfolder + 'template_config.tex')
copyfile(distfolder + examplefile, distfolder + 'example.tex')
# Assemble the template file
assemble_template_file(files['template.tex'], configfile, distfolder, headersize, files)
| 6,568 |
def _get_qualified_name(workflow_name, job_name):
"""Construct a qualified name from workflow name and job name."""
return workflow_name + _NAME_DELIMITER + job_name
| 6,569 |
def normalize_each_time_frame(input_array):
"""
Normalize each time frame
- Input: 3D numpy array
- Output: 3D numpy array
"""
for i in range(input_array.shape[0]):
max_value = np.amax(input_array[i, :, :])
if max_value != 0:
input_array[i, :, :] = input_array[i, :, :] / max_value
return input_array
| 6,570 |
def describe_raid_arrays(InstanceId=None, StackId=None, RaidArrayIds=None):
"""
Describe an instance\'s RAID arrays.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_raid_arrays(
InstanceId='string',
StackId='string',
RaidArrayIds=[
'string',
]
)
:type InstanceId: string
:param InstanceId: The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions of the RAID arrays associated with the specified instance.
:type StackId: string
:param StackId: The stack ID.
:type RaidArrayIds: list
:param RaidArrayIds: An array of RAID array IDs. If you use this parameter, DescribeRaidArrays returns descriptions of the specified arrays. Otherwise, it returns a description of every array.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'RaidArrays': [
{
'RaidArrayId': 'string',
'InstanceId': 'string',
'Name': 'string',
'RaidLevel': 123,
'NumberOfDisks': 123,
'Size': 123,
'Device': 'string',
'MountPoint': 'string',
'AvailabilityZone': 'string',
'CreatedAt': 'string',
'StackId': 'string',
'VolumeType': 'string',
'Iops': 123
},
]
}
Response Structure
(dict) --
Contains the response to a DescribeRaidArrays request.
RaidArrays (list) --
A RaidArrays object that describes the specified RAID arrays.
(dict) --
Describes an instance\'s RAID array.
RaidArrayId (string) --
The array ID.
InstanceId (string) --
The instance ID.
Name (string) --
The array name.
RaidLevel (integer) --
The RAID level .
NumberOfDisks (integer) --
The number of disks in the array.
Size (integer) --
The array\'s size.
Device (string) --
The array\'s Linux device. For example /dev/mdadm0.
MountPoint (string) --
The array\'s mount point.
AvailabilityZone (string) --
The array\'s Availability Zone. For more information, see Regions and Endpoints .
CreatedAt (string) --
When the RAID array was created.
StackId (string) --
The stack ID.
VolumeType (string) --
The volume type, standard or PIOPS.
Iops (integer) --
For PIOPS volumes, the IOPS per disk.
Exceptions
OpsWorks.Client.exceptions.ValidationException
OpsWorks.Client.exceptions.ResourceNotFoundException
:return: {
'RaidArrays': [
{
'RaidArrayId': 'string',
'InstanceId': 'string',
'Name': 'string',
'RaidLevel': 123,
'NumberOfDisks': 123,
'Size': 123,
'Device': 'string',
'MountPoint': 'string',
'AvailabilityZone': 'string',
'CreatedAt': 'string',
'StackId': 'string',
'VolumeType': 'string',
'Iops': 123
},
]
}
:returns:
OpsWorks.Client.exceptions.ValidationException
OpsWorks.Client.exceptions.ResourceNotFoundException
"""
pass
| 6,571 |
def pre_process_data(full_data):
"""
pre process data- dump invalid values
"""
clean_data = full_data[(full_data["Temp"] > -10)]
return clean_data
| 6,572 |
def invalid_grant(_):
"""Handles the Invalid Grant error when doing Oauth
"""
del current_app.blueprints['google'].token
flash(("InvalidGrant Error"), category="danger")
return redirect(url_for('index'))
| 6,573 |
def diff_text(a, b):
"""
Performs a diffing algorithm on two pieces of text. Returns
a string of HTML containing the content of both texts with
<span> tags inserted indicating where the differences are.
"""
def tokenise(text):
"""
Tokenises a string by splitting it into individual characters
and grouping the alphanumeric ones together.
This means that punctuation, whitespace, CJK characters, etc
become separate tokens and words/numbers are merged together
to form bigger tokens.
This makes the output of the diff easier to read as words are
not broken up.
"""
tokens = []
current_token = ""
for c in text:
if c.isalnum():
current_token += c
else:
if current_token:
tokens.append(current_token)
current_token = ""
tokens.append(c)
if current_token:
tokens.append(current_token)
return tokens
a_tok = tokenise(a)
b_tok = tokenise(b)
sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok)
changes = []
for op, i1, i2, j1, j2 in sm.get_opcodes():
if op == 'replace':
for token in a_tok[i1:i2]:
changes.append(('deletion', token))
for token in b_tok[j1:j2]:
changes.append(('addition', token))
elif op == 'delete':
for token in a_tok[i1:i2]:
changes.append(('deletion', token))
elif op == 'insert':
for token in b_tok[j1:j2]:
changes.append(('addition', token))
elif op == 'equal':
for token in a_tok[i1:i2]:
changes.append(('equal', token))
# Merge adjacent changes which have the same type. This just cleans up the HTML a bit
merged_changes = []
current_value = []
current_change_type = None
for change_type, value in changes:
if change_type != current_change_type:
if current_change_type is not None:
merged_changes.append((current_change_type, ''.join(current_value)))
current_value = []
current_change_type = change_type
current_value.append(value)
if current_value:
merged_changes.append((current_change_type, ''.join(current_value)))
return TextDiff(merged_changes)
| 6,574 |
def require_reset_password():
"""
Request a password reset.
Parameters:
{
"identifier": "user identifier (username or email)"
}
Returns:
{
"code": 0, // 0 indicates success, non-zero indicates failure
"message": "qwq" // error message when code is non-zero
}
"""
if config.USE_PHONE_WHEN_REGISTER_AND_RESETPASSWD:
return make_response(-1, message="当前不使用邮箱验证密码")
import uuid
if db.session.query(User).filter(User.email == request.form["identifier"]).count() > 1:
return make_response(-1, message="此邮箱对应多个用户,请使用用户名进行操作")
query = db.session.query(User).filter(or_(
User.email == request.form["identifier"], User.username == request.form["identifier"]))
if query.count() == 0:
return make_response(-1, message="用户名或邮箱错误")
user: User = query.one()
from common.aes import encrypt
from common.datatypes import PasswordResetToken, load_from_json
from config import AUTH_PASSWORD, AUTH_TOKEN, RESET_PASSWORD_EXPIRE_SECONDS
from time import time
from urllib.parse import quote_plus
raw_json = PasswordResetToken(
user.id, int(time())+RESET_PASSWORD_EXPIRE_SECONDS, AUTH_TOKEN).as_json()
# print(raw_json)
to_send_token = encrypt(config.AUTH_PASSWORD, raw_json)
# print("raw token", to_send_token)
to_send_token = quote_plus(quote_plus(to_send_token))
# print(to_send_token)
# user.reset_token = str(uuid.uuid1())
from utils import send_mail
try:
send_mail(config.RESET_PASSWORD_EMAIL.format(
reset_token=to_send_token), "重置密码", user.email)
except Exception as ex:
import traceback
return make_response(-1, message=traceback.format_exc())
return make_response(0, message="重置密码的邮件已经发送到您邮箱的垃圾箱,请注意查收")
| 6,575 |
def encrypt(key, src_file_path, encrypted_file_path):
"""
Encrypts the specified source file to the target path using AES and the
specified RSA key
:param key: an RSA key
:param src_file_path: str path of file to be encrypted
:param encrypted_file_path: str path of target encrypted file
:return: None
"""
print('Encrypting file {} to {} using AES'.format(src_file_path,
encrypted_file_path))
rsa_key = RSA.import_key(key)
with open(encrypted_file_path, "wb") as outfile:
# Create a random session key and encrypt it with the input RSA key
session_key = get_random_bytes(16)
cipher_rsa = PKCS1_OAEP.new(rsa_key)
outfile.write(cipher_rsa.encrypt(session_key))
        # Create the AES cipher (EAX mode) from the session key
        cipher_aes = AES.new(session_key, AES.MODE_EAX)
        with open(src_file_path, 'rb') as infile:
# Use AES session key to encrypt input file data
data = infile.read()
ciphertext, digest = cipher_aes.encrypt_and_digest(data)
# write to target file
outfile.write(cipher_aes.nonce)
outfile.write(digest)
outfile.write(ciphertext)
print('Done')
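
# A hedged sketch (not part of the original source) of a matching decrypt
# routine, assuming the same pycryptodome primitives and the write order used
# above (RSA-encrypted session key, EAX nonce, digest, ciphertext) with the
# default 16-byte nonce and tag lengths; `key` must be the RSA private key.
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP


def decrypt_sketch(key, encrypted_file_path, dst_file_path):
    rsa_key = RSA.import_key(key)
    with open(encrypted_file_path, 'rb') as infile:
        enc_session_key = infile.read(rsa_key.size_in_bytes())
        nonce = infile.read(16)
        digest = infile.read(16)
        ciphertext = infile.read()
    # Recover the AES session key, then verify and decrypt the payload
    session_key = PKCS1_OAEP.new(rsa_key).decrypt(enc_session_key)
    cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce=nonce)
    data = cipher_aes.decrypt_and_verify(ciphertext, digest)  # raises ValueError if tampered
    with open(dst_file_path, 'wb') as outfile:
        outfile.write(data)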
| 6,576 |
def coerce_affine(affine, *, ndim, name=None):
"""Coerce a user input into an affine transform object.
If the input is already an affine transform object, that same object is returned
with a name change if the given name is not None. If the input is None, an identity
affine transform object of the given dimensionality is returned.
Parameters
----------
affine : array-like or napari.utils.transforms.Affine
An existing affine transform object or an array-like that is its transform matrix.
ndim : int
        The desired dimensionality of the transform. Ignored if affine is an Affine transform object.
name : str
The desired name of the transform.
Returns
-------
napari.utils.transforms.Affine
The input coerced into an affine transform object.
"""
if affine is None:
affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)
elif isinstance(affine, np.ndarray):
affine = Affine(affine_matrix=affine, ndim=ndim)
elif isinstance(affine, list):
affine = Affine(affine_matrix=np.array(affine), ndim=ndim)
elif not isinstance(affine, Affine):
raise TypeError(
trans._(
'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',
deferred=True,
dtype=type(affine),
)
)
if name is not None:
affine.name = name
return affine
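
# A hypothetical usage sketch (not part of the original source), assuming the
# napari Affine class used above; for ndim=2 the matrix is 3x3 homogeneous.
identity = coerce_affine(None, ndim=2)                        # identity transform
from_matrix = coerce_affine(np.eye(3), ndim=2, name="data")   # wraps an ndarray
assert from_matrix.name == "data"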
| 6,577 |
def unpack(url, sha256, compression, unpack_location='.'):
"""Fetch a remote archive, check its hash and decompress it
Download the file ``url``, ensure its hash matches the ``sha256`` argument,
then decompress it using ``compression`` method (either 'tar' or 'zip').
The unpacked files will be written to ``unpack_location``.
"""
print("Downloading", extract_filename(url))
with mev_build_utils.temporary_chdir(THIS_DIR):
# Chdir to make relative file:./z.zip URLs work.
urlopener = urllib.request.urlopen(url)
f = io.BytesIO(urlopener.read())
sha256_found = hashlib.sha256(f.getvalue()).hexdigest()
if sha256_found != sha256:
raise ModuleValidationError('Failed to validate downloaded package',
url, sha256_found, sha256)
available_compressors = {
'tar': lambda file, mode: tarfile.open(fileobj=file, mode=mode),
'zip': lambda file, mode: zipfile.ZipFile(file=file, mode=mode),
}
compressor = available_compressors[compression]
with compressor(f, 'r') as cf:
cf.extractall(os.path.join(THIS_DIR, unpack_location))
| 6,578 |
def provider_filtered_machines(request, provider_uuid,
identity_uuid, request_user=None):
"""
Return all filtered machines. Uses the most common,
default filtering method.
"""
identity = Identity.objects.filter(uuid=identity_uuid)
if not identity:
raise ObjectDoesNotExist()
try:
esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
except Exception:
# TODO: Observe the change of 'Fail loudly' here
# and clean up the noise, rather than hide it.
logger.exception(
"Driver could not be prepared - Provider: %s , Identity: %s"
% (provider_uuid, identity_uuid))
esh_driver = None
if not esh_driver:
raise LibcloudInvalidCredsError()
logger.debug(esh_driver)
return list_filtered_machines(esh_driver, provider_uuid, request_user)
| 6,579 |
def build_pin_dict(fp, filepath):
""" build a dictionary with pins and their aliases for one pic
and print the dictionary
"""
dom = parse(filepath) # load .pic file
pinlist = {} # new dictionary
i = 1 # pin number
for pin in dom.getElementsByTagName("edc:Pin"): # select pin nodes
aliaslist = [] # new aliaslist this pin
for vpin in pin.getElementsByTagName("edc:VirtualPin"):
alias = vpin.getAttribute("edc:name") # raw alias
alias = alias.upper().strip("_").split()[0] # first word
aliaslist.append(alias) # add alias!
pinlist[i] = aliaslist # add aliaslist this pin
i += 1
for alias in aliaslist:
if (re.match(portpin, alias) or re.match(gpiopin, alias)): # select Rxy or GPx
portbit = alias
if portbit != aliaslist[0]: # not first in list
aliaslist.remove(portbit) # remove it
aliaslist.insert(0, portbit) # add it to front
break
picname = os.path.splitext(os.path.split(filepath)[1])[0][3:].upper() # pic type
print(picname) # progress signal
fp.write(picname + "\n")
if len(pinlist) > 0: # any pins in list
list_pic_pins(fp, pinlist) # list pinmap this pic
else:
print(" No pinlist!")
fp.write(" No pinlist\n")
| 6,580 |
def known_peaks():
"""Return a list of Peak instances with data (identified)."""
peak1 = Peak(
name="Test1Known",
r_time=5.00,
mz=867.1391,
charge="+",
inchi_key="IRPOHFRNKHKIQA-UHFFFAOYSA-N",
)
peak2 = Peak(
name="Test2Known",
r_time=8.00,
mz=260.0297,
charge="-",
inchi_key="HXXFSFRBOHSIMQ-FPRJBGLDSA-N",
)
return [peak1, peak2]
| 6,581 |
def explode_sheet_music(sheet_music):
"""
    Splits unformatted sheet music into formatted lines of at most
    LINE_LENGTH_LIM characters (up to LINES_LIMIT lines) and returns them as a list
"""
split_music = sheet_music.split(',')
split_music = list(map(lambda note: note+',', split_music))
split_list = []
counter = 0
line_counter = 1
for note in split_music:
if line_counter > LINES_LIMIT-1:
break
if counter+len(note) > LINE_LENGTH_LIM-2:
split_list[-1] = split_list[-1].rstrip(',')
split_list[-1] += END_OF_LINE_CHAR
counter = 0
line_counter += 1
split_list.append(note)
counter += len(note)
return split_list
| 6,582 |
def norm_coefficient(m, n):
"""
Calculate the normalization coefficient for the (m, n) Zernike mode.
Parameters
----------
m : int
m-th azimuthal Zernike index
n : int
n-th radial Zernike index
Returns
-------
norm_coeff : float
Noll normalization coefficient
"""
norm_coeff = np.sqrt(2 * (n + 1)/(1 + (m == 0)))
return norm_coeff
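
# Worked values (a sanity check, not part of the original source): the factor
# reduces to sqrt(n + 1) when m == 0 and to sqrt(2 * (n + 1)) otherwise.
assert np.isclose(norm_coefficient(0, 2), np.sqrt(3))   # defocus, ~1.732
assert np.isclose(norm_coefficient(2, 2), np.sqrt(6))   # astigmatism, ~2.449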
| 6,583 |
def get_waveglow(ckpt_url):
"""
Init WaveGlow vocoder model with weights.
Used to generate realistic audio from mel-spectrogram.
"""
wn_config = {
'n_layers': hp.wg_n_layers,
'n_channels': hp.wg_n_channels,
'kernel_size': hp.wg_kernel_size
}
audio_config = {
'wav_value': hp.wg_wav_value,
'sampling_rate': hp.wg_sampling_rate
}
model = WaveGlow(
n_mel_channels=hp.wg_n_mel_channels,
n_flows=hp.wg_n_flows,
n_group=hp.wg_n_group,
n_early_every=hp.wg_n_early_every,
n_early_size=hp.wg_n_early_size,
wn_config=wn_config
)
load_checkpoint(ckpt_url, model)
model.set_train(False)
return model, audio_config
| 6,584 |
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
| 6,585 |
def gen_cartesian_product(*args: List[Dict]) -> List[Dict]:
""" generate cartesian product for lists
    Generates the Cartesian product; presumably used for parameterization
Args:
args (list of list): lists to be generated with cartesian product
Returns:
list: cartesian product in list
Examples:
>>> arg1 = [{"a": 1}, {"a": 2}]
>>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}]
>>> args = [arg1, arg2]
>>> gen_cartesian_product(*args)
>>> # same as below
>>> gen_cartesian_product(arg1, arg2)
[
{'a': 1, 'x': 111, 'y': 112},
{'a': 1, 'x': 121, 'y': 122},
{'a': 2, 'x': 111, 'y': 112},
{'a': 2, 'x': 121, 'y': 122}
]
"""
if not args:
return []
elif len(args) == 1:
return args[0]
"""
    After the checks above, this point is only reached when len(args) >= 2
"""
product_list = []
    # itertools.product(*args) computes the Cartesian product, equivalent to nested for loops
for product_item_tuple in itertools.product(*args):
"""
({'a': 1}, {'x': 111, 'y': 112})
({'a': 1}, {'x': 121, 'y': 122})
({'a': 2}, {'x': 111, 'y': 112})
({'a': 2}, {'x': 121, 'y': 122})
"""
product_item_dict = {}
for item in product_item_tuple:
"""
1 :{'a': 1}
1 :{'x': 111, 'y': 112}
2 :{'a': 1}
2 :{'x': 121, 'y': 122}
3 :{'a': 2}
3 :{'x': 111, 'y': 112}
4 :{'a': 2}
4 :{'x': 121, 'y': 122}
"""
product_item_dict.update(item)
product_list.append(product_item_dict)
# [{'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122}]
return product_list
| 6,586 |
def nxclass_handler(validator, v_item):
"""validate @NX_class"""
nx_class = utils.decode_byte_string(v_item.h5_object)
nxdl = validator.manager.classes.get(nx_class)
if nxdl is None:
c = "not a recognized NXDL class: " + nx_class
status = finding.ERROR
elif isBaseClassNXDL(nxdl):
c = "recognized NXDL base class: " + nx_class
status = finding.OK
else:
c = "incorrect use of @NX_class attribute: " + nx_class
# should place the application definition name in the entry/definition field
status = finding.ERROR
validator.record_finding(v_item, TEST_NAME, status, c)
| 6,587 |
def get_234_df(x):
"""
    This function gets the dataframe for models 2.1, 2.2 and 2.3
    input: x, the subCategory value we want to keep
    output: the dataframe filtered to rows where subCategory == x
"""
styles = pd.read_csv("styles.csv", error_bad_lines=False)
styles = styles.drop(["productDisplayName"],axis = 1)
styles = styles.drop(["year"],axis = 1)
styles = styles[(styles.masterCategory=='Apparel')| (styles.masterCategory=='Footwear')]
styles = styles.drop(styles[styles["subCategory"] == "Innerwear"].index)
styles = styles.dropna()
styles = df_drop(styles,"subCategory", ["Apparel Set", "Dress","Loungewear and Nightwear","Saree","Socks"])
styles["subCategory"] = styles["subCategory"].transform(lambda x: "Footwear" if(x in ["Shoes","Flip Flops","Sandal"]) else x)
styles = styles.drop(labels=[6695,16194,32309,36381,40000], axis=0)
styles = styles[styles.subCategory == x]
group_color(styles)
styles.baseColour=styles.colorgroup
return styles
| 6,588 |
def appropriate_bond_orders(params, smrts_mol, smrts):
"""Checks if a SMARTS substring specification has appropriate bond orders
given the user-specified mode.
:param params: A dictionary of the user parameters and filters.
:type params: dict
:param smrts_mol: RDKit mol object of the SMARTS string.
:type smrts_mol: RDKit mol object.
:param smrts: The SMARTS string.
:type smrts: str
:return: 'True' if it validates, 'False' otherwise.
:rtype: bool
"""
# Test if double bonds are inappropriately specified.
if params["mode"] == "NONE" and (
".pdb" in params["ligand_exts"] or ".pdbqt" in params["ligand_exts"]
):
bond_orders = [b.GetBondTypeAsDouble() for b in smrts_mol.GetBonds()]
bond_orders = [o for o in bond_orders if o != 1.0]
if len(bond_orders) > 0:
# So it has bonds with orders greater than 1
output.error(
"When processing PDB- and PDBQT-formatted ligands in NONE "
+ "mode, LigGrep ignores bond orders and simply "
+ "assumes that all appropriately juxtaposed atoms are "
+ "connected by single bonds. But one (or more) of your "
+ "filters describes a substructure with bonds of higher "
+ "orders: "
+ smrts,
params,
)
return False
return True
| 6,589 |
def open_images_folder():
"""
    Opens the `images` directory in the native file explorer (via the macOS `open` command)
"""
images_folder = base_dir() + '/images'
if not os.path.exists(images_folder):
os.makedirs(images_folder)
subprocess.call(["open", images_folder])
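
# A hedged cross-platform variant (not part of the original source): the call
# above relies on the macOS-only `open` command, so a sketch like this would
# dispatch per platform instead.
import os
import platform
import subprocess


def open_folder_sketch(path):
    system = platform.system()
    if system == "Darwin":
        subprocess.call(["open", path])
    elif system == "Windows":
        os.startfile(path)                    # Windows-only API
    else:
        subprocess.call(["xdg-open", path])   # most Linux desktops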
| 6,590 |
def train(model, train_path, val_path, steps_per_epoch, batch_size,
records_path):
"""
Train the Keras graph model
Parameters:
model (keras Model): The Model defined in build_model
train_path (str): Path to training data
val_path (str): Path to validation data
        steps_per_epoch (int): len(training_data) / batch_size
batch_size (int): Size of mini-batches used during training
records_path (str): Path + prefix to output directory
Returns:
loss (ndarray): An array with the validation loss at each epoch
"""
adam = Adam(lr=0.001)
model.compile(loss='binary_crossentropy', optimizer=adam)
train_generator = data_generator(train_path, batch_size, seqlen=500)
val_generator = data_generator(val_path, 200000, seqlen=500)
validation_data = next(val_generator)
precision_recall_history = PrecisionRecall(validation_data)
# adding check-pointing
checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
verbose=1, save_best_only=False)
# defining parameters for early stopping
# earlystop = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
# patience=5)
# training the model..
hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
generator=train_generator,
validation_data=validation_data,
callbacks=[precision_recall_history,
checkpointer])
loss, val_pr = save_metrics(hist, precision_recall_history,
records_path=records_path)
return loss, val_pr
| 6,591 |
def verify_single_host(host, ip):
"""
Simple function to verify only a single host returned from query.
If no hosts, or multiple hosts are returned, an error message is printed
and the program exits.
"""
if len(host) == 0:
print("Error: No host with IP address {} was found".format(ip))
sys.exit(1)
if len(host) > 1:
print("Error: Multiple hosts with IP address {} were found".format(ip))
print(json.dumps(host, indent=2))
sys.exit(1)
| 6,592 |
def Mcnu_to_m1m2(Mc, nu):
"""Convert chirp mass, symmetric mass ratio pair to m1, m2"""
q = nu_to_q(nu)
M = Mcq_to_M(Mc, q)
return Mq_to_m1m2(M, q)
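
# A hedged round-trip sketch (not from the original source), using the standard
# definitions Mc = (m1*m2)**(3/5) / (m1 + m2)**(1/5) and nu = m1*m2 / (m1 + m2)**2.
m1, m2 = 30.0, 20.0
Mc = (m1 * m2) ** 0.6 / (m1 + m2) ** 0.2
nu = (m1 * m2) / (m1 + m2) ** 2
# Mcnu_to_m1m2(Mc, nu) should recover (30.0, 20.0) up to floating-point error,
# assuming the helpers nu_to_q, Mcq_to_M and Mq_to_m1m2 follow the usual
# conventions (the ordering of the returned pair may differ).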
| 6,593 |
def _AccumulateActions(args):
"""Given program arguments, determines what actions we want to run.
Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
ResultsReport, and the str is the file extension for the given report.
"""
results = []
# The order of these is arbitrary.
if args.json:
results.append((JSONResultsReport, 'json'))
if args.text:
results.append((TextResultsReport, 'txt'))
if args.email:
email_ctor = functools.partial(TextResultsReport, email=True)
results.append((email_ctor, 'email'))
# We emit HTML if nothing else was specified.
if args.html or not results:
results.append((HTMLResultsReport, 'html'))
return results
| 6,594 |
def test_profile_rate_attr(mock_board) -> None:
"""Side effects of changing the rate profile via its attribute."""
profile = Profile(board=mock_board)
assert profile.rate == 1
assert profile._state == Profile.SyncedState.UNFETCHED
assert profile._profile_tracker == (1, 1)
profile.rate = 2
assert profile.rate == 1
assert profile._state == Profile.SyncedState.AWAITING_APPLY
assert profile._profile_tracker == (1, 2)
| 6,595 |
def build_predictions_dictionary(data, class_label_map):
"""Builds a predictions dictionary from predictions data in CSV file.
Args:
data: Pandas DataFrame with the predictions data for a single image.
class_label_map: Class labelmap from string label name to an integer.
Returns:
Dictionary with keys suitable for passing to
OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info:
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
"""
dictionary = {
standard_fields.DetectionResultFields.detection_classes:
data['LabelName'].map(lambda x: class_label_map[x]).to_numpy(),
standard_fields.DetectionResultFields.detection_scores:
data['Score'].to_numpy().astype(float)
}
if 'Mask' in data:
segments, boxes = _decode_raw_data_into_masks_and_boxes(
data['Mask'], data['ImageWidth'], data['ImageHeight'])
dictionary[standard_fields.DetectionResultFields.detection_masks] = segments
dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes
else:
dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[
'YMin', 'XMin', 'YMax', 'XMax'
]].to_numpy().astype(float)
return dictionary
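
# A hypothetical usage sketch (not from the original source): a two-box
# prediction frame for a single image and a toy label map; the column names
# match those read by the function above.
import pandas as pd

data = pd.DataFrame({
    'LabelName': ['/m/01g317', '/m/0199g'],
    'Score': [0.9, 0.4],
    'YMin': [0.1, 0.2], 'XMin': [0.1, 0.2], 'YMax': [0.5, 0.6], 'XMax': [0.5, 0.6],
})
class_label_map = {'/m/01g317': 1, '/m/0199g': 2}
# build_predictions_dictionary(data, class_label_map) then yields classes
# [1, 2], scores [0.9, 0.4] and a [2, 4] array of [ymin, xmin, ymax, xmax] boxes.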
| 6,596 |
def create_file_link(link_id, file_id, parent_share_id, parent_datastore_id):
"""
DB wrapper to create a link between a file and a datastore or a share
Takes care of "degenerated" tree structures (e.g a child has two parents)
In addition checks if the link already exists, as this is a crucial part of the access rights system
:param link_id:
:param file_id:
:param parent_share_id:
:param parent_datastore_id:
:return:
"""
try:
File_Link.objects.create(
link_id = link_id,
file_id = file_id,
parent_datastore_id = parent_datastore_id,
parent_share_id = parent_share_id
)
    except Exception:
return False
return True
| 6,597 |
def deptree(lines):
"""Build a tree of what step depends on what other step(s).
Test input becomes
{'A': set(['C']), 'C': set([]), 'B': set(['A']),
'E': set(['B', 'D', 'F']), 'D': set(['A']),
'F': set(['C'])}
A depends on C
B depends on A
C depends on nothing (starting point)
D depends on A
E depends on B, D, F
F depends on C
"""
coll = defaultdict(set)
for line in lines:
parts = line.split()
coll[parts[7]].add(parts[1])
if parts[1] not in coll:
coll[parts[1]] = set()
return dict(coll)
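
# A usage sketch (not part of the original source) with the instruction format
# the index math above assumes: word 1 is the prerequisite step, word 7 the
# dependent step.
lines = [
    "Step C must be finished before step A can begin.",
    "Step A must be finished before step B can begin.",
]
# deptree(lines) -> {'A': {'C'}, 'C': set(), 'B': {'A'}}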
| 6,598 |
def get_datetime(time_str, model="0"):
"""
    Time formatting: converts '20200120.110227' to '2020-01-20 11:02:27'.
    Returns a datetime object.
"""
if model == "0":
time_str = get_time(time_str)
time = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
return time
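
# A hedged usage note (not from the original source): with model != "0" the
# get_time helper is skipped and the string is parsed directly, e.g.
# get_datetime("2020-01-20 11:02:27", model="1")
#     -> datetime.datetime(2020, 1, 20, 11, 2, 27)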
| 6,599 |