content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def test_ExtremelyRandomizedTreesRegressionSklearn_5():
"""Non-trivial test case, including standard deviation."""
n, m, xlen = 150, 600, 10
train_inputs = np.reshape(np.linspace(-xlen / 2, +xlen / 2, n), (n, 1))
train_labels = (train_inputs * 2 + 1).flatten()
train_data = smlb.TabularData(data=train_inputs, labels=train_labels)
train_data = smlb.LabelNoise(noise=smlb.NormalNoise(rng=0)).fit(train_data).apply(train_data)
valid_inputs = np.reshape(np.linspace(-xlen / 2, +xlen / 2, m), (m, 1))
valid_labels = (valid_inputs * 2 + 1).flatten()
valid_data = smlb.TabularData(data=valid_inputs, labels=valid_labels)
valid_data = smlb.LabelNoise(noise=smlb.NormalNoise(rng=1)).fit(valid_data).apply(valid_data)
# 12 trees meets minimal requirements for jackknife estimates
rf = ExtremelyRandomizedTreesRegressionSklearn(random_state=0, uncertainties="naive")
preds = rf.fit(train_data).apply(valid_data)
mae = smlb.MeanAbsoluteError().evaluate(valid_data.labels(), preds)
# for perfect predictions, expect MAE of 1.12943
# (absolute difference between draws from two unit normal distributions)
assert np.allclose(mae, 1.13, atol=0.25)
assert np.allclose(np.mean(preds.stddev), 1, atol=0.25)
| 6,900 |
def confirm_channel(bitcoind, n1, n2):
"""
Confirm that a channel is open between two nodes
"""
assert n1.id() in [p.pub_key for p in n2.list_peers()]
assert n2.id() in [p.pub_key for p in n1.list_peers()]
for i in range(10):
time.sleep(0.5)
if n1.check_channel(n2) and n2.check_channel(n1):
return True
addr = bitcoind.rpc.getnewaddress("", "bech32")
bhash = bitcoind.rpc.generatetoaddress(1, addr)[0]
n1.block_sync(bhash)
n2.block_sync(bhash)
# Last ditch attempt
return n1.check_channel(n2) and n2.check_channel(n1)
| 6,901 |
def has_permissions(**perms):
"""A :func:`check` that is added that checks if the member has any of
the permissions necessary.
The permissions passed in must be exactly like the properties shown under
:class:`discord.Permissions`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test():
await bot.say('You can manage messages.')
"""
def predicate(ctx):
msg = ctx.message
ch = msg.channel
permissions = ch.permissions_for(msg.author)
return all(getattr(permissions, perm, None) == value for perm, value in perms.items())
return check(predicate)
| 6,902 |
def reload_configs():
"""
Reloads configuration parameters in stackstorm database from configuration files.
"""
os.system("st2ctl reload --register-configs")
| 6,903 |
def extend_params(params, more_params):
"""Extends dictionary with new values.
Args:
params: A dictionary
more_params: A dictionary
Returns:
A dictionary which combines keys from both dictionaries.
Raises:
ValueError: if dicts have the same key.
"""
for yak in more_params:
if yak in params:
raise ValueError('Key "%s" is already in dict' % yak)
params.update(more_params)
return params
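# Hypothetical usage sketch (not part of the original source): new keys merge in
# place, duplicate keys raise ValueError.
base = {"lr": 0.01}
extend_params(base, {"epochs": 10})    # base is now {'lr': 0.01, 'epochs': 10}
try:
    extend_params(base, {"lr": 0.1})
except ValueError as err:
    print(err)                         # Key "lr" is already in dict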
| 6,904 |
def __compute_libdeps(node):
"""
Computes the direct library dependencies for a given SCons library node.
    The attribute that it uses is populated by the Libdeps.py script.
"""
if getattr(node.attributes, 'libdeps_exploring', False):
raise DependencyCycleError(node)
env = node.get_env()
deps = set()
node.attributes.libdeps_exploring = True
try:
try:
for child in env.Flatten(getattr(node.attributes, 'libdeps_direct',
[])):
if not child:
continue
deps.add(child)
except DependencyCycleError as e:
if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[
-1]:
e.cycle_nodes.insert(0, node)
logging.error("Found a dependency cycle" + str(e.cycle_nodes))
finally:
node.attributes.libdeps_exploring = False
return deps
| 6,905 |
def image2pptx(argv=sys.argv[1:]):
"""Paste images to PowerPoint.
Args:
--image-path (Path, optional) : Paths to image files. Defaults to ``()``.
--image-dir (Path, optional) : Path to the directory where images are. Defaults to ``None``.
-W/--slide-width (int, optional) : The width of PowerPoint slide. Defaults to ``9144000``.
-H/--slide-height (int, optional) : The height of PowerPoint slide. Defaults to ``6858000``.
--slide-size (str, optional) : The size of PowerPoint slide. Please chose from ``["4:3", "16:9"]``. Defaults to ``"4:3"``.
-O/--outpptx (Path, optional) : The path to the created PowerPoint. Defaults to ``Path("test.pptx")``.
Note:
When you run from the command line, execute as follows::
$ image2pptx --image-path /path/to/image1.png \\
/path/to/image2.jpg \\
/path/to/image3.jpeg \\
                         --image-dir /path/to/image_dir \\
--slide-size "16:9" \\
--outpptx "image.pptx"
"""
parser = argparse.ArgumentParser(prog="image2pptx", description="Paste images to PowerPoint", add_help=True)
parser.add_argument("--image-path", type=Path, nargs="*", help="Paths to image files.")
parser.add_argument("--image-dir", type=Path, help="Path to the directory where images are.")
parser.add_argument("-W", "--slide-width", type=int, default=9144000, help="The width of PowerPoint slide.")
parser.add_argument("-H", "--slide-height", type=int, default=6858000, help="The height of PowerPoint slide.")
parser.add_argument("--slide-size", type=str, default=None, choices=["4:3", "16:9"], help="The size of PowerPoint slide.")
parser.add_argument("-O", "--outpptx", type=Path, default=Path("test.pptx"), help="The path to the created PowerPoint.")
args = parser.parse_args()
image_paths = list(args.image_path)
if args.image_dir is not None:
image_paths += sorted(
[
path
for path in args.image_dir.rglob("*")
if path.suffix.lower() in IMAGE_EXTENSIONS
]
)
slide_size = args.slide_size
if slide_size is not None:
slide_width, slide_height = {
"4:3": (9144000, 6858000),
"16:9": (12193200, 6858000),
}[slide_size]
else:
slide_width = args.slide_width
slide_height = args.slide_height
prs = Presentation()
prs.slide_width = slide_width
prs.slide_height = slide_height
blank_slide_layout = prs.slide_layouts[6]
slide = prs.slides.add_slide(blank_slide_layout)
left = top = 0
for image_path in image_paths:
img = Image.open(image_path)
slide.shapes.add_picture(
image_file=str(image_path),
left=left,
top=top,
)
left += img.width * 1e4
if left >= slide_width:
top += img.height * 1e4
left = 0
prs.save(file=args.outpptx)
| 6,906 |
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])
for i in range(len(partition))}
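# Hypothetical usage sketch (not from the original source); `feature` must support
# 2-D indexing, so a small numpy array stands in for the accumulator output here.
import numpy as np
_AccumulatorResultToDict(
    partition=[0, 0],
    feature=np.array([[1, 2], [3, 4]]),
    grads=[0.1, 0.2],
    hessians=[1.0, 2.0],
)
# -> {(0, 1, 2): (0.1, 1.0), (0, 3, 4): (0.2, 2.0)}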
| 6,907 |
def scale_rotor_pots(rotors, scale_factor=((), None)):
""" scale the pots
"""
# Count numbers
numtors = 0
for rotor in rotors:
numtors += len(rotor)
# Calculate the scaling factors
scale_indcs, factor = scale_factor
nscale = numtors - len(scale_indcs)
if nscale > 0:
sfactor = factor**(2.0/nscale)
ioprinter.debug_message(
'scale_coeff test:', factor, nscale, sfactor)
for rotor in rotors:
for tidx, torsion in enumerate(rotor):
if tidx not in scale_indcs and factor is not None:
torsion.pot = automol.pot.scale(torsion.pot, sfactor)
# following is being used in a test to see how effective
# a scaling of fixed scan torsional pots can be
torsion.pot = automol.pot.relax_scale(torsion.pot)
return rotors
| 6,908 |
async def countdown_minutes(channel, minutes):
"""Send a countdown message (updated each minute)
and another one when the countdown finishes."""
countdown = await channel.send("Time left: %d minutes" % minutes)
edit = asyncio.sleep(0)
while minutes > 0:
sleep = asyncio.sleep(60)
await asyncio.gather(sleep, edit)
minutes -= 1
edit = countdown.edit(content="Time left: %d minutes" % minutes)
await edit
await countdown.edit(content="Time’s up!")
await channel.send("Time’s up!")
| 6,909 |
def twitter_retrieve_data_streaming_api(ctx, consumer_key, consumer_secret, access_token, access_secret, save_data_mode, tweets_output_folder, area_name,
x_min, y_min, x_max, y_max, languages, max_num_tweets, only_geotagged,
db_username, db_password, db_hostname, db_port, db_database, db_schema):
"""Retrieve data from Twitter Streaming API
"""
if ctx.obj['verbose']:
click.echo('Executing ...')
geoso.twitter_retrieve_data_streaming_api(consumer_key, consumer_secret, access_token, access_secret, str(save_data_mode).upper(),
tweets_output_folder, area_name, float(x_min), float(x_max), float(
y_min), float(y_max), languages, int(max_num_tweets),
only_geotagged, db_hostname, db_port, db_database, db_schema, db_username, db_password)
if ctx.obj['verbose']:
click.echo('Execution finished successfully.')
| 6,910 |
async def create(payload: ProductIn):
"""Create new product from sent data."""
product_id = await db.add_product(payload)
apm.capture_message(param_message={'message': 'Product with %s id created.', 'params': product_id})
return ProductOut(**payload.dict(), product_id=product_id)
| 6,911 |
def get_glove_info(glove_file_name):
"""Return the number of vectors and dimensions in a file in GloVe format."""
with smart_open(glove_file_name) as f:
num_lines = sum(1 for line in f)
with smart_open(glove_file_name) as f:
num_dims = len(f.readline().split()) - 1
return num_lines, num_dims
| 6,912 |
def retrieve_database_inputs(db_session: Session) -> (
Dict[str, List[RevenueRate]], Dict[str, MergeAddress], List[Driver]):
"""
Retrieve the static inputs of the model from the database
:param db_session: SQLAlchemy Database connection session
:return: level of service mapped to List of RevenueRate objects, merge addresses mapped to MergeAddress objects,
List of driver objects
"""
revenue_table = load_revenue_table_from_db(db_session)
merge_details = load_merge_details_from_db(db_session)
drivers_table = load_drivers_from_db(db_session)
return revenue_table, merge_details, drivers_table
| 6,913 |
def _evolve_cx(base_pauli, qctrl, qtrgt):
"""Update P -> CX.P.CX"""
base_pauli._x[:, qtrgt] ^= base_pauli._x[:, qctrl]
base_pauli._z[:, qctrl] ^= base_pauli._z[:, qtrgt]
return base_pauli
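# Hypothetical sketch of the update rule on a stand-in object (the real class is a
# Qiskit BasePauli; only the boolean _x/_z symplectic arrays matter here).
import numpy as np

class _FakePauli:
    def __init__(self, x, z):
        self._x = np.array(x, dtype=bool)
        self._z = np.array(z, dtype=bool)

# An X on the control qubit propagates to X on both qubits under CX conjugation.
p = _FakePauli(x=[[True, False]], z=[[False, False]])
_evolve_cx(p, qctrl=0, qtrgt=1)
# p._x is now [[True, True]]; p._z is unchanged.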
| 6,914 |
def tp_pixel_num_cal(im, gt):
""" im is the prediction result;
gt is the ground truth labelled by biologists;"""
tp = np.logical_and(im, gt)
tp_pixel_num = tp.sum()
return tp_pixel_num
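# Hypothetical usage sketch: two 2x2 binary masks that share two foreground pixels.
import numpy as np
prediction = np.array([[1, 0], [1, 1]])
ground_truth = np.array([[1, 1], [0, 1]])
tp_pixel_num_cal(prediction, ground_truth)  # -> 2 true-positive pixels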
| 6,915 |
def xsg_data(year=None, month=None,
retry_count=3, pause=0.001):
"""
获取限售股解禁数据
Parameters
--------
year:年份,默认为当前年
month:解禁月份,默认为当前月
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
------
DataFrame
code:股票代码
name:名称
date:解禁日期
count:解禁数量(万股)
ratio:占总盘比率
"""
year = du.get_year() if year is None else year
month = du.get_month() if month is None else month
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
ct.PAGES['emxsg'], year, month))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
except Exception as e:
print(e)
else:
da = lines[3:len(lines)-3]
            rows = []
            for row in da.split('","'):
                rows.append([data for data in row.split(',')])
            df = pd.DataFrame(rows)
df = df[[1, 3, 4, 5, 6]]
for col in [5, 6]:
df[col] = df[col].astype(float)
df[5] = df[5]/10000
df[6] = df[6]*100
df[5] = df[5].map(ct.FORMAT)
df[6] = df[6].map(ct.FORMAT)
df.columns = rv.XSG_COLS
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
| 6,916 |
def crm_ybquery_v2():
"""
crm根据用户手机号查询subId
:return:
"""
resp = getJsonResponse()
try:
jsonStr = request.data
# 调用业务逻辑
resp = {"message":"","status":200,"timestamp":1534844188679,"body":{"password":"21232f297a57a5a743894a0e4a801fc3","username":"admin"},"result":{"id":"4291d7da9005377ec9aec4a71ea837f","name":"Ronald Thompson","username":"admin","password":"","avatar":"https://gw.alipayobjects.com/zos/rmsportal/jZUIxmJycoymBprLOUbT.png","status":1,"telephone":"","lastLoginIp":"127.0.0.1","lastLoginTime":1534837621348,"creatorId":"admin","createTime":1497160610259,"deleted":0,"roleId":"admin","token":"4291d7da9005377ec9aec4a71ea837f"}}
except BaseException as e:
current_app.logger.error("=========异常============")
current_app.logger.error(e)
current_app.logger.error("=========异常============")
resp = getJsonResponse(code="101", msg="系统异常" + str(e))
return jsonify(resp)
| 6,917 |
def get_split_file_ids_and_pieces(
data_dfs: Dict[str, pd.DataFrame] = None,
xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
splits: Iterable[float] = (0.8, 0.1, 0.1),
seed: int = None,
) -> Tuple[Iterable[Iterable[int]], Iterable[Iterable[Piece]]]:
"""
Get the file_ids that should go in each split of a split dataset.
Parameters
----------
data_dfs : Dict[str, pd.DataFrame]
If using dataframes, a mapping of 'files', 'measures', 'chords', and 'notes' dfs.
xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
If using the MusicXML ('xmls') and label csvs ('csvs'), a list of paths of the
matching xml and csv files.
splits : Iterable[float]
An Iterable of floats representing the proportion of pieces which will go into each split.
This will be normalized to sum to 1.
seed : int
A numpy random seed, if given.
Returns
-------
split_ids : Iterable[Iterable[int]]
An iterable, the length of `splits` containing the file_ids for each data point in each
split.
pieces : Iterable[Iterable[Piece]]
The loaded Pieces of each split.
"""
assert sum(splits) != 0
splits = np.array(splits) / sum(splits)
if seed is not None:
np.random.seed(seed)
indexes = []
pieces = []
if data_dfs is not None:
for i in tqdm(data_dfs["files"].index):
file_name = (
f"{data_dfs['files'].loc[i].corpus_name}/{data_dfs['files'].loc[i].file_name}"
)
logging.info("Parsing %s (id %s)", file_name, i)
dfs = [data_dfs["chords"], data_dfs["measures"], data_dfs["notes"]]
names = ["chords", "measures", "notes"]
exists = [i in df.index.get_level_values(0) for df in dfs]
if not all(exists):
for exist, name in zip(exists, names):
if not exist:
logging.warning(
"%s_df does not contain %s data (id %s).", name, file_name, i
)
continue
try:
piece = get_score_piece_from_data_frames(
data_dfs["notes"].loc[i], data_dfs["chords"].loc[i], data_dfs["measures"].loc[i]
)
pieces.append(piece)
indexes.append(i)
except Exception:
logging.exception("Error parsing index %s", i)
continue
elif xml_and_csv_paths is not None:
for i, (xml_path, csv_path) in tqdm(
enumerate(zip(xml_and_csv_paths["xmls"], xml_and_csv_paths["csvs"])),
desc="Parsing MusicXML files",
total=len(xml_and_csv_paths["xmls"]),
):
piece = get_score_piece_from_music_xml(xml_path, csv_path)
pieces.append(piece)
indexes.append(i)
# Shuffle the pieces and the df_indexes the same way
shuffled_indexes = np.arange(len(indexes))
np.random.shuffle(shuffled_indexes)
pieces = np.array(pieces)[shuffled_indexes]
indexes = np.array(indexes)[shuffled_indexes]
split_pieces = []
split_indexes = []
prop = 0
for split_prop in splits:
start = int(round(prop * len(pieces)))
prop += split_prop
end = int(round(prop * len(pieces)))
length = end - start
if length == 0:
split_pieces.append([])
split_indexes.append([])
elif length == 1:
split_pieces.append([pieces[start]])
split_indexes.append([indexes[start]])
else:
split_pieces.append(pieces[start:end])
split_indexes.append(indexes[start:end])
return split_indexes, split_pieces
| 6,918 |
def calc_full_dist(row, vert, hor, N, site_collection_SM):
"""
Calculates full distance matrix. Called once per row.
    INPUTS:
    :param row:
        integer, index of the row being processed
    :param vert:
        integer, number of included rows
:param hor:
integer, number of columns within radius
:param N:
integer, number of points in row
:param site_collection_SM:
site collection object, for ShakeMap data
:returns:
dict, with following keys
grid_indices- indices of points included in distance matrix
distance_matrix- full distance matrix
"""
# gathers indices for full distance matrix for each row
grid_indices = [None]*(vert*(2*hor+1))
n_grid_indices = 0
for k in range(row-vert+1, row+1):
if k == row:
for j in range(0,hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
else:
for j in range(0,2*hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
del grid_indices[n_grid_indices:]
distance_matrix = np.zeros([np.size(grid_indices), np.size(grid_indices)])
# Create full distance matrix for row
for k in range(0, np.size(grid_indices)):
distance_matrix[k, k:] = geodetic_distance(
site_collection_SM.lons[grid_indices[k ]], site_collection_SM.lats[grid_indices[k]],
site_collection_SM.lons[grid_indices[k:]], site_collection_SM.lats[grid_indices[k:]]).flatten()
distance_matrix = distance_matrix + distance_matrix.T
return {'grid_indices':grid_indices, 'distance_matrix':distance_matrix}
| 6,919 |
def traffic_flow_color_scheme():
""" maybe not needed """
pass
| 6,920 |
def main(example="A"):
"""
Arguments of this example:
:param str example:
Whether to use example A (requires windows) or B
"""
# Parameters for sen-analysis:
sim_api = setup_fmu(example=example)
calibration_classes, validation_class = setup_calibration_classes(
example=example,
multiple_classes=True
)
# Sensitivity analysis:
calibration_classes = run_sensitivity_analysis(sim_api=sim_api,
cal_classes=calibration_classes)
# Calibration
run_calibration(sim_api=sim_api,
cal_classes=calibration_classes,
validation_class=validation_class)
| 6,921 |
def background_upload_do():
"""Handle the upload of a file."""
form = request.form
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
        print(form.items())
# Target folder for these uploads.
# target = os.sep.join(['app', 'static', 'photo_albums', 'Test', 'Dave'])
script_dir = os.path.dirname(os.path.abspath(__file__))
    target = os.sep.join([script_dir, 'static', 'photo_albums', list(form.items())[0][1], list(form.items())[1][1]])
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit(os.sep)[0]
if not os.path.exists(target):
print "Creating directory:", target
os.makedirs(target)
destination = os.sep.join([target, filename])
print "Accept incoming file:", filename
print "Save it to:", destination
upload.save(destination)
# if is_ajax:
return ajax_response(True, msg="DONE!")
# else:
# return redirect(url_for('upload'))
| 6,922 |
def write_yml(yml_file, host="127.0.0.1", port=2379):
"""Write a yml file to etcd.
Args:
yml_file (str): Path to the yml file.
host (str): Etcd host.
port (int): Etcd port.
"""
print(f"Writing the yml file {yml_file} in ETCD. Host: {host}. Port: {port}.")
etcd_tool = EtcdTool(host=host, port=port)
etcd_tool.load(yml_file)
print("Write finished correctly.")
| 6,923 |
def catch_list(in_dict, in_key, default, len_highest=1):
"""Handle list and list of list dictionary entries from parameter input files.
Casts list entries of input as same type as default_val.
Assign default values if user does not provide a given input parameter.
Args:
in_dict: Dictionary in input parameters, within which in_key is a key.
in_key: Key of parameter to retrieve from in_dict.
default:
Default list to assign at output if in_key does not exist in in_dict.
The type of the list entries in default implicitly defines the type which the parameter
is cast to, if it exists in in_dict.
len_highest: Expected length of topmost list.
Returns:
Input parameter list retrieved from in_dict, or default if not provided.
"""
# TODO: needs to throw an error if input list of lists is longer than len_highest
# TODO: could make a recursive function probably, just hard to define appropriate list lengths at each level
list_of_lists_flag = type(default[0]) == list
try:
inList = in_dict[in_key]
if len(inList) == 0:
raise ValueError
# List of lists
if list_of_lists_flag:
val_list = []
for list_idx in range(len_highest):
# If default value is None, trust user
if default[0][0] is None:
val_list.append(inList[list_idx])
else:
type_default = type(default[0][0])
cast_in_list = [type_default(inVal) for inVal in inList[list_idx]]
val_list.append(cast_in_list)
# Normal list
else:
# If default value is None, trust user
if default[0] is None:
val_list = inList
else:
type_default = type(default[0])
val_list = [type_default(inVal) for inVal in inList]
except:
if list_of_lists_flag:
val_list = []
for list_idx in range(len_highest):
val_list.append(default[0])
else:
val_list = default
return val_list
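# Hypothetical usage sketch: the default list defines both the cast type and the fallback.
params = {"layers": ["16", "32"], "coeffs": [["1.0", "2.0"], ["3.0"]]}
catch_list(params, "layers", [8])                     # -> [16, 32] (cast to int)
catch_list(params, "missing", [8])                    # -> [8] (default returned)
catch_list(params, "coeffs", [[0.0]], len_highest=2)  # -> [[1.0, 2.0], [3.0]] (cast to float)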
| 6,924 |
def _load_candidate_scorings (spec, input_dir_candidates, predictor):
"""
Load the msms-based scores for the candidates of the specified msms-spectra.
:param spec: string, identifier of the spectra and candidate list. Currently
we use the inchikey of the structure represented by the spectra.
:param input_dir_candidates: string, directory containing the scoring and
fingerprints of the candidates (compare also 'build_candidate_structure').
:param predictor: list of strings, containing the predictors used to train the model.
Currently only 'maccs' and 'maccsCount_f2dcf0b3' are supported.
:return: pandas.DataFrame, {"id1": [...], "score": [...]}
E.g.:
id1,score
"InChI=1S/C10H10N4O2S/c11-8-1-3-9(4-2-8)17...",0.601026809509167
"InChI=1S/C10H10N4O2S/c11-8-2-4-9(5-3-8)17...",0.59559886408
...
             NOTE: 'id1' here refers to the InChI; this can be changed, but we would also need to modify
                   '_process_single_candidate_list'.
"""
if predictor[0] == "maccs":
fps_fn = "maccs_binary"
elif predictor[0] == "maccsCount_f2dcf0b3":
fps_fn = "maccs_count"
else:
        raise ValueError("Unsupported predictor for candidates: %s" % predictor[0])
    l_scoring_files = os.listdir(input_dir_candidates + "/scorings/" + fps_fn + "/")
    scores_fn = list(filter(re.compile("scoring_list.*=%s.csv" % spec).match, l_scoring_files))
    assert len(scores_fn) == 1
    scores_fn = input_dir_candidates + "/scorings/" + fps_fn + "/" + scores_fn[0]
    # Return scores in descending order
    return DataFrame.from_csv(scores_fn, index_col=None).sort_values("score", ascending=False)
| 6,925 |
def load_dict_data(selected_entities=None, path_to_data_folder=None):
"""Loads up data from .pickle file for the selected entities.
Based on the selected entities, loads data from storage,
into memory, if respective files exists.
Args:
selected_entities: A list of string entity names to be loaded.
            Default is to load all available entities.
path_to_data_folder: A string specifying the absolute path to
the data folder that contains the entity dataset files.
By default, uses the built-in entity datasets.
Returns:
A dictionary mapping entity type (key) to all entity values of
that type. Values are dictionary of dictionaries.
{
'genre': {
'comedy': {
'action': {1:1},
'drama': {1:1}
},
'action': {
'thriller': {1:1}
}
}
}
        Always returns a dictionary. If .pickle files of the selected entities
        are not found, or if no .pickle files are found, returns an empty
        dictionary.
"""
return load_entities(
selected_entities=selected_entities, from_pickle=True,
path_to_data_folder=path_to_data_folder
)
| 6,926 |
def p_mtp3field(p):
"""mtp3field : SIO
| OPC
| DPC
| SLS
| HSIO
| HOPC
| HDPC
| HSLS"""
| 6,927 |
def cart_del(request, pk):
""" remove an experiment from the analysis cart and return"""
pk=int(pk) # make integer for lookup within template
analyze_list = request.session.get('analyze_list', [])
if pk in analyze_list:
analyze_list.remove(pk)
request.session['analyze_list'] = analyze_list
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| 6,928 |
def get_block_devices(bdms=None):
"""
@type bdms: list
"""
ret = ""
if bdms:
for bdm in bdms:
ret += "{0}\n".format(bdm.get('DeviceName', '-'))
ebs = bdm.get('Ebs')
if ebs:
ret += " Status: {0}\n".format(ebs.get('Status', '-'))
ret += " Snapshot Id: {0}\n".format(ebs.get('SnapshotId', '-'))
ret += " Volume Size: {0}\n".format(ebs.get('VolumeSize', '-'))
ret += " Volume Type: {0}\n".format(ebs.get('VolumeType', '-'))
ret += " Encrypted: {0}\n".format(str(ebs.get('Encrypted', '-')))
ret += " Delete on Termination: {0}\n".format(ebs.get('DeleteOnTermination', '-'))
ret += " Attach Time: {0}\n".format(str(ebs.get('AttachTime', '-')))
return ret.rstrip()
else:
return ret
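# Hypothetical usage sketch with a minimal block-device mapping shaped like the
# boto3 describe_instances response (the keys shown are assumptions for illustration):
example_bdms = [{
    "DeviceName": "/dev/sda1",
    "Ebs": {"Status": "attached", "VolumeSize": 8, "VolumeType": "gp2",
            "Encrypted": False, "DeleteOnTermination": True},
}]
print(get_block_devices(example_bdms))
# /dev/sda1
#  Status: attached
#  Snapshot Id: -
#  Volume Size: 8
#  Volume Type: gp2
#  Encrypted: False
#  Delete on Termination: True
#  Attach Time: -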
| 6,929 |
def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Outlier Tests for RegressionResults instances.
Parameters
----------
model_results : RegressionResults instance
Linear model results
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations with
multiple testing corrected p-values strictly below the cutoff. The
returned array or dataframe can be empty if there are no outlier
candidates at the specified cutoff.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from scipy import stats # lazy import
if labels is None:
labels = getattr(model_results.model.data, 'row_labels', None)
infl = getattr(model_results, 'get_influence', None)
if infl is None:
results = maybe_unwrap_results(model_results)
raise AttributeError("model_results object %s does not have a "
"get_influence method." % results.__class__.__name__)
resid = infl().resid_studentized_external
if order:
idx = np.abs(resid).argsort()[::-1]
resid = resid[idx]
if labels is not None:
labels = np.asarray(labels)[idx]
df = model_results.df_resid - 1
unadj_p = stats.t.sf(np.abs(resid), df) * 2
adj_p = multipletests(unadj_p, alpha=alpha, method=method)
data = np.c_[resid, unadj_p, adj_p[1]]
if cutoff is not None:
mask = data[:, -1] < cutoff
data = data[mask]
else:
mask = slice(None)
if labels is not None:
from pandas import DataFrame
return DataFrame(data,
columns=['student_resid', 'unadj_p', method+"(p)"],
index=np.asarray(labels)[mask])
return data
| 6,930 |
def imgcat(data, width='auto', height='auto', preserveAspectRatio=False,
inline=True, filename=''):
"""
The width and height are given as a number followed by a unit, or the
word "auto".
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
auto: The image's inherent size will be used to determine an
appropriate dimension.
"""
buf = bytes()
enc = 'utf-8'
    is_tmux = os.environ.get('TERM', '').startswith('screen')
# OSC
buf += b'\033'
if is_tmux:
buf += b'Ptmux;\033\033'
buf += b']'
buf += b'1337;File='
if filename:
buf += b'name='
buf += b64encode(filename.encode(enc))
buf += b';size=%d' % len(data)
buf += b';inline=%d' % int(inline)
buf += b';width=%s' % width.encode(enc)
buf += b';height=%s' % height.encode(enc)
buf += b';preserveAspectRatio=%d' % int(preserveAspectRatio)
buf += b':'
buf += b64encode(data)
# ST
buf += b'\a'
if is_tmux:
buf += b'\033\\'
buf += b'\n'
stdout.buffer.write(buf)
stdout.flush()
| 6,931 |
def align_reconstruction(reconstruction, gcp, config):
"""Align a reconstruction with GPS and GCP data."""
res = align_reconstruction_similarity(reconstruction, gcp, config)
if res:
s, A, b = res
apply_similarity(reconstruction, s, A, b)
| 6,932 |
def OpenTrade(request, callback, customData = None, extraHeaders = None):
"""
Opens a new outstanding trade. Note that a given item instance may only be in one open trade at a time.
https://docs.microsoft.com/rest/api/playfab/client/trading/opentrade
"""
if not PlayFabSettings._internalSettings.ClientSessionTicket:
raise PlayFabErrors.PlayFabException("Must be logged in to call this method")
def wrappedCallback(playFabResult, error):
if callback:
callback(playFabResult, error)
PlayFabHTTP.DoPost("/Client/OpenTrade", request, "X-Authorization", PlayFabSettings._internalSettings.ClientSessionTicket, wrappedCallback, customData, extraHeaders)
| 6,933 |
def move(request, content_type_id, obj_id, rank):
"""View to be used in the django admin for changing a :class:`RankedModel`
object's rank. See :func:`admin_link_move_up` and
    :func:`admin_link_move_down` for helper functions to incorporate in your
admin models.
Upon completion this view sends the caller back to the referring page.
:param content_type_id:
``ContentType`` id of object being moved
:param obj_id:
ID of object being moved
:param rank:
New rank of the object
"""
content_type = ContentType.objects.get_for_id(content_type_id)
obj = get_object_or_404(content_type.model_class(), id=obj_id)
obj.rank = int(rank)
obj.save()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
| 6,934 |
def test_second_playback_enforcement(mocker, tmp_path):
"""
Given:
- A mockable test
When:
- The mockable test fails on the second playback
Then:
- Ensure that it exists in the failed_playbooks set
- Ensure that it does not exists in the succeeded_playbooks list
"""
class RunIncidentTestMock:
call_count = 0
count_response_mapping = {
1: PB_Status.FAILED, # The first playback run
2: PB_Status.COMPLETED, # The record run
3: PB_Status.FAILED # The second playback run
}
@staticmethod
def run_incident_test(*_):
# First playback run
RunIncidentTestMock.call_count += 1
return RunIncidentTestMock.count_response_mapping[RunIncidentTestMock.call_count]
filtered_tests = ['mocked_playbook']
tests = [generate_test_configuration(playbook_id='mocked_playbook',
integrations=['mocked_integration'])]
integrations_configurations = [generate_integration_configuration('mocked_integration')]
secret_test_conf = generate_secret_conf_json(integrations_configurations)
content_conf_json = generate_content_conf_json(tests=tests)
build_context = get_mocked_build_context(mocker,
tmp_path,
secret_conf_json=secret_test_conf,
content_conf_json=content_conf_json,
filtered_tests_content=filtered_tests)
mocked_demisto_client = DemistoClientMock(integrations=['mocked_integration'])
server_context = generate_mocked_server_context(build_context, mocked_demisto_client, mocker)
mocker.patch('demisto_sdk.commands.test_content.TestContentClasses.TestContext._run_incident_test',
RunIncidentTestMock.run_incident_test)
server_context.execute_tests()
assert 'mocked_playbook (Second Playback)' in build_context.tests_data_keeper.failed_playbooks
assert 'mocked_playbook' not in build_context.tests_data_keeper.succeeded_playbooks
| 6,935 |
def decode_textfield_ncr(content):
"""
Decodes the contents for CIF textfield from Numeric Character Reference.
:param content: a string with contents
:return: decoded string
"""
import re
def match2str(m):
return chr(int(m.group(1)))
    return re.sub(r'&#(\d+);', match2str, content)
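# Hypothetical usage sketch: numeric character references decode to Unicode characters.
decode_textfield_ncr("caf&#233;")  # -> 'café' (233 is the code point of 'é')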
| 6,936 |
def reflect_or_create_tables(options):
"""
returns a dict of classes
make 'em if they don't exist
"tables" is {'wfdisc': mapped table class, ...}
"""
tables = {}
# this list should mirror the command line table options
for table in list(mapfns.keys()) + ['lastid']:
# if options.all_tables:
fulltabnm = getattr(options, table, None)
if fulltabnm:
try:
tables[table] = ps.get_tables(session.bind, [fulltabnm])[0]
except NoSuchTableError:
print("{0} doesn't exist. Adding it.".format(fulltabnm))
tables[table] = ps.make_table(fulltabnm, PROTOTYPES[table])
tables[table].__table__.create(session.bind, checkfirst=True)
return tables
| 6,937 |
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_grpc_interface]
interface_dir=<<path to the parent directory of your Protocol Buffer (pb2) files>>
#<<package_name>>=<<communication_type>>, <<secure connection type>>, <<certificate_path or google API token>>
# 'package_name' is a CSV list of length 3, where each possible value is described in the documentation
# Note: to setup, in your interface_dir, create a sub-directory that has
# the same name as your package, and copy the Protocol Buffer pb2 files
# into that directory.
#
# If the package_name was 'helloworld', your app.config would look like:
# [fn_grpc_interface]
# interface_dir=/home/admin/integrations/grpc_interface_files
# helloworld=unary, None, None"""
return config_data
| 6,938 |
def show_server(data):
"""Show server."""
| 6,939 |
def get_workers_count_based_on_cpu_count():
"""
Returns the number of workers based available virtual or physical CPUs on this system.
"""
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count() * 2 + 1
except (ImportError, NotImplementedError):
pass
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res * 2 + 1
except (AttributeError, ValueError):
pass
raise Exception('Can not determine number of CPUs on this system')
| 6,940 |
def addAttributeToChannelList(channellist, channelType, channelName, attributename, attributevalue):
"""Adds channel attributes to the supplied list.
Note: This method only changes the supplied list. It does not communicate with a dmgr.
For example, it creates the following:
'channel_attributes': [{ 'channelType': 'HTTPInboundChannel',
'channelName': 'HTTP_2',
'attributename': 'persistentTimeout',
'attributevalue': '15' }, ]
"""
m = "addAttributeToChannelList: "
# sop(m,"Entry. channelType=%s channelName=%s attributename=%s attributevalue=%s" % ( channelType, channelName, attributename, attributevalue, ))
# Create a new dictionary object to specify the attribute.
new_attribute_dict = { 'channelType': channelType,
'channelName': channelName,
'attributename': attributename,
'attributevalue': attributevalue, }
# sop(m,"new_attribute_dict=%s" % ( new_attribute_dict ))
# Find and remove any existing instances of the specified attribute from the list.
for old_attribute_dict in channellist:
# sop(m,"old_attribute_dict=%s" % ( old_attribute_dict ))
        if 'channelType' in old_attribute_dict and 'channelName' in old_attribute_dict and 'attributename' in old_attribute_dict:
# sop(m,"old attribute contains key 'channelType', 'channelName', and 'attributename'")
if channelType == old_attribute_dict['channelType'] and channelName == old_attribute_dict['channelName'] and attributename == old_attribute_dict['attributename']:
sop(m,"Found old attribute matchine specified new attribute. Removing old attribute from list. channelType=%s channelName=%s attributename=%s attributevalue=%s" % ( channelType, channelName, attributename, attributevalue, ))
channellist.remove(old_attribute_dict)
# else:
# sop(m,"Leaving old attribute intact in list.")
# Add the new attribute to the list.
sop(m,"Adding the new attribute to the list. channelType=%s channelName=%s attributename=%s attributevalue=%s" % ( channelType, channelName, attributename, attributevalue, ))
channellist.append(new_attribute_dict)
# sop(m,"Exit. channellist=%s" % ( repr(channellist) ))
| 6,941 |
def test_file_contents(
downloader, filing_type, ticker, before_date, downloaded_filename
):
"""Only run this test when the sample filings folder exists.
This check is required since the distributed python package will
not contain the sample filings test data due to size constraints.
"""
dl, dl_path = downloader
downloaded_filename = Path(downloaded_filename)
extension = downloaded_filename.suffix
num_downloaded = dl.get(
filing_type, ticker, amount=1, before=before_date, download_details=True
)
assert num_downloaded == 1
downloaded_file_path = dl_path / ROOT_SAVE_FOLDER_NAME / ticker / filing_type
assert len(list(downloaded_file_path.glob("*"))) == 1
accession_number = list(downloaded_file_path.glob("*"))[0]
downloaded_file_path /= accession_number
downloaded_filings = list(downloaded_file_path.glob("*"))
assert len(downloaded_filings) == 2
sample_filings = Path("tests/sample-filings")
filename_parts = [
ticker,
filing_type.replace("-", ""),
f"{before_date.replace('-', '')}{extension}",
]
filename = "-".join(filename_parts).lower()
expected = sample_filings / filename
downloaded = downloaded_file_path / downloaded_filename
with expected.open() as expected_file:
with downloaded.open() as downloaded_file:
assert expected_file.readlines() == downloaded_file.readlines()
| 6,942 |
def color_print(path: str, color = "white", attrs = []) -> None:
"""Prints colorized text on terminal"""
colored_text = colored(
text = read_warfle_text(path),
color = color,
attrs = attrs
)
print(colored_text)
return None
| 6,943 |
def generate_chords(midi_path=None):
"""Generates harmonizing chords and saves them as MIDI.
Input parameters : Midi to harmonize to (Primer Midi), automatically set to Midi from transcription.py
Output = Midifile named "Midifile.chords.midi" in same location
"""
tf.logging.set_verbosity('INFO')
bundle = get_bundle()
if bundle:
config_id = bundle.generator_details.id
config = improv_rnn_model.default_configs[config_id]
config.hparams.parse(cfg['hparams'])
generator = improv_rnn_sequence_generator.ImprovRnnSequenceGenerator(
model=improv_rnn_model.ImprovRnnModel(config),
details=config.details,
steps_per_quarter=config.steps_per_quarter,
checkpoint=None,
bundle=bundle)
output_dir = os.path.expanduser(cfg['output_dir'])
primer_midi = os.path.expanduser(midi_path)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
qpm = magenta.music.DEFAULT_QUARTERS_PER_MINUTE
primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
qpm = primer_sequence.tempos[0].qpm
# Create backing chord progression from flags.
backing_chords = 'C G Am F C G F C'
raw_chords = backing_chords.split()
repeated_chords = [chord for chord in raw_chords
for _ in range(16)]
backing_chords = magenta.music.ChordProgression(repeated_chords)
# Derive the total number of seconds to generate based on the QPM of the
# priming sequence and the length of the backing chord progression.
seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
total_seconds = len(backing_chords) * seconds_per_step
# Specify start/stop time for generation based on starting generation at the
# end of the priming sequence and continuing until the sequence is num_steps
# long.
generator_options = generator_pb2.GeneratorOptions()
if primer_sequence:
input_sequence = primer_sequence
# Set the start time to begin on the next step after the last note ends.
if primer_sequence.notes:
last_end_time = max(n.end_time for n in primer_sequence.notes)
else:
last_end_time = 0
generate_section = generator_options.generate_sections.add(
start_time=last_end_time + seconds_per_step,
end_time=total_seconds)
if generate_section.start_time >= generate_section.end_time:
tf.logging.fatal(
'Priming sequence is longer than the total number of steps '
'requested: Priming sequence length: %s, Generation length '
'requested: %s',
generate_section.start_time, total_seconds)
return
else:
input_sequence = music_pb2.NoteSequence()
input_sequence.tempos.add().qpm = qpm
generate_section = generator_options.generate_sections.add(
start_time=0,
end_time=total_seconds)
# Add the backing chords to the input sequence.
chord_sequence = backing_chords.to_sequence(sequence_start_time=0.0, qpm=qpm)
for text_annotation in chord_sequence.text_annotations:
if text_annotation.annotation_type == CHORD_SYMBOL:
chord = input_sequence.text_annotations.add()
chord.CopyFrom(text_annotation)
input_sequence.total_time = len(backing_chords) * seconds_per_step
generator_options.args['temperature'].float_value = 1.0
generator_options.args['beam_size'].int_value = 1
generator_options.args['branch_factor'].int_value = 1
generator_options.args[
'steps_per_iteration'].int_value = 1
tf.logging.debug('input_sequence: %s', input_sequence)
tf.logging.debug('generator_options: %s', generator_options)
# Make the generate request num_outputs times and save the output as midi
# files.
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
digits = len(str(1))
for i in range(1):
generated_sequence = generator.generate(input_sequence, generator_options)
renderer = magenta.music.BasicChordRenderer(velocity=CHORD_VELOCITY)
renderer.render(generated_sequence)
midi_filename = primer_midi.replace('midi', 'chords.midi')
midi_path = os.path.join(cfg['output_dir'], midi_filename)
magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
tf.logging.info('Wrote %d MIDI files to %s',
1, cfg['output_dir'])
| 6,944 |
def main(debug):
"""
\b
fetchmesh is a Python library and a command line utility to facilitate
the use of the RIPE Atlas anchoring mesh measurements.
\b
The documentation and the source code are available at
https://github.com/maxmouchet/fetchmesh.
"""
if debug:
logging.basicConfig(
level=logging.DEBUG,
datefmt="%Y/%m/%d %H:%M:%S",
format="%(asctime)s %(levelname)s %(process)d %(name)s %(message)s",
)
| 6,945 |
def _handle_sync_response(response_packet,
commands_waiting_for_response):
"""
"""
# for ACK/synchronous responses we only need to call the registered
# callback.
sequence_number = response_packet.sequence_number
if sequence_number in commands_waiting_for_response:
# TODO: check to make sure handler is callable before invoking.
commands_waiting_for_response[sequence_number](response_packet)
# NOTE: it is up to the callback/waiting function to remove the
# handler.
| 6,946 |
def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
"""
Creates a ClusterSpec object representing the cluster.
:param parameters_server: comma-separated list of hostname:port pairs to which the parameter servers are assigned
:param workers: comma-separated list of hostname:port pairs to which the workers are assigned
:return: a ClusterSpec object representing the cluster
"""
# extract the parameter servers and workers from the given strings
ps_hosts = parameters_server.split(",")
worker_hosts = workers.split(",")
# Create a cluster spec from the parameter server and worker hosts
cluster_spec = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
return cluster_spec
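# Hypothetical usage sketch (relies on the same tf.train.ClusterSpec API used above):
spec = create_cluster_spec("ps0:2222", "worker0:2222,worker1:2222")
spec.as_dict()  # -> {'ps': ['ps0:2222'], 'worker': ['worker0:2222', 'worker1:2222']}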
| 6,947 |
def save_books_readers(
book: Books,
reader: Readers):
"""Reader's books control and save in data base."""
flag = False
if book.readers is not None:
for i in range(len(book.readers)):
            if book.readers[i].id == reader.id:
flag = True
    if not flag:
book.readers.append(reader)
session.add(book)
session.commit()
| 6,948 |
def ranger_daemon_trigger(raw_args=None):
"""
    Pass in raw_args if you want to trigger via this method
eg: ['-zk', 'localhost:2181', '-s', 'myapp', '-host', 'localhost', '-p', '9090', '-e', 'stage', '-hcu', 'http://localhost:9091/healthcheck?pretty=true']
:param raw_args: sys.argv (arguments to the script)
"""
ranger_service_provider = initial_program_setup(raw_args)
ranger_service_provider.start(True)
| 6,949 |
def bq_client(context):
"""
Initialize and return BigQueryClient()
"""
return BigQueryClient(
context.resource_config["dataset"],
)
| 6,950 |
def longascnode(x, y, z, u, v, w):
"""Compute value of longitude of ascending node, computed as
the angle between x-axis and the vector n = (-hy,hx,0), where hx, hy, are
respectively, the x and y components of specific angular momentum vector, h.
Args:
x (float): x-component of position
y (float): y-component of position
z (float): z-component of position
u (float): x-component of velocity
v (float): y-component of velocity
w (float): z-component of velocity
Returns:
float: longitude of ascending node
"""
res = np.arctan2(y*w-z*v, x*w-z*u) # remember atan2 is atan2(y/x)
if res >= 0.0:
return res
else:
return res+2.0*np.pi
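# Hypothetical worked example: a polar orbit crossing the reference plane along +x.
# With position (1, 0, 0) and velocity (0, 0, 1), h = r x v = (0, -1, 0), so
# n = (-hy, hx, 0) points along +x and the longitude of the ascending node is 0.
longascnode(1.0, 0.0, 0.0, 0.0, 0.0, 1.0)  # -> 0.0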
| 6,951 |
def volumetric_roi_info(atlas_spec):
"""Returns a list of unique ROIs, their labels and centroids"""
if is_image(atlas_spec) and is_image_3D(atlas_spec):
if atlas_spec.__class__ in nibabel.all_image_classes:
atlas_labels = atlas_spec.get_fdata()
else:
atlas_labels = np.array(atlas_spec)
elif isinstance(atlas_spec, str):
atlas_path, atlas_name = get_atlas_path(atlas_spec)
atlas_labels = nibabel.load(atlas_path).get_fdata()
else:
raise ValueError('Unrecognized atlas specification!'
'Must be a predefined name, or'
'a preloaded image!')
# TODO names for ROIs are not read and used!
uniq_rois, roi_size, num_nodes = roi_info(atlas_labels, freesurfer_annot=False)
centroids = dict()
for roi in uniq_rois:
centroids[roi] = np.median(np.nonzero(atlas_labels==roi), axis=1)
return uniq_rois, centroids, atlas_labels
| 6,952 |
def predictFuture(bo:board, sn:snake, snakes:list, foods:list):
"""
Play forward (futuremoves) turns
Check for enemy moves before calculating route boards
==
bo: boardClass as board
sn: snakeClass as snake
snakes: list[] of snakes
==
return: none
(set board routing)
board.updateCell()
"""
# Update / modify routes
# Enemy kills us -- playing forward X turns
numfuture = CONST.lookAheadPredictFuture
# Update enemy future paths to depth of N = numfuture
bo.getEnemyFuture(snakes, numfuture)
# Check for any possible futures where we die
(rr, avoid) = checkFutureDeath(bo, sn, snakes, numfuture=numfuture)
# Update board to avoid these cells
them = []
for sid in snakes:
enemy = snakes[sid]
if (enemy.getType() == 'enemy'):
enemy_head = enemy.getHead()
them.append(enemy_head)
else:
head = enemy.getHead()
# TODO: Further modify this to capture closer & larger
same = True # include squares where we get there same time
closest = bo.closestDist(head, them, same=same)
# Update squares to avoid because of head on collisions
# Update squares to avoid because of head on deadend(deprecate)
if (len(avoid)):
# If enemy can get to location before us
# If enemy kill path found
# print(avoid)
for path in avoid:
# Set future markov board, where
# * length of steps reflects turn, and
# * routing logic looks at next markov board
for step in path:
# print("PREDICT FUTURE DEATH", step)
for i in range(0, len(path)):
bo.updateCell(path[i], CONST.routeSolid/2, i, replace=True)
bo.updateCell(path[i], CONST.routeSolid/2, i+1, replace=True)
# bo.updateCell(step, CONST.routeSolid/4, len(path)+1, replace=True)
# Update eating for snakes based on path
# Update cells that enemy snakes can get to before us
for sid in snakes:
# Ignore us -- we have better calculation because we know our route
if (snakes[sid].getType() != "us"):
# Uses every possible path based on self.predictEnemyMoves
paths = snakes[sid].getNextSteps()
length = snakes[sid].getLength()
#
threat = CONST.routeSolid/2
for path in paths:
# Todo: only paint squares in each path based on turn
for pt in path:
# Print out hazard for N = enemy length or width of board
for turn in range(0, min(length, bo.width)):
if not closest[pt[0], pt[1]]:
bo.updateCell(pt, threat, turn)
# bo.updateCell(pt, CONST.routeSolid/(turn+1), turn)
# threat = int(threat / 5)
food_in_route = 0
# Check if there is food one square from them ..
try:
food_path = 0
# Check if food is in future moves
for path in paths:
pt = path[0] # truncate to first point only for now
if pt in foods:
food_in_route = 1
# print("DEBUG ENEMY PATH", sid, pt, path, food_path) # lookAheadPredictFuture
except:
pass
# Check if snake ate this turn (don't we already accommodate?)
snakes[sid].setEatingFuture(food_in_route)
# print(sid, food_in_route)
# Performance monitoring
return rr
| 6,953 |
def test_return_complex_ref(namespace: TestNamespace) -> None:
"""Test returning a non-trivial reference."""
refH = namespace.addRegister("h")
refL = namespace.addRegister("l")
bitsHL = ConcatenatedBits(refL.bits, refH.bits)
slicedBits = SlicedBits(bitsHL, IntLiteral(0), 8)
namespace.addRetReference(Reference(slicedBits, IntType.u(8)))
code = createSimplifiedCode(namespace)
assertNodes(code.nodes, ())
assert len(code.returned) == 1
(retBits,) = code.returned
assert retBits.width == 8
# Note that we only simplify expressions, not references, so the
# reference itself is still complex. All we really check here is
# that code block creation doesn't break, but that is worthwhile
# in itself.
| 6,954 |
def convert_to_distance(primer_df, tm_opt, gc_opt, gc_clamp_opt=2):
"""
Convert tm, gc%, and gc_clamp to an absolute distance
(tm_dist, gc_dist, gc_clamp_dist)
away from optimum range. This makes it so that all features will need
to be minimized.
"""
primer_df['tm_dist'] = get_distance(
primer_df.tm.values, tm_opt, tm_opt)
primer_df['gc_dist'] = get_distance(
primer_df.gc.values, gc_opt['min'], gc_opt['max'])
primer_df['gc_clamp_dist'] = get_distance(
primer_df.gc_clamp.values, gc_clamp_opt, gc_clamp_opt)
# primer_df.drop(['tm', 'gc', 'gc_clamp'], axis=1, inplace=True)
return primer_df
| 6,955 |
def _tmap_error_detect(tmap: TensorMap) -> TensorMap:
"""Modifies tm so it returns it's mean unless previous tensor from file fails"""
new_tm = copy.deepcopy(tmap)
new_tm.shape = (1,)
new_tm.interpretation = Interpretation.CONTINUOUS
new_tm.channel_map = None
def tff(_: TensorMap, hd5: h5py.File, dependents=None):
return tmap.tensor_from_file(tmap, hd5, dependents).mean()
new_tm.tensor_from_file = tff
return new_tm
| 6,956 |
def get_filter(DetectorId=None, FilterName=None):
"""
Returns the details of the filter specified by the filter name.
See also: AWS API Documentation
Exceptions
:example: response = client.get_filter(
DetectorId='string',
FilterName='string'
)
:type DetectorId: string
:param DetectorId: [REQUIRED]\nThe unique ID of the detector that the filter is associated with.\n
:type FilterName: string
:param FilterName: [REQUIRED]\nThe name of the filter you want to get.\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string',
'Description': 'string',
'Action': 'NOOP'|'ARCHIVE',
'Rank': 123,
'FindingCriteria': {
'Criterion': {
'string': {
'Eq': [
'string',
],
'Neq': [
'string',
],
'Gt': 123,
'Gte': 123,
'Lt': 123,
'Lte': 123,
'Equals': [
'string',
],
'NotEquals': [
'string',
],
'GreaterThan': 123,
'GreaterThanOrEqual': 123,
'LessThan': 123,
'LessThanOrEqual': 123
}
}
},
'Tags': {
'string': 'string'
}
}
Response Structure
(dict) --
Name (string) --
The name of the filter.
Description (string) --
The description of the filter.
Action (string) --
Specifies the action that is to be applied to the findings that match the filter.
Rank (integer) --
Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings.
FindingCriteria (dict) --
Represents the criteria to be used in the filter for querying findings.
Criterion (dict) --
Represents a map of finding properties that match specified conditions and values when querying findings.
(string) --
(dict) --
Contains information about the condition.
Eq (list) --
Represents the equal condition to be applied to a single field when querying for findings.
(string) --
Neq (list) --
Represents the not equal condition to be applied to a single field when querying for findings.
(string) --
Gt (integer) --
Represents a greater than condition to be applied to a single field when querying for findings.
Gte (integer) --
Represents a greater than or equal condition to be applied to a single field when querying for findings.
Lt (integer) --
Represents a less than condition to be applied to a single field when querying for findings.
Lte (integer) --
Represents a less than or equal condition to be applied to a single field when querying for findings.
Equals (list) --
Represents an equal condition to be applied to a single field when querying for findings.
(string) --
NotEquals (list) --
Represents a not equal condition to be applied to a single field when querying for findings.
(string) --
GreaterThan (integer) --
Represents a greater than condition to be applied to a single field when querying for findings.
GreaterThanOrEqual (integer) --
Represents a greater than or equal condition to be applied to a single field when querying for findings.
LessThan (integer) --
Represents a less than condition to be applied to a single field when querying for findings.
LessThanOrEqual (integer) --
Represents a less than or equal condition to be applied to a single field when querying for findings.
Tags (dict) --
The tags of the filter resource.
(string) --
(string) --
Exceptions
GuardDuty.Client.exceptions.BadRequestException
GuardDuty.Client.exceptions.InternalServerErrorException
:return: {
'Name': 'string',
'Description': 'string',
'Action': 'NOOP'|'ARCHIVE',
'Rank': 123,
'FindingCriteria': {
'Criterion': {
'string': {
'Eq': [
'string',
],
'Neq': [
'string',
],
'Gt': 123,
'Gte': 123,
'Lt': 123,
'Lte': 123,
'Equals': [
'string',
],
'NotEquals': [
'string',
],
'GreaterThan': 123,
'GreaterThanOrEqual': 123,
'LessThan': 123,
'LessThanOrEqual': 123
}
}
},
'Tags': {
'string': 'string'
}
}
:returns:
(string) --
"""
pass
| 6,957 |
def generate_dataset(config, ahead=1, data_path=None):
"""
Generates the dataset for training, test and validation
:param ahead: number of steps ahead for prediction
:return:
"""
dataset = config['dataset']
datanames = config['datanames']
datasize = config['datasize']
testsize = config['testsize']
vars = config['vars']
lag = config['lag']
btc = {}
# Reads numpy arrays for all sites and keep only selected columns
btcdata = np.load(data_path + 'bitcoin_price_history.npz')
for d in datanames:
btc[d] = btcdata[d]
if vars is not None:
btc[d] = btc[d][:, vars]
if dataset == 0:
return _generate_dataset_one_var(btc[datanames[0]][:, WEIGHTED_PRICE_INDEX].reshape(-1, 1), datasize, testsize, lag=lag, ahead=ahead)
# Just add more options to generate datasets with more than one variable for predicting one value
# or a sequence of values
raise NameError('ERROR: No such dataset type')
| 6,958 |
def search(isamAppliance, name, check_mode=False, force=False):
"""
Search UUID for named Web Service connection
"""
ret_obj = get_all(isamAppliance)
return_obj = isamAppliance.create_return_object()
return_obj["warnings"] = ret_obj["warnings"]
for obj in ret_obj['data']:
if obj['name'] == name:
logger.info("Found Web Service connection {0} id: {1}".format(name, obj['uuid']))
return_obj['data'] = obj['uuid']
return_obj['rc'] = 0
return return_obj
| 6,959 |
def flask_app(initialize_configuration) -> Flask:
"""
Fixture for making a Flask instance, to be able to access application context manager.
This is not possible with a FlaskClient, and we need the context manager for creating
JWT tokens when is required.
@return: A Flask instance.
"""
flask_application = vcf_handler_api('TESTING')
flask_application.config['TESTING'] = True
flask_application.config['PROPAGATE_EXCEPTIONS'] = False
return flask_application
| 6,960 |
def test_customize_tag():
"""
>>> d = {'name':'uliweb'}
>>> dirs = [os.path.join(path, 'templates', x) for x in ['a']]
>>> print (template_file('new_tag.html', d, dirs=dirs))
<BLANKLINE>
uliweb
"""
| 6,961 |
def http_request(method, url_suffix, params=None, data=None, headers=HEADERS, safe=False):
"""
A wrapper for requests lib to send our requests and handle requests and responses better.
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:return: Returns the http request response json
:rtype: ``dict``
"""
headers['Authorization'] = get_token()
url = BASE_URL + url_suffix
try:
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
# Try to create a new token
if res.status_code == 401:
headers['Authorization'] = get_token(new_token=True)
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
except requests.exceptions.RequestException:
return_error('Error in connection to the server. Please make sure you entered the URL correctly.')
# Handle error responses gracefully
if res.status_code not in {200, 201, 202}:
        try:
            result_msg = res.json()
        except ValueError:
            result_msg = None
        reason = result_msg if result_msg else res.reason
        err_msg = f'Error in API call. code:{res.status_code}; reason: {reason}'
if safe:
return None
return_error(err_msg)
return res.json()
| 6,962 |
def train(train_loader, model, criterion, average, optimizer, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
for i, (images, labels, _) in enumerate(train_loader):
data_time.update(time.time() - end)
# modify labels with their new indexes - we are not using all the labels anymore at the training
for ind, label in enumerate(labels):
labels[ind] = opt.original_index.index(label)
# images = torch.cat([images[0], images[1]], dim=0)
if torch.cuda.is_available():
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
#idxs = idxs.cuda(non_blocking=True)
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, i, len(train_loader), optimizer)
# compute loss
features, _ = model(images)
bs = features.size(0)
outs, prob = average(features, i)
loss = criterion(outs)
# update metric
losses.update(loss.item(), bsz)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (i + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, i + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg
| 6,963 |
def FP(target, prediction):
"""
False positives.
:param target: target value
:param prediction: prediction value
    :return: number of false positives (target is 0 but the rounded prediction is positive)
"""
return ((target == 0).float() * prediction.float().round()).sum()
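# Worked example of FP(), executed only when this module is run directly.
# The torch import is local to the demo; tensor values are illustrative.
if __name__ == "__main__":
    import torch

    demo_target = torch.tensor([0.0, 0.0, 1.0, 0.0])
    demo_prediction = torch.tensor([0.9, 0.2, 0.8, 0.6])
    # Rounded predictions are [1, 0, 1, 1]; indices 0 and 3 are false positives.
    assert FP(demo_target, demo_prediction).item() == 2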
| 6,964 |
def RunJ2ObjC(java, jvm_flags, j2objc, main_class, output_file_path,
j2objc_args, source_paths, files_to_translate):
"""Runs J2ObjC transpiler to translate Java source files to ObjC.
Args:
java: The path of the Java executable.
jvm_flags: A comma-separated list of flags to pass to JVM.
j2objc: The deploy jar of J2ObjC.
main_class: The J2ObjC main class to invoke.
output_file_path: The output file directory.
j2objc_args: A list of args to pass to J2ObjC transpiler.
source_paths: A list of directories that contain sources to translate.
files_to_translate: A list of relative paths (relative to source_paths) that
point to sources to translate.
Returns:
None.
"""
j2objc_args.extend(['-sourcepath', ':'.join(source_paths)])
j2objc_args.extend(['-d', output_file_path])
j2objc_args.extend(files_to_translate)
param_file_content = ' '.join(j2objc_args).encode('utf-8')
fd = None
param_filename = None
try:
fd, param_filename = tempfile.mkstemp(text=True)
os.write(fd, param_file_content)
finally:
if fd:
os.close(fd)
try:
j2objc_cmd = [java]
j2objc_cmd.extend([f_ for f_ in jvm_flags.split(',') if f_])
j2objc_cmd.extend(_ADD_EXPORTS)
j2objc_cmd.extend(['-cp', j2objc, main_class])
j2objc_cmd.append('@%s' % param_filename)
subprocess.check_call(j2objc_cmd, stderr=subprocess.STDOUT)
finally:
if param_filename:
os.remove(param_filename)
| 6,965 |
def get_angle(A, B, C):
"""
Return the angle at C (in radians) for the triangle formed by A, B, C
a, b, c are lengths
            C
           / \
         b/   \a
         /     \
        A-------B
            c
"""
(col_A, row_A) = A
(col_B, row_B) = B
(col_C, row_C) = C
a = pixel_distance(C, B)
b = pixel_distance(A, C)
c = pixel_distance(A, B)
try:
cos_angle = (math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b)
except ZeroDivisionError as e:
log.warning(
"get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f" % (A, B, C, a, b, c)
)
raise e
# If CA and CB are very long and the angle at C very narrow we can get an
# invalid cos_angle which will cause math.acos() to raise a ValueError exception
if cos_angle > 1:
cos_angle = 1
elif cos_angle < -1:
cos_angle = -1
angle_ACB = math.acos(cos_angle)
# log.info("get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f, cos_angle %s, angle_ACB %s" %
# (A, B, C, a, b, c, pformat(cos_angle), int(math.degrees(angle_ACB))))
return angle_ACB
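# Worked example of get_angle(), executed only when this module is run directly.
# It assumes pixel_distance() (defined elsewhere in this module) returns the
# Euclidean distance between two (col, row) points.
if __name__ == "__main__":
    # 3-4-5 right triangle: the angle at C is acos(0.6) ~= 0.927 rad (~53.13 deg).
    demo_A, demo_B, demo_C = (0, 0), (4, 0), (0, 3)
    print(math.degrees(get_angle(demo_A, demo_B, demo_C)))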
| 6,966 |
def single_labels(interesting_class_id):
"""
:param interesting_class_id: integer in range [0,2] to specify class
:return: number of labels for the "interesting_class"
"""
def s_l(y_true, y_pred):
class_id_true = K.argmax(y_true, axis=-1)
accuracy_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')
return K.cast(K.maximum(K.sum(accuracy_mask), 1), 'int32')
return s_l
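# Usage sketch (illustrative only): the metric factory plugs into a Keras model
# compile step; the model and loss below are placeholders.
#
#     model.compile(optimizer='adam',
#                   loss='categorical_crossentropy',
#                   metrics=[single_labels(interesting_class_id=1)])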
| 6,967 |
def get_iam_policy(client=None, **kwargs):
"""
    Return the IAM policy bindings for a service account.

    Expected keyword argument:
        service_account='string'  # resource name of the service account
"""
service_account=kwargs.pop('service_account')
resp = client.projects().serviceAccounts().getIamPolicy(
resource=service_account).execute()
# TODO(supertom): err handling, check if 'bindings' is correct
if 'bindings' in resp:
return resp['bindings']
else:
return None
| 6,968 |
def get_rdf_lables(obj_list):
"""Get rdf:labels from a given list of objects."""
rdf_labels = []
for obj in obj_list:
rdf_labels.append(obj['rdf:label'])
return rdf_labels
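# Worked example, executed only when this module is run directly; the input
# objects are illustrative.
if __name__ == "__main__":
    demo_objs = [{'rdf:label': 'Person'}, {'rdf:label': 'Place'}]
    assert get_rdf_lables(demo_objs) == ['Person', 'Place']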
| 6,969 |
def _create_model_fn(pipeline_proto, is_chief=True):
"""Creates a callable that build the model.
Args:
    pipeline_proto: an instance of pipeline_pb2.Pipeline.
    is_chief: whether this process is the chief worker; used to create the
      SyncReplicasOptimizer session run hook.
Returns:
model_fn: a callable that takes [features, labels, mode, params] as inputs.
"""
if not isinstance(pipeline_proto, pipeline_pb2.Pipeline):
raise ValueError('pipeline_proto has to be an instance of Pipeline.')
def _model_fn(features, labels, mode, params):
"""
Args:
features: a dict mapping from names to tensors, denoting the features.
labels: a dict mapping from names to tensors, denoting the labels.
mode: mode parameter required by the estimator.
params: additional parameters used for creating the model.
Returns:
an instance of EstimatorSpec.
"""
is_training = (tf.estimator.ModeKeys.TRAIN == mode)
tf.logging.info("Current mode is %s, is_training=%s", mode, is_training)
model = builder.build(pipeline_proto.model, is_training)
predictions = model.build_prediction(features)
# Get scaffold and variables_to_train.
scaffold = model.get_scaffold()
variables_to_train = model.get_variables_to_train()
# Compute losses. Note: variables created in build_loss are not trainable.
losses = model.build_loss(predictions, examples=features)
for name, loss in losses.items():
tf.losses.add_loss(loss)
tf.summary.scalar('loss/' + name, loss)
for loss in tf.losses.get_regularization_losses():
tf.summary.scalar(
"loss/regularization/" + '/'.join(loss.op.name.split('/')[:2]), loss)
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
train_op = None
eval_metric_ops = None
training_hooks = []
if tf.estimator.ModeKeys.TRAIN == mode:
train_config = pipeline_proto.train_config
# Create the optimizer.
learning_rate = train_config.learning_rate
global_step = tf.train.get_or_create_global_step()
if train_config.HasField('learning_rate_decay'):
learning_rate = tf.train.exponential_decay(
learning_rate,
global_step,
train_config.learning_rate_decay.decay_steps,
train_config.learning_rate_decay.decay_rate,
staircase=train_config.learning_rate_decay.staircase)
tf.summary.scalar('loss/learning_rate', learning_rate)
optimizer = training_utils.build_optimizer(
train_config.optimizer, learning_rate=learning_rate)
# Setup the replicas_hook for the SyncReplicasOptimizer.
if train_config.sync_replicas:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=4)
sync_replicas_hook = optimizer.make_session_run_hook(is_chief)
training_hooks.append(sync_replicas_hook)
# Enable MovingAverageOptimizer if specified.
if train_config.HasField('moving_average_decay'):
optimizer = tf.contrib.opt.MovingAverageOptimizer(
optimizer, average_decay=train_config.moving_average_decay)
# Apply gradient multipliers.
trainable_variables = []
gradient_multipliers = {}
for var in variables_to_train:
add_to_trainable_variables = True
for multiplier in train_config.gradient_multiplier:
if var.op.name.startswith(multiplier.scope):
if var.op.name in gradient_multipliers:
tf.logging.warn('Override gradient multiplier: %s', var.op.name)
gradient_multipliers[var.op.name] = multiplier.multiplier
if multiplier.multiplier > 0:
add_to_trainable_variables = True
else:
add_to_trainable_variables = False
# Add to trainable variables.
if add_to_trainable_variables:
trainable_variables.append(var)
tf.logging.info('Variable to train: %s, %s', var.op.name,
var.get_shape())
elif var.op.name in gradient_multipliers:
del gradient_multipliers[var.op.name]
tf.logging.info('Apply gradient multipliers: \n%s',
json.dumps(gradient_multipliers, indent=2))
def transform_grads_fn(grads):
if gradient_multipliers:
grads = tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
if train_config.HasField('max_gradient_norm'):
grads = tf.contrib.training.clip_gradient_norms(
grads, max_norm=train_config.max_gradient_norm)
return grads
# The train_op is required for mode `TRAIN`.
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
variables_to_train=trainable_variables,
transform_grads_fn=transform_grads_fn,
summarize_gradients=True)
if train_config.HasField('moving_average_decay'):
scaffold = tf.train.Scaffold(
saver=optimizer.swapping_saver(), copy_from_scaffold=scaffold)
elif tf.estimator.ModeKeys.EVAL == mode:
# The eval_metric_ops is optional for mode `EVAL`.
eval_metric_ops = model.build_evaluation(predictions, examples=features)
elif tf.estimator.ModeKeys.PREDICT == mode:
# The predictions is required for mode `PREDICT`.
predictions.update(features)
predictions.update({'summary': tf.summary.merge_all()})
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
training_hooks=training_hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
return _model_fn
| 6,970 |
def boolean_automatic(meshes, operation, **kwargs):
"""
Automatically pick an engine for booleans based on availability.
Parameters
--------------
meshes : list of Trimesh
Meshes to be booleaned
operation : str
Type of boolean, i.e. 'union', 'intersection', 'difference'
Returns
---------------
result : trimesh.Trimesh
Result of boolean operation
"""
if interfaces.blender.exists:
result = interfaces.blender.boolean(meshes, operation, **kwargs)
elif interfaces.scad.exists:
result = interfaces.scad.boolean(meshes, operation, **kwargs)
else:
raise ValueError('No backends available for boolean operations!')
return result
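# Usage sketch (illustrative only): `a` and `b` stand for two overlapping
# trimesh.Trimesh objects; Blender or OpenSCAD must be installed for the
# corresponding backend to be picked.
#
#     union_mesh = boolean_automatic([a, b], 'union')
#     diff_mesh = boolean_automatic([a, b], 'difference')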
| 6,971 |
def get_context(work=None):
"""Get a concrete Context object.
Args:
work (gmx.workflow.WorkSpec): runnable work as a valid gmx.workflow.WorkSpec object
Returns:
An object implementing the :py:class:`gmx.context.Context` interface, if possible.
Raises:
gmx.exceptions.ValueError if an appropriate context for ``work`` could not be loaded.
If work is provided, return a Context object capable of running the provided work or produce an error.
The semantics for finding Context implementations needs more consideration, and a more informative exception
is likely possible.
A Context can run the provided work if
    * the Context can resolve all operations specified in the elements
* the Context supports DAG topologies implied by the network of dependencies
* the Context supports features required by the elements with the specified parameters,
such as synchronous array jobs.
"""
# We need to define an interface for WorkSpec objects so that we don't need
# to rely on typing and inter-module dependencies.
from .workflow import WorkSpec
workspec = None
if work is not None:
if isinstance(work, WorkSpec):
workspec = work
elif hasattr(work, 'workspec') and isinstance(work.workspec,
WorkSpec):
workspec = work.workspec
else:
raise exceptions.ValueError('work argument must provide a gmx.workflow.WorkSpec.')
if workspec is not None and \
hasattr(workspec, '_context') and \
workspec._context is not None:
context = workspec._context
else:
context = Context(work=workspec)
return context
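# Usage sketch (illustrative only): the work specification below is assumed to
# come from the gmx.workflow helpers; the input file name is hypothetical.
#
#     md = gmx.workflow.from_tpr('topol.tpr')
#     context = get_context(md)
#     with context as session:
#         session.run()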
| 6,972 |
def test_data():
"""Get the `CIFAR-10` test data."""
global _MEAN # pylint: disable=global-statement
_np.random.seed(1)
view = _skdc10.view.OfficialImageClassificationTask()
permutation = _np.random.permutation(range(10000))
if _MEAN is None:
_MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)
return ((view.test.x[:10000, :][permutation, :] - _MEAN).
transpose((0, 3, 1, 2)).astype('float32'),
view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))
| 6,973 |
def plotHistogram(dataframe,label):
"""
:param dataframe: dataframe object of data
    :param label: name of continuous target variable
:return: None
"""
plt.hist(dataframe[label], bins=10)
plt.show()
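# Usage sketch (illustrative only): the file and column names are hypothetical;
# pandas is assumed to be imported in this module alongside matplotlib.
#
#     df = pd.read_csv('housing.csv')
#     plotHistogram(df, 'price')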
| 6,974 |
def normalize_key_combo(key_combo):
"""Normalize key combination to make it easily comparable.
All aliases are converted and modifier orders are fixed to:
Control, Alt, Shift, Meta
Letters will always be read as upper-case.
Due to the native implementation of the key system, Shift pressed in
certain key combinations may yield inconsistent or unexpected results.
Therefore, it is not recommended to use Shift with non-letter keys. On OSX,
Control is swapped with Meta such that pressing Command reads as Control.
Parameters
----------
key_combo : str
Key combination.
Returns
-------
normalized_key_combo : str
Normalized key combination.
"""
key, modifiers = parse_key_combo(key_combo)
if len(key) != 1 and key not in SPECIAL_KEYS:
raise TypeError(f'invalid key {key}')
for modifier in modifiers:
if modifier not in MODIFIER_KEYS:
raise TypeError(f'invalid modifier key {modifier}')
return components_to_key_combo(key, modifiers)
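# Usage sketch (illustrative only): the exact alias handling lives in
# parse_key_combo/components_to_key_combo, so the strings below show the
# intended normalization rather than guaranteed output.
#
#     normalize_key_combo('Shift-Control-a')   # -> 'Control-Shift-A'
#     normalize_key_combo('Meta-Alt-Up')       # -> 'Alt-Meta-Up'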
| 6,975 |
def set_device_parameters(request):
"""Set up the class."""
if NAPALM_TEST_MOCK:
driver = request.cls.patched_driver
else:
driver = request.cls.driver
request.cls.device = driver(
NAPALM_HOSTNAME,
NAPALM_USERNAME,
NAPALM_PASSWORD,
timeout=60,
optional_args=NAPALM_OPTIONAL_ARGS,
)
request.cls.device.open()
| 6,976 |
def save_esco(G, features: bool):
""" Saving the graph, wither with or without features """
if features:
out_file = esco_out_file_features
enrich_esco_graph_with_features(G)
else:
out_file = esco_out_file
# no idea which labels to use and why... DGL doc could be a bit more explicit here!
graph_labels = {"glabel": torch.tensor([0])}
save_graphs(str(out_file), [G], graph_labels)
| 6,977 |
def shape_to_np(shape, dtype="int"):
"""
Used to convert from a shape object returned by dlib to an np array
"""
return np.array([[shape.part(i).x, shape.part(i).y] for i in range(68)], dtype=dtype)
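# Usage sketch (illustrative only): assumes dlib's 68-point landmark predictor;
# the model path and image variables are hypothetical.
#
#     detector = dlib.get_frontal_face_detector()
#     predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
#     rect = detector(gray, 1)[0]
#     landmarks = shape_to_np(predictor(gray, rect))   # (68, 2) int array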
| 6,978 |
def inet_pton(space, address):
""" Converts a human readable IP
address to its packed in_addr representation"""
n = rsocket.inet_pton(rsocket.AF_INET, address)
return space.newstr(n)
| 6,979 |
def delete_routing_segmentation_maps_from_source_segment(
self,
segment_id: int,
) -> bool:
"""Delete D-NAT policies for specific source segment
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - vrf
- DELETE
- /vrf/config/maps/{srcSegmentId}
:param segment_id: Numeric id of routing segment
:type segment_id: int
:return: Returns True/False based on successful call
:rtype: bool
"""
return self._delete(
"/vrf/config/maps/{}".format(segment_id),
expected_status=[204],
return_type="bool",
)
| 6,980 |
def load_bounding_boxes(dataset_dir):
"""
Load bounding boxes and return a dictionary of file names and corresponding bounding boxes
"""
# Paths
bounding_boxes_path = os.path.join(dataset_dir, 'bounding_boxes.txt')
file_paths_path = os.path.join(dataset_dir, 'images.txt')
# Read bounding_boxes.txt and images.txt file
df_bounding_boxes = pd.read_csv(bounding_boxes_path,
delim_whitespace=True, header=None).astype(int)
df_file_names = pd.read_csv(file_paths_path, delim_whitespace=True, header=None)
# Create a list of file names
file_names = df_file_names[1].tolist()
# Create a dictionary of file_names and bounding boxes
filename_boundingbox_dict = {img_file[:-4]: [] for img_file in file_names[:2]}
# Assign a bounding box to the corresponding image
for i in range(0, len(file_names)):
# Get the bounding box
bounding_box = df_bounding_boxes.iloc[i][1:].tolist()
key = file_names[i][:-4]
filename_boundingbox_dict[key] = bounding_box
return filename_boundingbox_dict
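# Usage sketch (illustrative only): the dataset directory is hypothetical but is
# expected to contain the bounding_boxes.txt and images.txt files read above.
#
#     boxes = load_bounding_boxes('data/birds')
#     some_image = next(iter(boxes))
#     x, y, width, height = boxes[some_image]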
| 6,981 |
def _replace_generator_docstring(package_path: Path, replacement: str) -> None:
"""
Replace the generator docstring in the __init__.py module.
(see _parse_generator_docstring for more details).
    :param package_path: path to the protocol package directory.
:param replacement: the replacement to use.
"""
protocol_name = package_path.name
init_module = Path(PROTOCOLS) / protocol_name / "__init__.py"
content = init_module.read_text()
content = re.sub(PROTOCOL_GENERATOR_DOCSTRING_REGEX, replacement, content)
init_module.write_text(content)
| 6,982 |
def _identifier(name):
"""
:param name: string
:return: name in lower case and with '_' instead of '-'
:rtype: string
"""
if name.isidentifier():
return name
return name.lower().lstrip('0123456789. ').replace('-', '_')
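# Worked examples, executed only when this module is run directly; inputs are
# illustrative.
if __name__ == "__main__":
    assert _identifier('valid_name') == 'valid_name'
    assert _identifier('My-Name') == 'my_name'
    assert _identifier('3. Data-Set') == 'data_set'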
| 6,983 |
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> print np.roots(coeff)
[-0.3125+0.46351241j -0.3125-0.46351241j]
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
| 6,984 |
def test_empty_jailer_id(test_microvm_with_ssh):
"""Test that the jailer ID cannot be empty."""
test_microvm = test_microvm_with_ssh
# Set the jailer ID to None.
test_microvm.jailer.jailer_id = ""
# pylint: disable=W0703
try:
test_microvm.spawn()
# If the exception is not thrown, it means that Firecracker was
# started successfully, hence there's a bug in the code due to which
# we can set an empty ID.
assert False
except Exception as err:
expected_err = "Jailer error: Invalid instance ID: invalid len (0);" \
" the length must be between 1 and 64"
assert expected_err in str(err)
| 6,985 |
def download_object_file(bucket_name, source_filename=DEFAULT_MODEL_FILE, dest_filename=DEFAULT_MODEL_FILE):
"""Downloads file from bucket and saves in /tmp dir.
Args:
bucket_name (string): Bucket name.
source_filename (string): Name of file stored in bucket. [default: model.joblib]
dest_filename (string): Name of file stored in /tmp dir. [default: model.joblib]
"""
print(f"Downloading {bucket_name}/{source_filename} to /tmp/{dest_filename}")
download_start = time.process_time()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_filename)
blob.download_to_filename(f"/tmp/{dest_filename}")
download_stop = time.process_time()
print(f"{'Model download took':25}: {download_stop-download_start}")
| 6,986 |
def test_unsilence_errors(tmp_path, capfd):
"""Check that HDF5 errors can be muted/unmuted from h5py"""
filename = tmp_path / 'test.h5'
# Unmute HDF5 errors
try:
h5py._errors.unsilence_errors()
_access_not_existing_object(filename)
captured = capfd.readouterr()
assert captured.err != ''
assert captured.out == ''
# Mute HDF5 errors
finally:
h5py._errors.silence_errors()
_access_not_existing_object(filename)
captured = capfd.readouterr()
assert captured.err == ''
assert captured.out == ''
| 6,987 |
def calculate_trade_from_swaps(
swaps: List[AMMSwap],
trade_index: int = 0,
) -> AMMTrade:
"""Given a list of 1 or more AMMSwap (swap) return an AMMTrade (trade).
The trade is calculated using the first swap token (QUOTE) and last swap
token (BASE). Be aware that any token data in between will be ignored for
calculating the trade.
Examples:
[USDC -> AMPL] BASE_QUOTE pair is AMPL_USDC.
[USDC -> AMPL, AMPL -> WETH] BASE_QUOTE pair is WETH_USDC.
[USDC -> AMPL, AMPL -> WETH, WETH -> USDC] BASE_QUOTE pair is USDC_USDC.
May raise DeserializationError
"""
assert len(swaps) != 0, "Swaps can't be an empty list here"
if swaps[0].amount0_in == ZERO:
# Prevent a division by zero error when creating the trade.
# Swaps with `tokenIn` amount (<AMMSwap>.amount0_in) equals to zero are
# not expected nor supported. The function `deserialize_swap` will raise
# a DeserializationError, preventing to store them in the DB. In case
# of having a zero amount it means the db data was corrupted.
log.error(
'Failed to deserialize swap from db. First swap amount0_in is zero',
swaps=swaps,
)
raise DeserializationError('First swap amount0_in is zero.')
amm_trade = AMMTrade(
trade_type=TradeType.BUY, # AMMTrade is always a buy
base_asset=swaps[-1].token1,
quote_asset=swaps[0].token0,
amount=swaps[-1].amount1_out,
rate=Price(swaps[0].amount0_in / swaps[-1].amount1_out),
swaps=swaps,
trade_index=trade_index,
)
return amm_trade
| 6,988 |
def _show_scheduled_roles(account_number: str) -> None:
"""
    Show scheduled repos for a given account. For each scheduled role, show whether the scheduled time has elapsed.
"""
role_ids = get_all_role_ids_for_account(account_number)
roles = RoleList.from_ids(role_ids)
# filter to show only roles that are scheduled
roles = roles.get_active().get_scheduled()
header = ["Role name", "Scheduled", "Scheduled Time Elapsed?"]
rows = []
curtime = int(time.time())
for role in roles:
rows.append(
[
role.role_name,
dt.fromtimestamp(role.repo_scheduled).strftime("%Y-%m-%d %H:%M"),
role.repo_scheduled < curtime,
]
)
print(tabulate(rows, headers=header))
| 6,989 |
def tokenize(s):
"""
Tokenize a string.
Args:
s: String to be tokenized.
Returns:
A list of words as the result of tokenization.
"""
#return s.split(" ")
return nltk.word_tokenize(s)
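# Worked example, executed only when this module is run directly. Requires the
# NLTK 'punkt' tokenizer models (nltk.download('punkt')).
if __name__ == "__main__":
    print(tokenize("Don't panic, it's fine."))
    # Expected (Treebank-style): ['Do', "n't", 'panic', ',', 'it', "'s", 'fine', '.']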
| 6,990 |
def evaluate(request):
"""Eval view that shows how many times each entry was tracked"""
# default filter
end_date = datetime.date.today()
    # Subtract roughly one month; using month - 1 directly breaks in January and
    # on days the previous month does not have.
    start_date = end_date - datetime.timedelta(days=30)
num_entries = 5
# get custom filter values from form
if request.method == 'POST':
form = PlotForm(request.POST)
if form.is_valid():
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
num_entries = form.cleaned_data['num_entries']
# or load empty form
else:
form = PlotForm(initial={'start_date': start_date, 'end_date': end_date, 'num_entries': num_entries})
# prepare chart data
labels = []
chart_data = []
entry_counts = most_frequent_entries(request.user, start_date, end_date, number=num_entries)
for entry, count in entry_counts.items():
labels.append(entry)
chart_data.append(count)
context = {
'form': form,
# for chart.js
'labels': labels,
'chart_label': 'Num. Entries',
'chart_data': chart_data,
'chart_title': f'Top {num_entries} Most Common Entries',
}
return render(request, 'app/eval.html', context)
| 6,991 |
def plot_af_correlation(vf1, vf2, ax=None, figsize=None):
"""
Create a scatter plot showing the correlation of allele frequency between
two VCF files.
This method will exclude the following sites:
- non-onverlapping sites
- multiallelic sites
- sites with one or more missing genotypes
Parameters
----------
vf1, vf2 : VcfFrame
VcfFrame objects to be compared.
ax : matplotlib.axes.Axes, optional
        Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
:context: close-figs
>>> from fuc import pyvcf, common
>>> import matplotlib.pyplot as plt
>>> data1 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.', '.'],
... 'REF': ['G', 'T', 'G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT', 'GT', 'GT', 'GT', 'GT'],
... 'A': ['0/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'B': ['0/0:30', '0/0', '0/1', '0/1', '1/1', '0/1'],
... 'C': ['1/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'D': ['0/0:30', '0/0', '0/0', '0/0', '1/1', '0/1'],
... 'E': ['0/0:30', '0/0', '0/0', '1/2', '1/1', '0/1'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.'],
... 'REF': ['T', 'G', 'T', 'A', 'C'],
... 'ALT': ['C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT', 'GT'],
... 'F': ['0/0', '0/1', '0/1', '1/1', '0/0'],
... 'G': ['0/0', '0/1', '0/1', '1/1', './.'],
... 'H': ['0/0', '0/1', '0/1', '1/1', '1/1'],
... 'I': ['0/0', '0/1', '0/0', '1/1', '1/1'],
... 'J': ['0/0', '0/1', '1/2', '1/1', '0/1'],
... }
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> pyvcf.plot_af_correlation(vf1, vf2)
>>> plt.tight_layout()
"""
def one_gt(g):
alleles = g.split(':')[0].split('/')
alleles = [x for x in alleles if x != '0']
return len(alleles)
def one_row(r):
locus = f'{r.CHROM}-{r.POS}-{r.REF}-{r.ALT}'
ac = r[9:].apply(one_gt).sum()
if 'X' in r.CHROM or 'Y' in r.CHROM:
total = len(r[9:])
else:
total = len(r[9:]) * 2
af = ac / total
return pd.Series([locus, af])
s1 = vf1.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s2 = vf2.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s1.columns = ['Locus', 'First']
s2.columns = ['Locus', 'Second']
s1 = s1.set_index('Locus')
s2 = s2.set_index('Locus')
df = pd.concat([s1, s2], axis=1).dropna()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.scatterplot(data=df, x='First', y='Second', ax=ax)
return ax
| 6,992 |
def significant_pc_test(adata, p_cutoff=0.1, update=True, obsm='X_pca', downsample=50000):
"""
Parameters
----------
    adata
        AnnData object with a PC matrix stored in adata.obsm[obsm].
    p_cutoff
        P-value cutoff for the Kolmogorov-Smirnov test between consecutive PCs.
    update
        If True, trim adata.obsm[obsm] to the significant components.
    obsm
        Key in adata.obsm that stores the PC matrix.
    downsample
        Maximum number of cells used for the test; larger matrices are
        randomly downsampled to this size.
    Returns
    -------
    n_components
        Number of principal components that passed the p-value cutoff.
"""
pcs = adata.obsm[obsm]
if pcs.shape[0] > downsample:
print(f'Downsample PC matrix to {downsample} cells to calculate significant PC components')
use_pcs = pd.DataFrame(pcs).sample(downsample).values
else:
use_pcs = pcs
i = 0
for i in range(use_pcs.shape[1] - 1):
cur_pc = use_pcs[:, i]
next_pc = use_pcs[:, i + 1]
p = ks_2samp(cur_pc, next_pc).pvalue
if p > p_cutoff:
break
n_components = min(i + 1, use_pcs.shape[1])
print(f'{n_components} components passed P cutoff of {p_cutoff}.')
if update:
adata.obsm[obsm] = pcs[:, :n_components]
print(f"Changing adata.obsm['X_pca'] from shape {pcs.shape} to {adata.obsm[obsm].shape}")
return n_components
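# Usage sketch (illustrative only): assumes an AnnData object whose PCA has
# already been computed, e.g. with scanpy.
#
#     sc.pp.pca(adata, n_comps=100)
#     n_sig = significant_pc_test(adata, p_cutoff=0.1)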
| 6,993 |
def files_from_output(folder):
"""Get list of result files from output log."""
files = []
with open(path.join(folder, "OUTPUT.out")) as out_file:
for line in tqdm(out_file.readlines(), desc="Read files from output"):
if line.find("+ -o") != -1:
files.append(line.replace(
"+ -o\t", "").replace("results/", "").strip())
elif line.find("+++ TASK ->") != -1 and line.find("output=") != -1:
chunks = line.split("\t")
for chunk in chunks:
if chunk.find("output=") != -1:
files.append(chunk.replace("output=", "").strip())
return files
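# Usage sketch (illustrative only): the folder path is hypothetical; it must
# contain the OUTPUT.out log parsed above.
#
#     result_files = files_from_output('runs/experiment_01')
#     print(len(result_files), 'result files referenced in OUTPUT.out')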
| 6,994 |
def act_tags_and_rootlabels():
"""
Create a CSV file named swda-actags-and-rootlabels.csv in
which each utterance utt has its own row consisting of just
utt.act_tag, utt.damsl_act_tag(), and utt.trees[0].node
restricting attention to cases in which utt has a single,
perfectly matching tree associated with it.
"""
csvwriter = csv.writer(open('swda-actags-and-rootlabels.csv', 'wt'))
csvwriter.writerow(['ActTag', 'DamslActTag', 'RootNode'])
corpus = CorpusReader('swda')
for utt in corpus.iter_utterances(display_progress=True):
if utt.tree_is_perfect_match():
csvwriter.writerow([utt.act_tag, utt.damsl_act_tag(), utt.trees[0].node])
| 6,995 |
def validate_file_name(file_name):
"""Validate file name."""
if not file_name:
raise TypeError("Invalid filename!")
if not isinstance(file_name, unicode):
raise ValueError("Invalid filename!")
if not os.path.isfile(file_name):
        raise IOError("File does not exist: " + file_name)
| 6,996 |
def unique_hurricanes(hurdat):
"""
Returns header info for each unique hurricanes in HURDAT2-formatted text
file hurdat.
"""
#split on returns if hurdat is not a list
if not isinstance(hurdat, list):
hurdat = hurdat.split('\n')
header_rows = [parse_header(
line, line_num
) for line_num, line in enumerate(hurdat) if parse_header(
line, line_num
)]
    keys = [list(h.keys())[0] for h in header_rows]
    values = [list(h.values())[0] for h in header_rows]
return {k: v for k, v in zip(keys, values)}
| 6,997 |
def find_package_data():
"""
Find package_data.
"""
theme_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len('jupyterlab' + os.sep)
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len('jupyterlab' + os.sep)
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {
'jupyterlab': ['build/*', '*.js', 'package.app.json',
'yarn.lock', 'yarn.app.lock', '.yarnrc'
] + theme_dirs + schema_dirs
}
| 6,998 |
def EnableBE():
""" enables BE workloads, locally
"""
if st.k8sOn:
command = 'kubectl label --overwrite nodes ' + st.node.name + ' hyperpilot.io/be-enabled=true'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE)
_, stderr = process.communicate()
if process.returncode != 0:
print "Main:ERROR: Failed to enable BE on k8s: %s" % stderr
| 6,999 |