content (stringlengths 22-815k) | id (int64 0-4.91M)
---|---|
def as_jenks_caspall_sampled(*args, **kwargs):
"""
Generate Jenks-Caspall Sampled classes from the provided queryset. If the queryset
is empty, no class breaks are returned. For more information on the Jenks
Caspall Sampled classifier, please visit:
U{http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Jenks_Caspall_Sampled}
@type queryset: QuerySet
@param queryset: The query set that contains the entire distribution of
data values.
@type field: string
@param field: The name of the field on the model in the queryset that
contains the data values.
@type nclasses: integer
@param nclasses: The number of class breaks desired.
@type geofield: string
@param geofield: The name of the geometry field. Defaults to 'geom'.
@rtype: L{sld.StyledLayerDescriptor}
@returns: An SLD object that represents the class breaks.
"""
return _as_classification(Jenks_Caspall_Sampled, *args, **kwargs)
| 5,000 |
def testToID() -> None:
"""Tests the toID() function
"""
assert psclient.toID("hi") == "hi"
assert psclient.toID("HI") == "hi"
assert psclient.toID("$&@*%$HI ^4åå") == "hi4"
| 5,001 |
def positive_dice_parse(dice: str) -> str:
"""
:param dice: Formatted string, where each line is blank or matches
t: [(t, )*t]
t = (0|T|2A|SA|2S|S|A)
(note: T stands for Triumph here)
:return: Formatted string matching above, except tokens are replaced
with their corresponding values in the 4-tuple system,
(successes, advantages, triumphs, despairs)
"""
return dice.replace("0", "(0, 0, 0, 0)")\
.replace("T", "(1, 0, 1, 0)")\
.replace("2A", "(0, 2, 0, 0)")\
.replace("SA", "(1, 1, 0, 0)")\
.replace("2S", "(2, 0, 0, 0)")\
.replace("S", "(1, 0, 0, 0)")\
.replace("A", "(0, 1, 0, 0)")
| 5,002 |
def fmt_title(text):
"""Article title formatter.
Except for function words, the first letter of each word is uppercased. Example:
"Google Killing Annoying Browsing Feature"
Format for article titles: except for function words, the first letter of every
English word is capitalized.
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1]
new_chunks = list()
for chunk in chunks:
if chunk not in _function_words:
chunk = chunk[0].upper() + chunk[1:]
new_chunks.append(chunk)
new_chunks[0] = new_chunks[0][0].upper() + new_chunks[0][1:]
return " ".join(new_chunks)
| 5,003 |
def insn_add_off_drefs(*args):
"""
insn_add_off_drefs(insn, x, type, outf) -> ea_t
"""
return _ida_ua.insn_add_off_drefs(*args)
| 5,004 |
def lambda_handler(event, context):
"""Lambda function that responds to changes in labeling job status, updating
the corresponding dynamo db tables and publishing to sns after a job is cancelled.
Parameters
----------
event: dict, required API gateway request with an input SQS arn, output SQS arn
context: object, required Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
Lambda Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
log.log_request_and_context(event, context)
job_status = event["status"]
job_arns = event["job_arns"]
if len(job_arns) != 1:
raise ValueError("incorrect number of job arns in event: ", job_arns)
job_arn = job_arns[0]
# We received a new status for the job_arn.
process_new_status(job_arn, job_status, context.invoked_function_arn)
return "success"
| 5,005 |
def get_answer(question: Union[dns.message.Message, bytes],
server: Union[IPv4Address, IPv6Address],
port: int = 53,
tcp: bool = False,
timeout: int = pydnstest.mock_client.SOCKET_OPERATION_TIMEOUT) -> dns.message.Message:
"""Get an DNS message with answer with specific query"""
sock = pydnstest.mock_client.setup_socket(str(server), port, tcp=tcp)
with sock:
pydnstest.mock_client.send_query(sock, question)
return pydnstest.mock_client.get_dns_message(sock, timeout=timeout)
| 5,006 |
def test_parse_input_update_rules_A():
"""
`parse_input_update_rules`
Feature A: parsing predecessor node lists and truth tables from
proper input.
"""
# Test for {no invalid Python node names, invalid Python node names}.
node_name_1 = 'A'
node_name_2 = '1A'
# Test for {lowercase operators, non-lowercase operators}.
not_txt_1 = 'nOt'
not_txt_2 = 'not'
# Test for {no constant '0', constant '0'}.
zero_expression_txt_1 = ''
zero_expression_txt_2 = 'or 0'
# Test for {no constant '1', constant '1'}.
one_expression_txt_1 = ''
one_expression_txt_2 = 'and 1'
expected_predecessor_nodes_lists = [[0]]
expected_truth_tables = [{(False,): True, (True,): False}]
for node_name, not_txt, zero_expression_txt, one_expression_txt in product(
[node_name_1, node_name_2], [not_txt_1, not_txt_2],
[zero_expression_txt_1, zero_expression_txt_2],
[one_expression_txt_1, one_expression_txt_2]):
node_names = [node_name]
update_rules_dict = {node_name: ' '.join(
[not_txt, node_name, zero_expression_txt, one_expression_txt])}
predecessor_nodes_lists, truth_tables = parse_input_update_rules(
update_rules_dict, node_names, {'section': 'update rules'})
test_description = generate_test_description(
locals(), 'node_name', 'not_txt', 'zero_expression_txt', 'one_expression_txt')
assert expected_predecessor_nodes_lists == predecessor_nodes_lists, test_description
assert expected_truth_tables == truth_tables, test_description
| 5,007 |
def intersectionPoint(line1, line2):
"""
Determine the intersection point between two lines given in normal form r = x*cos(theta) + y*sin(theta)
"""
y = (line2[0][0]*np.cos(line1[0][1]) - line1[0][0]*np.cos(line2[0][1]))/(np.sin(line2[0][1])*np.cos(line1[0][1]) - np.sin(line1[0][1])*np.cos(line2[0][1]))
x = (line1[0][0] - y*np.sin(line1[0][1]))/np.cos(line1[0][1])
return [x,y]
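# A small worked example (hypothetical values; assumes numpy is imported as np,
# as the function itself requires). Each line is [[r, theta]] in normal form
# r = x*cos(theta) + y*sin(theta): r=1, theta=0 is the vertical line x=1 and
# r=1, theta=pi/2 is the horizontal line y=1, which intersect at (1, 1).
assert np.allclose(intersectionPoint([[1.0, 0.0]], [[1.0, np.pi / 2]]), [1.0, 1.0])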
| 5,008 |
def iter_sequences_bam(bamfile):
"""Iterate over sequences in a BAM file. Only outputs the sequence, useful
for kmerizing."""
bam = pysam.AlignmentFile(bamfile, check_header=False, check_sq=False)
seq_iter = iter(bam.fetch(until_eof=True))
seq_iter = filter(lambda r: not r.is_qcfail, seq_iter)
yield from (seq.seq.encode('utf-8') for seq in seq_iter)
bam.close()
| 5,009 |
def test_datadir_rootdir(tmp_path: Path):
"""
Tests datadir/rootdir discovery, for
a datadir.txt in the same directory and in
a parent directory.
"""
(tmp_path / 'root').mkdir()
(tmp_path / 'root' / 'inner').mkdir()
(tmp_path / 'data').mkdir()
with (tmp_path / 'root' / 'datadir.txt').open('w') as datadir_txt:
datadir_txt.write(str(tmp_path / 'data'))
# Test same-directory datadir.txt lookup
os.chdir(tmp_path / 'root')
reload(rushd.io)
assert rushd.datadir == (tmp_path / 'data')
assert rushd.rootdir == (tmp_path / 'root')
# Test parent-directory datadir.txt lookup
os.chdir(tmp_path / 'root' / 'inner')
reload(rushd.io)
assert rushd.datadir == (tmp_path / 'data')
assert rushd.rootdir == (tmp_path / 'root')
| 5,010 |
def _remove_keywords(d):
"""
Return a copy of the dict with reserved keywords filtered out.
Parameters
----------
d : dict
"""
return { k:v for k, v in iteritems(d) if k not in RESERVED }
| 5,011 |
def build_trib_exp(trib_identifier, trib_key_field):
"""Establishes a SQL query expresion associating a given tributary id"""
return '"{0}"'.format(trib_key_field) + " LIKE '%{0}%'".format(trib_identifier)
| 5,012 |
def train_epoch(loader, vae, optimizer, device, epoch_idx, log_interval,
loss_weights, stats_logger, clip_gradients=None):
"""Train VAE for an epoch"""
vae.train()
train_losses = {}
train_total_loss = 0
for batch_idx, data in enumerate(loader):
data = data.to(device).float()
target = data
optimizer.zero_grad()
decoder_output, z, mu, logvar = vae(data)
losses = vae.loss(decoder_output, target, z, mu, logvar)
total_loss = sum(loss_weights.get(loss_name, 1) * loss
for loss_name, loss in losses.items()
if '_unweighted' not in loss_name)
total_loss.backward()
if clip_gradients is not None:
torch.nn.utils.clip_grad_value_(vae.parameters(), clip_gradients)
optimizer.step()
train_total_loss += total_loss.item() * len(data)
for name, loss in losses.items():
train_loss = train_losses.setdefault(name, 0)
train_losses[name] = train_loss + loss.item() * len(data)
if batch_idx % log_interval == 0:
s = ('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
.format(epoch_idx,
batch_idx * len(data),
len(loader.dataset),
100. * batch_idx / len(loader)))
s += ', '.join('Loss {}: {:.7f}'.format(name, loss.item())
for name, loss in losses.items())
print(s)
stats = {name: loss / len(loader.dataset)
for name, loss in train_losses.items()}
stats['total_loss'] = train_total_loss / len(loader.dataset)
s = ('====> Epoch: {} Avg. total loss: {:.7f}, '
.format(epoch_idx, stats['total_loss']))
s += ', '.join('{} loss: {:.7f}'.format(name, loss)
for name, loss in stats.items() if name != 'total_loss')
print(s)
# Add weighted losses for logging
for name, loss in train_losses.items():
weight = loss_weights.get(name, 1)
stats['weighted_' + name] = weight * loss / len(loader.dataset)
return stats
| 5,013 |
def test_environment():
"""Creates a test environment to check if everything is ok"""
game, actions = create_environment(visible=True)
game.new_episode()
while not game.is_episode_finished():
action = random.choice(actions)
game.make_action(action)
print("Total reward:", game.get_total_reward())
| 5,014 |
def phased_multi_axes(times, data, std, ephemeris, thin=1,
colours='midnightblue', ylim_shrink=0.8,
subplot_kw=None, gridspec_kw=None, **kws):
"""
Parameters
----------
times
data
std
ephemeris
thin
colours
subplot_kw
gridspec_kw
Returns
-------
"""
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
# sharex=True, # not sharing x since it shares
# all the ticks which is NOT desired here.
# instead set range for all
# NOTE: could try:
# for tck in ax.xaxis.get_major_ticks():
# tck.label1.set_visible(True)
n = len(times)
fig, axes = plt.subplots(n, 1,
sharey=True,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw
)
# hack to get dual axes on topmost
pos = axes[0].get_position()
axes[0].remove()
ax = fig.axes[0] = axes[0] = SubplotHost(fig, n, 1, 1, **subplot_kw)
axp = make_twin(ax, 45, ephemeris.P)
fig.add_subplot(ax)
ax.set_position(pos)
# get colours
if not isinstance(colours, (list, tuple, np.ndarray)):
colours = [colours] * n
# plot options
opts = dict(fmt='o', ms=1, alpha=0.75, clip_on=False)
opts.update(**kws)
# do plotting
s = np.s_[::thin]
xlim = [np.inf, -np.inf]
ylim = [np.inf, -np.inf]
for i, (ax, t, y, u) in enumerate(zip(axes, times, data, std)):
first = (i == 0)
last = (i == n - 1)
#
phase = ephemeris.phase(t)
phase -= max(np.floor(phase[0]) + 1, 0)
if np.all(phase < 0):
phase += 1
ebc = ax.errorbar(phase[s], y[s], u if u is None else u[s],
color=colours[i], **opts)
xlim = [min(xlim[0], phase[0]),
max(xlim[1], phase[-1])]
ylim = [min(ylim[0], y.min()),
max(ylim[1], y.max())]
# ticks
ax.tick_params('y', which='minor', length=2.5, left=True, right=True)
ax.tick_params('y', which='major', length=5, left=True, right=True)
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
if last:
ax.tick_params('x', which='minor', length=2.5, bottom=(not first),
top=(not last))
ax.tick_params('x', which='major', length=5, bottom=(not first),
top=(not last))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
else:
ax.tick_params('x', length=0)
# remove top & bottom spines
if not first:
ax.spines['top'].set_visible(False)
if not last:
ax.spines['bottom'].set_visible(False)
ax.xaxis.set_ticklabels([])
ax.tick_params(labelright=True, labelleft=True)
ax.grid(True)
# axes limits
stretch = np.ptp(xlim) * 0.025
xlim = np.add(xlim, [-stretch, stretch])
ylim[1] *= ylim_shrink
for ax in axes:
ax.set(xlim=xlim, ylim=ylim)
# axes[0].set_ylim(-0.15, 1.65)
# x label
axes_label_font_spec = dict(weight='bold', size=14)
ax.set_xlabel('Orbital Phase', fontdict=axes_label_font_spec)
# y label
y_middle = 0.5 # (fig.subplotpars.top - fig.subplotpars.bottom) / 2
for x, va in zip((0.01, 1), ('top', 'bottom')):
fig.text(x, y_middle, 'Relative Flux', axes_label_font_spec,
rotation=90, rotation_mode='anchor',
ha='center', va=va)
# top ticks
# axp.xaxis.set_ticks(np.r_[-2.5:3.5:0.5])
axp.set_xlabel('Time (hours)', fontdict=dict(weight='bold'))
axp.tick_params('x', which='minor', length=2.5, bottom=False,
top=True)
return fig
| 5,015 |
def get_delta(K):
"""This function returns the delta matrix needed calculting Pj = delta*S + (1-delta)*(1-S)
Args:
inputs:
K: Integers below 2^K will be considered
outputs:
delta: Matrix containing the binary codes of the numbers 1..2^K-1, one per row. shape [(2^K - 1) x K]
one_minus_delta: Matrix containing the complements of the binary codes of the numbers 1..2^K-1, one per row. shape [(2^K - 1) x K]
"""
delta = np.arange(1, 2 ** K)[:, np.newaxis] >> np.arange(K)[::-1] & 1
# all_ones = np.array(
# [list(np.binary_repr(2 ** int(np.ceil(np.log2(1 + x))) - 1, K)) for x in
# range(1, 2 ** K)], dtype=int)
all_ones = np.array([[1 for _ in range(K)] for _ in range(2**K-1)])
one_minus_delta = all_ones - delta
return delta, one_minus_delta
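# A small worked example (assumes numpy is imported as np, as the function itself
# requires): for K=2 the rows are the binary codes of 1..2**K-1, most significant
# bit first, and one_minus_delta is the element-wise complement.
d, omd = get_delta(2)
assert np.array_equal(d, [[0, 1], [1, 0], [1, 1]])
assert np.array_equal(omd, [[1, 0], [0, 1], [0, 0]])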
| 5,016 |
def string_split_readable(inp, length):
"""
Convenience function to chunk a string into parts of a certain length,
whilst being wary of spaces.
This means that chunks will only be split on spaces, which means some
chunks will be shorter, but it also means that the resulting list will
only contain readable strings.
ValueError is thrown if there's a word that's longer than the max chunk
size.
:param inp: The string to be split
:param length: Maximum length of the chunks to return
:return: List containing the split chunks
"""
done = []
current = ""
for word in inp.split():
if len(current) == length:
done.append(current)
current = ""
if len(word) > length:
raise ValueError(_("Word %s is longer than %s characters") %
(word, length))
else:
if len(current + word) > length:
done.append(current)
current = ""
current += word
if len(current) <= (length - 1):
current += " "
if len(current):
done.append(current)
return done
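# A hedged usage sketch (hypothetical input). Chunks keep the trailing space that
# is appended while words are being accumulated:
assert string_split_readable("the quick brown fox", 10) == ["the quick ", "brown fox "]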
| 5,017 |
def convertFormat(sequence):
"""convert a genbank file to a fasta format file"""
for seq_record in SeqIO.parse(sequence, "genbank"):
count = SeqIO.convert(sequence, "genbank", "%s.fasta" %seq_record.id, "fasta")
print("Converted %i records" % count)
| 5,018 |
def get_similarity_transform_matrix(
from_pts: torch.Tensor, to_pts: torch.Tensor) -> torch.Tensor:
"""
Args:
from_pts, to_pts: b x n x 2
Returns:
torch.Tensor: b x 3 x 3
"""
mfrom = from_pts.mean(dim=1, keepdim=True) # b x 1 x 2
mto = to_pts.mean(dim=1, keepdim=True) # b x 1 x 2
a1 = (from_pts - mfrom).square().sum([1, 2], keepdim=False) # b
c1 = ((to_pts - mto) * (from_pts - mfrom)).sum([1, 2], keepdim=False) # b
to_delta = to_pts - mto
from_delta = from_pts - mfrom
c2 = (to_delta[:, :, 0] * from_delta[:, :, 1] - to_delta[:,
:, 1] * from_delta[:, :, 0]).sum([1], keepdim=False) # b
a = c1 / a1
b = c2 / a1
dx = mto[:, 0, 0] - a * mfrom[:, 0, 0] - b * mfrom[:, 0, 1] # b
dy = mto[:, 0, 1] + b * mfrom[:, 0, 0] - a * mfrom[:, 0, 1] # b
ones_pl = torch.ones_like(a1)
zeros_pl = torch.zeros_like(a1)
return torch.stack([
a, b, dx,
-b, a, dy,
zeros_pl, zeros_pl, ones_pl,
], dim=-1).reshape(-1, 3, 3)
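# A minimal sanity-check sketch (assumes torch is imported, as the function itself
# requires): mapping a point set onto itself should give the identity transform.
pts = torch.tensor([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])  # b=1, n=4, 2
m = get_similarity_transform_matrix(pts, pts)
assert m.shape == (1, 3, 3)
assert torch.allclose(m, torch.eye(3).unsqueeze(0), atol=1e-6)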
| 5,019 |
def get_current_daily_puzzle(**kwargs) -> ChessDotComResponse:
"""
:returns: ``ChessDotComResponse`` object containing
information about the daily puzzle found in www.chess.com.
"""
return Resource(
uri = "/puzzle",
top_level_attr = "puzzle",
**kwargs
)
| 5,020 |
def logger_error(message: str) -> None:
"""Adds an error message to the log file. This is used for reporting errors in
the program that occur that do not prevent the program from working."""
# Sets the config for the logger.
logging.basicConfig(filename='../pysys.log', level=logging.DEBUG, format=FORMAT,
datefmt='%m/%d/%Y %I:%M:%S %p')
logging.error(message)
| 5,021 |
def stroke_negative():
"""
render template if user is predicted negative for stroke
"""
return render_template("negative.html")
| 5,022 |
def default_data(sim_type):
"""New simulation base data
Args:
sim_type (str): simulation type
Returns:
dict: simulation data
"""
import sirepo.sim_data
return open_json_file(
sim_type,
path=sirepo.sim_data.get_class(sim_type).resource_path(f'default-data{sirepo.const.JSON_SUFFIX}')
)
| 5,023 |
def get_instance_name_to_id_map(instance_info):
"""
Generate an instance_name to instance_id map.
Every instance without a Name tag is given a key 'unnamedN', where N is an incrementing count of unnamed instances.
"""
instance_name_to_id = {}
unknown_instance_count = 0
for instance_id in instance_info:
instance = instance_info[instance_id]
instance_name = "unnamed" + str(unknown_instance_count)
if "Tags" in instance:
for tag in instance["Tags"]:
if tag["Key"] == "Name":
instance_name = tag["Value"]
if instance_name == "unnamed" + str(unknown_instance_count):
unknown_instance_count = unknown_instance_count + 1
instance_name_to_id[instance_name] = instance["InstanceId"]
return instance_name_to_id
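# A hedged usage sketch; the input layout (instance_id -> instance dict with
# optional EC2-style "Tags") is an assumption, not taken from the original source:
example_instance_info = {
    "i-0abc": {"InstanceId": "i-0abc", "Tags": [{"Key": "Name", "Value": "web-1"}]},
    "i-0def": {"InstanceId": "i-0def"},
}
assert get_instance_name_to_id_map(example_instance_info) == {
    "web-1": "i-0abc", "unnamed0": "i-0def"}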
| 5,024 |
def log_get_stdio_record(log):
"""
Returns a darshan log record for STDIO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **")
| 5,025 |
def gits_set_profile(args):
"""
Set the global git user.name and user.email and
print the resulting configuration to the console.
"""
# print(args.email)
# print("Hello from GITS Commandline Tools-Profile")
try:
# check regex
check_val = check(args.email)
# print(check_val)
if check_val:
process = subprocess.Popen(["git", "config", "--global",
"--unset", "user.email"],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process.communicate()
process1 = subprocess.Popen(["git", "config", "--global",
"--unset", "user.name"],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process1.communicate()
process2 = subprocess.Popen(["git", "config", "--global",
"user.name", args.name],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process2.communicate()
process3 = subprocess.Popen(["git", "config", "--global",
"user.email", args.email],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process3.communicate()
profile_verify_name_command = list()
profile_verify_name_command.append("git")
profile_verify_name_command.append("config")
profile_verify_name_command.append("--list")
profile_verify_name = list()
profile_verify_name.append("grep")
profile_verify_name.append('user.name')
process4 = subprocess.Popen(profile_verify_name_command,
stdout=PIPE,
stderr=PIPE)
process41 = subprocess.Popen(profile_verify_name,
stdin=process4.stdout,
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process41.communicate()
print("Setting name and email..\n")
print(stdout.decode('utf-8'))
profile_verify_email_command = list()
profile_verify_email_command.append("git")
profile_verify_email_command.append("config")
profile_verify_email_command.append("--list")
profile_verify_email = list()
profile_verify_email.append("grep")
profile_verify_email.append("user.email")
process5 = subprocess.Popen(profile_verify_email_command,
stdout=PIPE,
stderr=PIPE)
process51 = subprocess.Popen(profile_verify_email,
stdin=process5.stdout,
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process51.communicate()
print(stdout.decode('utf-8'))
else:
print("Enter a valid email id")
except Exception as e:
print("ERROR: gits profile command caught an exception")
print("ERROR: {}".format(str(e)))
return False
return True
| 5,026 |
def generate_spiral2d(nspiral=1000,
ntotal=500,
nsample=100,
start=0.,
stop=1, # approximately equal to 6pi
noise_std=.1,
a=0.,
b=1.,
savefig=True):
"""Parametric formula for 2d spiral is `r = a + b * theta`.
Args:
nspiral: number of spirals, i.e. batch dimension
ntotal: total number of datapoints per spiral
nsample: number of sampled datapoints for model fitting per spiral
start: spiral starting theta value
stop: spiral ending theta value
noise_std: observation noise standard deviation
a, b: parameters of the Archimedean spiral
savefig: plot the ground truth for sanity check
Returns:
Tuple where first element is true trajectory of size (nspiral, ntotal, 2),
second element is noisy observations of size (nspiral, nsample, 2),
third element is timestamps of size (ntotal,),
and fourth element is timestamps of size (nsample,)
"""
# add 1 to the timestamps to avoid division by 0
orig_ts = np.linspace(start, stop, num=ntotal)
samp_ts = orig_ts[:nsample]
# generate clock-wise and counter clock-wise spirals in observation space
# with two sets of time-invariant latent dynamics
zs_cw = stop + 1. - orig_ts
rs_cw = a + b * 50. / zs_cw
xs, ys = rs_cw * np.cos(zs_cw) - 5., rs_cw * np.sin(zs_cw)
orig_traj_cw = np.stack((xs, ys), axis=1)
zs_cc = orig_ts
rw_cc = a + b * zs_cc
xs, ys = rw_cc * np.cos(zs_cc) + 5., rw_cc * np.sin(zs_cc)
orig_traj_cc = np.stack((xs, ys), axis=1)
if savefig:
plt.figure()
plt.plot(orig_traj_cw[:, 0], orig_traj_cw[:, 1], label='clock')
plt.plot(orig_traj_cc[:, 0], orig_traj_cc[:, 1], label='counter clock')
plt.legend()
plt.savefig('./ground_truth.png', dpi=500)
print('Saved ground truth spiral at {}'.format('./ground_truth.png'))
# sample starting timestamps
orig_trajs = []
samp_trajs = []
for _ in range(nspiral):
# don't sample t0 very near the start or the end
t0_idx = npr.multinomial(
1, [1. / (ntotal - 2. * nsample)] * (ntotal - int(2 * nsample)))
t0_idx = np.argmax(t0_idx) + nsample
cc = bool(npr.rand() > .5) # uniformly select rotation
orig_traj = orig_traj_cc if cc else orig_traj_cw
orig_trajs.append(orig_traj)
samp_traj = orig_traj[t0_idx:t0_idx + nsample, :].copy()
samp_traj += npr.randn(*samp_traj.shape) * noise_std
samp_trajs.append(samp_traj)
# batching for sample trajectories is good for RNN; batching for original
# trajectories only for ease of indexing
orig_trajs = np.stack(orig_trajs, axis=0)
samp_trajs = np.stack(samp_trajs, axis=0)
return orig_trajs, samp_trajs, orig_ts, samp_ts
| 5,027 |
def AUC_confidence(auc_value, num, interval=0.95):
"""
Calculate upper and lower 95% CI for area under the roc curve
Inspired by https://stats.stackexchange.com/questions/18887
:param auc_value: area under the ROC curve
:param num: number of data points
:param interval: confidence interval (0-1.0)
:return: lower bound, upper bound
"""
stderr = 1.0 / math.sqrt(num - 3)
z_score = norm.ppf(interval)
delta = z_score * stderr
lower = math.tanh(math.atanh(auc_value) - delta)
upper = math.tanh(math.atanh(auc_value) + delta)
return lower, upper
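# A minimal usage sketch (hypothetical values): the returned interval always
# brackets the point estimate, because the Fisher z-transform is monotonic.
lower, upper = AUC_confidence(0.8, num=100)
assert lower < 0.8 < upper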
| 5,028 |
def add_data(data):
""" This adds data """
item = data
db.insert(data)
return 'chain updated'
| 5,029 |
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
"""
Detect the face from the image, return colored face
"""
cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
img_path = os.path.abspath(img_path)
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cc.detectMultiScale(gray, 1.3, 5)
roi_color = None
if len(faces) == 0:
logging.exception(img_path + ': No face found')
else:
x,y,w,h = faces[0]
_h, _w = compute_size(h, w)
roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]
return roi_color
| 5,030 |
def build_node_to_name_map(head):
"""
:type head: DecisionGraphNode
:return:
"""
node_to_name_map = {}
name_to_next_idx_map = Counter()
def add_node_name(node):
assert node not in node_to_name_map
node_type_name = node.get_node_type_name()
idx = name_to_next_idx_map[node_type_name]
name_to_next_idx_map[node_type_name] += 1
name = "{}_{}".format(node_type_name, idx)
node_to_name_map[node] = name
bfs(head, add_node_name)
return node_to_name_map
| 5,031 |
async def test_first_does_not_kill(dut):
"""Test that `First` does not kill coroutines that did not finish first"""
ran = False
@cocotb.coroutine # decorating `async def` is required to use `First`
async def coro():
nonlocal ran
await Timer(2, units="ns")
ran = True
# Coroutine runs for 2ns, so we expect the timer to fire first
timer = Timer(1, units="ns")
t = await First(timer, coro())
assert t is timer
assert not ran
# the background routine is still running, but should finish after 1ns
await Timer(2, units="ns")
assert ran
| 5,032 |
def ReduceFloat(f, op=None):
"""Reduce a single float value over MPI"""
if not hasMPI:
raise Exception("mpi4py required for Reduce operations: not found")
if op is None:
op = MPI.SUM
fa = np.array([f]) # can only reduce over numpy arrays
MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE,
fa,
                         op=op)
return fa[0]
| 5,033 |
def GetVerificationStepsKeyName(name):
"""Returns a str used to uniquely identify a verification steps."""
return 'VerificationSteps_' + name
| 5,034 |
def annotate_ms1_peaks(ms1_data, ms2_data, analyte_list):
"""Interpolate MS1 intensities for the time points for the MS2 scans for the largest mass peak in each analyte.
Use relative changes in intensity between interpolated MS1 data and real MS2 data to find MS2 peaks that go with
each analyte. """
ms2_data["analyte_id"] = None
# Extract list of unique scan numbers and corresponding retention times
ms2_scans = ms2_data[["scan", "rt"]].drop_duplicates().sort_values(by=["scan"])
for analyte in analyte_list:
max_peak_data = ms1_data[ms1_data["peak_id"] == analyte.max_peak_id][["scan", "rt", "intensity"]].sort_values(by=["scan"])
interpolated_range = ms2_scans[ms2_scans["scan"].between(max_peak_data["scan"].min(), max_peak_data["scan"].max())].copy()
if len(interpolated_range.index) >= config.matched_scan_minimum:
if len(max_peak_data.index) > 3:
tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0)
elif len(max_peak_data.index) == 3:
tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0, k=2)
else:
continue
interpolated_intensities = interpolate.splev(interpolated_range["rt"].to_numpy(), tck, der=0)
interpolated_range["intensity"] = interpolated_intensities
ms2_data = ms2_to_analyte_vectorized(ms2_data,
interpolated_range[["scan", "intensity"]],
analyte.analyte_id)
else:
continue
return ms2_data
| 5,035 |
def split_tblastn_hits_into_separate_genes(query_res_obj, max_gap):
"""Take a SearchIO QueryResult object and return a new object with hits
split into groups of HSPs that represent distinct genes. This is important,
because there may be multiple paralogous genes present in a single
nucleotide subject sequence (such as a chromosome or scaffold).
"""
# Print message.
print('\n\tSearch program was tblastn.\n\tChecking number of distinct genes represented by HSPs.\n')
# Copy the query result object.
#query_res_obj2 = copy.deepcopy(query_res_obj)
# Compile a list of all HSP clusters.
# Display a simple visualization of HSP location.
# List hits and HSPs in original object.
num_dots = 150
all_hsp_clusters = []
hit_num = 0
for hit in query_res_obj:
hit_num += 1
print('\tQuery: ' + hit.query_id)
print('\tHit '+ str(hit_num) + ': ' + hit.id + ' ' + hit.description)
print('\t' + 'HSP positions in subject sequence (1 dot = ' +\
str(int(hit.seq_len / num_dots)) + ' bp):')
print('\t ' + '0' + ' ' * (num_dots -2) + str(hit.seq_len))
print('\t ' + 'v' + ' ' * (num_dots -2) + 'v')
print('\t ' + '.' * num_dots + ' ' + 'Query range:')
# Make a list of hsps.
hsps = []
for hsp in hit:
hsps.append(hsp)
# Sort the HSPs.
hsps2 = sorted(hsps, key=lambda x: x.hit_start)
# Display the HSPs.
for hsp in hsps2:
string = '\t'
sign = None
if hsp.hit_frame > 0:
sign = '+'
elif hsp.hit_frame < 0:
sign = '-'
prepend_dots = '.' * int((hsp.hit_start*num_dots)/(hit.seq_len))
string = string + sign + prepend_dots
span_string = str(hsp.hit_start) + ', ' + str(hsp.hit_end)
string = string + span_string
string = string + '.' * max([0, num_dots - len(prepend_dots) - len(span_string)])
string = string + ' ' + str(hsp.query_range) #+ ' ' + str(hsp.evalue)
print(string)
#print(hsp.hit.seq)
print('\n')
# Generate an expanded list of hit objects.
# Recursively find clusters of HSPs that likely represent different
# genes, and return as a list of lists.
hsp_clusters = get_hsp_clusters(hit, max_gap)
all_hsp_clusters = all_hsp_clusters + hsp_clusters
# Display HSPs in each cluster.
cluster_num = 0
for clusterplus in hsp_clusters:
cluster = clusterplus[0]
cluster_num += 1
# Call function for printing visualization.
print_cluster(clusterplus, hit_num, cluster_num, num_dots) #***
## ***Redundant?:
## Check that the clusters do not overlap with each other on the subject
## sequence.
#for cluster1 in hsp_clusters:
# for cluster2 in hsp_clusters:
# if cluster1[0] != cluster2[0]:
# if clusters_overlap(cluster1[0], cluster2[0]):
# # Visualize overlapping clusters (for troubleshooting).
# startend = get_cluster_range(cluster1[0] + cluster2[0])
# print('Overlapping clusters:')
# print_cluster(cluster1,\
# str(get_cluster_range(cluster1[0])),\
# cluster_num, num_dots, startend)
# print_cluster(cluster2,\
# str(get_cluster_range(cluster2[0])),\
# cluster_num, num_dots, startend)
# ## Assert no overlap.
# #assert not clusters_overlap(cluster1[0], cluster2[0]),\
# #"""Clusters overlap: %s and %s""" %\
# #(cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\
# # cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0])))
## Check that the clusters do not overlap with each other on the subject
## sequence.
#for cluster1 in all_hsp_clusters:
# for cluster2 in all_hsp_clusters:
# if cluster1[0] != cluster2[0]:
# assert not clusters_overlap(cluster1[0], cluster2[0]),\
# """Clusters overlap: %s and %s""" %\
# (cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\
# cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0])))
# Sort HSPs according to E-value (the ranking may change because when
# TBLASTN HSPs for the same scaffold sequence are split into those
# representing potentially separate genes, then some may have higher
# E-values).
all_hsp_clusters.sort(key=lambda x: min([y.evalue for y in x[0]]))
# Return the list of SearchIO HSP (not Hit) object clusters/lists.
return all_hsp_clusters
| 5,036 |
def to_entity_values(entity_group):
""" Parse current entity group content into a CreateEntity[]
"""
values = []
for _, row in entity_group.iterrows():
value = row[ENTITY_VALUE_COLUMN]
if not value: # Handle reserved entities
continue
synonyms = []
patterns = []
# Drop the first two items and iterate over the rest (synonyms or patterns)
for _, val in row.drop([ENTITY_COLUMN, ENTITY_VALUE_COLUMN]) \
.iteritems():
if not pd.isnull(val):
if val.startswith('/'): # is pattern?
patterns.append(val[:-1][1:])
else:
synonyms.append(val)
# Construct CreateValue[]
if len(patterns) != 0:
values.append({'value': value, 'patterns': patterns,
'type': 'patterns'})
else:
values.append({'value': value, 'synonyms': synonyms,
'type': 'synonyms'})
return values
| 5,037 |
def read_file(file_path):
"""
Read the contents of a file using utf-8 encoding, or return an empty string
if it does not exist
:param file_path: str: path to the file to read
:return: str: contents of file
"""
try:
with codecs.open(file_path, 'r', encoding='utf-8', errors='xmlcharrefreplace') as infile:
return infile.read()
except OSError as e:
logging.exception('Error opening {}'.format(file_path))
return ''
| 5,038 |
def scrub(data):
"""
Reads a CSV file and organizes it neatly into a DataFrame.
Arguments:
data {.csv} -- the csv file to be read and scrubbed
Returns:
DataFrame -- the logarithmic returns of selected ticker symbols
"""
df = pd.read_csv(data, header=0, index_col=0, parse_dates=True)
df.dropna(axis=1, inplace=True)
logret = np.log(df).diff().iloc[1:]
return logret
| 5,039 |
def parse_args():
"""read arguments from command line
"""
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset',
type=str,
nargs='?',
default='data/datasets/solid-state_dataset_2019-09-27_upd.json',
help="Path to dataset to use")
parser.add_argument('--elem-dict',
type=str,
nargs='?',
default='data/elem_dict',
help="Path to element to index dictionary without extension")
parser.add_argument('--action-dict',
type=str,
nargs='?',
default='data/action_dict',
help="Path to element to index dictionary without extension")
parser.add_argument('--magpie-embed',
type=str,
nargs='?',
default='data/magpie_embed',
help="Path to magpie embeddings dictionary without extension")
parser.add_argument('--clean-set',
type=str,
nargs='?',
default='data/dataset',
help="Path to full clean dataset to use without extension")
parser.add_argument('--train-set',
type=str,
nargs='?',
default='data/train',
help="Path to train dataset to use without extension")
parser.add_argument('--test-set',
type=str,
nargs='?',
default='data/test',
help="Path to test dataset to use without extension")
parser.add_argument('--val-set',
type=str,
nargs='?',
default='data/val',
help="Path to val dataset to use without extension")
parser.add_argument('--test-size',
type=float,
nargs='?',
default=0.2,
help="size of clean dataset for testing")
parser.add_argument('--val-size',
type=float,
nargs='?',
default=0,
help="size of clean dataset for validation")
parser.add_argument('--seed',
type=int,
nargs='?',
default=0,
help="Random seed for splitting data")
parser.add_argument('--ps',
type=str,
nargs='?',
default='',
help="postscript on path for save files")
parser.add_argument('--max-prec',
type=int,
nargs='?',
default=10,
help='Max number of precursors per reaction.')
parser.add_argument('--min-prec',
type=int,
nargs='?',
default=2,
help='Min number of precursors per reaction. Default 2')
parser.add_argument('--augment',
action="store_true",
help="augment data with precursor rearrangements")
parser.add_argument('--split-prec-amts',
action="store_true",
help="split out data for the baseline model")
parser.add_argument('--num-elem',
type=int,
metavar='N',
nargs='?',
default=-1,
help='Take N most common elements only. Default: -1 (all)')
args = parser.parse_args()
return args
| 5,040 |
def _get_lto_level():
"""
Returns the user-specific LTO parallelism level.
"""
default = 32 if config.get_lto_type() else 0
return read_int("cxx", "lto", default)
| 5,041 |
def slice_label_rows(labeldf: pd.DataFrame, label: str, sample_list: List[str],
row_mask: NDArray[Any]) -> NDArray[Any]:
"""
Selects rows from the Pandas DataFrame of labels corresponding to the samples in a particular sample_block.
Args:
labeldf : Pandas DataFrame containing the labels
label : Header for the particular label to slice. Can be 'all' if all labels are desired.
sample_list : List of sample ids corresponding to the sample_block to be sliced out.
row_mask : 1D numpy array of size n_rows containing booleans used to mask samples from the rows sliced from
labeldf.
Returns:
Matrix of [number of samples in sample_block - number of samples masked] x [number of labels to slice]
"""
if row_mask.size == 0:
row_mask = np.full(len(sample_list), True)
if label == 'all':
return labeldf.loc[sample_list, :].to_numpy()[row_mask, :]
else:
return labeldf[label].loc[sample_list].to_numpy().reshape(-1, 1)[row_mask, :]
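# A hedged usage sketch (assumes pandas/numpy are imported as pd/np; the layout of
# labeldf, samples as index and labels as columns, follows the docstring above):
example_labeldf = pd.DataFrame({"trait": [0.1, 0.2, 0.3]}, index=["s1", "s2", "s3"])
out = slice_label_rows(example_labeldf, "trait", ["s1", "s3"], np.array([]))
assert out.shape == (2, 1)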
| 5,042 |
def rollback(var_list, ckpt_folder, ckpt_file=None):
""" This function provides a shortcut for reloading a model and calculating a list of variables
:param var_list:
:param ckpt_folder:
:param ckpt_file: in case an older ckpt file is needed, provide it here, e.g. 'cifar.ckpt-6284'
:return:
"""
global_step = global_step_config()
# register a session
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False))
# initialization
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
# load the training graph
saver = tf.compat.v1.train.Saver(max_to_keep=2)
ckpt = get_ckpt(ckpt_folder, ckpt_file=ckpt_file)
if ckpt is None:
raise FileNotFoundError('No ckpt Model found at {}.'.format(ckpt_folder))
saver.restore(sess, ckpt.model_checkpoint_path)
FLAGS.print('Model reloaded.')
# run the session
coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
var_value, global_step_value = sess.run([var_list, global_step])
coord.request_stop()
# coord.join(threads)
sess.close()
FLAGS.print('Variable calculated.')
return var_value, global_step_value
| 5,043 |
def read_interaction_file_mat(file):
"""
Return the adjacency matrix associated with the protein interaction graph, along with
the ordered list of vertices.
:param file: table containing a graph
:type file: dataframe
:return: an adjacency matrix of this graph and an ordered list of vertices
:rtype: tuple
"""
list_sommets = pd.concat([file.Sommet, file.Interaction])
list_sommets = sorted(list(dict.fromkeys(list_sommets)))
res_mat = numpy.zeros((len(list_sommets), len(list_sommets)), dtype=int)
res_list = read_interaction_file_list(file)
for interaction in res_list:
res_mat[list_sommets.index(interaction[0])][list_sommets.index(interaction[1])] = 1
res_mat[list_sommets.index(interaction[1])][list_sommets.index(interaction[0])] = 1
return res_mat, list_sommets
| 5,044 |
def load_figures(fig_names):
"""
Uses a list of the figure names to load them into a list
@param fig_names:
@type fig_names:
@return: A list containing all the figures
@rtype: list
"""
fig_list = []
for i, name in enumerate(fig_names):
fig_list.append(pl.load(open(f"{name}.pickle", "rb")))
return fig_list
| 5,045 |
def define_mimonet_layers(input_shape, classes, regularized=False):
"""
Use the functional API to define the model
https://keras.io/getting-started/functional-api-guide/
params: input_shape (h,w,channels)
"""
layers = { 'inputs' : None,
'down_path' : {},
'bottle_neck' : None,
'up_path' : {},
'outputs' : None }
layers['inputs'] = [Input(input_shape[0],name='in1'),Input(input_shape[1],name='in2'),Input(input_shape[2],name='in3')]
layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['inputs'][0])
layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['down_path'][4])
layers['down_path'][3] = crop_concatenate(layers['inputs'][1],
new_down_level(128,layers['down_path'][4],regularized=regularized))
layers['down_path'][2] = crop_concatenate(layers['inputs'][2],
new_down_level(256,layers['down_path'][3],regularized=regularized))
layers['down_path'][1] = new_down_level(512,layers['down_path'][2],regularized=regularized)
layers['bottle_neck'] = new_down_level(1024,layers['down_path'][1],regularized=regularized)
layers['up_path'][1] = new_up_level(512,layers['bottle_neck'],layers['down_path'][1],regularized=regularized)
layers['up_path'][2] = new_up_level(256,layers['up_path'][1],layers['down_path'][2],padding='same',regularized=regularized)
layers['up_path'][3] = new_up_level(128,layers['up_path'][2],layers['down_path'][3],padding='same',regularized=regularized)
layers['up_path'][4] = new_up_level(64,layers['up_path'][3],layers['down_path'][4],regularized=regularized)
auxla1, la1 = feature_mask(4,256,64,classes,layers['up_path'][2],'la1')
auxla2, la2 = feature_mask(2,128,64,classes,layers['up_path'][3],'la2')
auxla3 = layers['up_path'][4]
layers['outputs'] = [ la1,la2 ]
layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='la3')(auxla3) ]
l0 = crop_concatenate(auxla1, auxla2)
l0 = crop_concatenate(l0,auxla3)
l0 = cnv3x3Relu(64,regularized=regularized, padding='same')(l0)
l0 = cnv3x3Relu(32,regularized=regularized, padding='same')(l0)
layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='l0')(l0) ]
return layers
| 5,046 |
def scattering_angle( sza, vza, phi, Expand=False, Degree=False ):
"""
Function scattering_angle() calculates the scattering angle.
cos(pi-THETA) = cos(theta)cos(theta0) + sin(theta)sin(theta0)cos(phi)
Input and output are in the unit of PI
Parameters
----------
sza: solar zenith angle is radian
vza: viewing zenith angle in radian
phi: relative azimuth angle in radian
Expand: (optional) True/False to expand the dimension of the calculated THETA
Returns
-------
THETA: scattering angle in radian
"""
# Change angle from degree to radian if needed
if Degree:
angle2rad = np.pi / 180.
sza = sza * angle2rad
vza = vza * angle2rad
phi = phi * angle2rad
# define the sizes of the input angle arrays
m,n,l = np.size(sza),np.size(vza),np.size(phi)
if Expand:
THETA = np.zeros( (m,n,l) )
for k in range(l):
for j in range(n):
for i in range(m):
t1 = np.cos(vza[j]) * np.cos(sza[i]) \
+ np.sin(vza[j]) * np.sin(sza[i]) * np.cos(phi[k])
t2 = np.arccos(t1)
THETA[i,j,k] = np.pi - t2
else:
# Check the dimension
if (( m != n) | (m != l )):
sys.exit("scattering_angle() error #1 in util.py")
t1 = np.cos(vza) * np.cos(sza) \
+ np.sin(vza) * np.sin(sza) * np.cos(phi)
t2 = np.arccos(t1)
THETA = np.pi - t2
if Degree:
THETA = THETA * 180. / np.pi
return THETA
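# A small sanity check (assumes numpy is imported as np, as the function itself
# requires): with the sun and the viewer both at zenith and phi = 0, t1 = 1, so
# THETA = pi, i.e. the backscattering geometry.
assert np.isclose(scattering_angle(0.0, 0.0, 0.0), np.pi)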
| 5,047 |
def _list_subclasses(cls):
"""
Recursively lists all subclasses of `cls`.
"""
subclasses = cls.__subclasses__()
for subclass in cls.__subclasses__():
subclasses += _list_subclasses(subclass)
return subclasses
| 5,048 |
def main(from_json: bool = True, filename: str = DEFAULT_ARGS['pipeline_config_save_path']):
"""
Calls the specified pipeline.
:param filename: json filename
:param from_json: whether to run pipeline from json file or not
:return: pipeline call function
"""
# Parsing arguments
parser = HfArgumentParser((ModelArguments, DatabuilderArguments, TrainingArguments, PipelineArguments))
model_args, databuilder_args, training_args, pipeline_args = parser.parse_json_file(
json_file=filename) if from_json else parser.parse_args_into_dataclasses()
# Asserting specified pipeline does exist
assert pipeline_args.pipeline in PIPELINES, \
"Unknown pipeline {}, available pipelines are {}".format(pipeline_args.pipeline, list(PIPELINES.keys()))
# Logging session information
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
# Loading model & tokenizer
model = AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(training_args.output_dir)
# Getting specified pipeline
task_pipeline = PIPELINES[pipeline_args.pipeline]["impl"]
logger.info(f'Pipeline has been loaded and is ready for inference. ')
return task_pipeline(model=model, tokenizer=tokenizer)
| 5,049 |
def put(url, **kwargs):
"""PUT to a URL."""
return session.put(url, **kwargs)
| 5,050 |
def cod_records(mocker, cod_records_json):
"""Fixture for COD records metric instance."""
mocker.patch.object(RecordsMetric, 'collect',
new=records_collect(cod_records_json))
return metrics.records('cod_records', 'http://www.google.com')
| 5,051 |
def _validate_cluster_spec(cluster_spec, task_type, task_id):
"""Validates `cluster_spec`.
It checks:
0) None of `cluster_spec`, `task_type`, and `task_id` is `None`.
1) task type is one of "chief", "worker", "evaluator" or "ps".
2) whether there is such a task type as `task_type` in the `cluster_spec`.
3) whether there is at most one "chief" job.
4) whether there is at most one "evaluator" job.
5) whether the `task_id` is smaller than the number of tasks for that
particular `task_type`.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
task_type: string indicating the type of the task.
task_id: task_id: the id of the `task_type` in this cluster.
Throws:
ValueError: if `cluster_spec` fails any check.
"""
if cluster_spec is None or task_type is None or task_id is None:
raise ValueError(
"None of `cluster_spec`, `task_type`, and `task_id` should be `None`.")
cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()
if task_type not in ("chief", "worker", "evaluator", "ps"):
raise ValueError(
"Unrecognized task_type: %r, valid task types are: \"chief\", "
"\"worker\", \"evaluator\" and \"ps\"." % task_type)
if task_type and task_type not in cluster_spec:
raise ValueError("`task_type` %r not found in cluster_spec." % task_type)
if len(cluster_spec.get("chief", [])) > 1:
raise ValueError("There must be at most one 'chief' job.")
if len(cluster_spec.get("evaluator", [])) > 1:
raise ValueError("There must be at most one 'evaluator' job.")
if task_id >= len(cluster_spec[task_type]):
raise ValueError(
"The `task_id` %d exceeds the maximum id of %s." % (task_id, task_type))
| 5,052 |
def add_computed_document_features(input_dict):
"""
Add a computed feature to an Annotated Document Corpus.
:param adc: Annotated Document Corpus
:param feature_name: the name of the new feature
:param feature_computation: the 'New Feature Computation' expression
:param feature_spec: Comma separated list of names of old features used in the 'New Feature Computation'.
:return: new adc
"""
adc=input_dict["adc"]
compute_new_features(adc.documents,input_dict["feature_name"],input_dict["feature_computation"])
return {"adc":adc}
| 5,053 |
def get_lite_addons():
"""Load the lite addons file as a set."""
return set_from_file('validations/lite-addons.txt')
| 5,054 |
def GetApexPlayerStatus_TRN(api_key, platform, playerName):
"""
Get the status of a player on Apex Legends.
:param api_key: The API key to use.
:param platform: The platform to use.
:param playerName: The player name to use.
"""
platform = _fixplatform(platform)
if _checkplatform(platform):
url = f'https://public-api.tracker.gg/{API_VER}/apex/standard/profile/{platform}/{playerName}'
try:
res = get_request(url, {'TRN-Api-Key': api_key})
response = res[0]
if response.status_code == 200:
r = response.json()
list_legends_data = []
my_append = list_legends_data.append
for d in r['data']['segments']:
if d["type"] == "overview":
continue
else:
my_append(d)
res = ApexTrackerPy.Apexclass.TRN_PlayerStatus(
row_json=r,
elapsed_time=res[1],
platformUserId=r['data']['platformInfo']['platformUserId'],
activelegend=r['data']['metadata']['activeLegend'],
userlevel=r['data']['segments'][0]['stats']['level']['value'],
totalkill=r['data']['segments'][0]['stats']['kills']['value'],
totaldamage=r['data']['segments'][0]['stats']['damage']['value'],
totalheadshots=r['data']['segments'][0]['stats']['headshots']['value'],
CurrentRank=r['data']['segments'][0]['stats']['rankScore']['metadata']['rankName'],
CurrentRankScore=r['data']['segments'][0]['stats']['rankScore']['value'],
ArenaRankedName=r['data']['segments'][0]['stats']['arenaRankScore']['metadata']['rankName'],
ArenaRankedScore=r['data']['segments'][0]['stats']['arenaRankScore']['value'],
legends_json=list_legends_data,
)
return res
else:
raise Exception('HttpError!:The API returned status code '+str(response.status_code))
except Exception as e:
raise Exception('HttpError!:An error has occurred during the API call.\n'+str(e))
else:
raise Exception('Invalid platform!')
| 5,055 |
def subFactoryGet(fixture, **kwargs):
"""
To be used in a fixture definition (or in the kwargs of the fixture constructor) to reference another
fixture using the :meth:`.BaseFix.get` method.
:param fixture: Desired fixture
:param kwargs: *Optional:* keyword arguments to overwrite properties of this fixture
:return: Proxy object for the desired fixture including the altered properties
"""
return SubFactory(fixture, METHOD_GET, **kwargs)
| 5,056 |
def hello():
"""Return the dashboard homepage."""
return render_template('index.html')
| 5,057 |
def create_subparsers(subparsers):
"""Create subparsers for run command"""
parser = subparsers.add_parser(
'extractpipenv', help="Run notebook and check if it reproduces the same results"
)
parser.set_defaults(func=extract, command=parser)
parser.add_argument("-p", "--path", type=str, default="Pipfile.lock",
help="pipenv path")
parser.add_argument("-o", "--output", type=str, default="_julynter_requirements.txt",
help="requirements output")
| 5,058 |
def rapid_ping(client, dst_ip):
"""TODO: Docstring for ping.
:returns: TODO
"""
status = False
# run ping command with count 10 rapidly
command = 'exec cli ping ' + dst_ip + ' count 10 rapid'
stdin, stdout, stderr = client.exec_command(command, get_pty=True)
for line in iter(stdout.readline, ""):
if ("!!!!!!!!!" in line):
status = True
return status
| 5,059 |
def test_data_dir():
"""
Returns the path of test data files (e.g. Excel)
Used for test or notebook
"""
path = Path(__file__).parent.parent / 'testdata'
return path
| 5,060 |
def ingest_sequences(input_toml, click_loguru=None):
"""Marshal protein and genome sequence information."""
options = click_loguru.get_global_options()
user_options = click_loguru.get_user_global_options()
parallel = user_options["parallel"]
input_obj = TaxonomicInputTable(Path(input_toml), write_table=False)
input_table = input_obj.input_table
logger.info(f"Output directory: {input_obj.setname}/")
set_path = Path(input_obj.setname)
arg_list = []
for unused_i, row in input_table.iterrows():
arg_list.append(
(
row["path"],
row["fasta_url"],
row["gff_url"],
)
)
bag = db.from_sequence(arg_list)
file_stats = []
if not options.quiet:
logger.info(f"Extracting FASTA/GFF info for {len(arg_list)} genomes:")
ProgressBar().register()
if parallel:
file_stats = bag.map(
read_fasta_and_gff, verbose=options.verbose
).compute()
else:
for args in arg_list:
file_stats.append(
read_fasta_and_gff(args, verbose=options.verbose)
)
del arg_list
seq_stats = pd.DataFrame.from_dict([s[0] for s in file_stats]).set_index(
"path"
)
frag_stats = pd.DataFrame.from_dict([s[1] for s in file_stats]).set_index(
"path"
)
proteomes = pd.concat(
[input_table.set_index("path"), frag_stats, seq_stats], axis=1
)
proteomes.drop(["fasta_url", "gff_url"], axis=1, inplace=True)
proteomes = sort_proteome_frame(proteomes)
if not options.quiet:
with pd.option_context(
"display.max_rows", None, "display.float_format", "{:,.2f}%".format
):
print(
proteomes.drop(
[
col
for col in proteomes.columns
if col.startswith("phy")
],
axis=1,
)
)
proteome_table_path = set_path / PROTEOMES_FILE
logger.info(
f'Edit table of proteomes at "{proteome_table_path}"'
+ " to change preferences"
)
write_tsv_or_parquet(proteomes, proteome_table_path)
idx_start = 0
for df in [s[2] for s in file_stats]:
df.index = range(idx_start, idx_start + len(df))
idx_start += len(df)
frags = pd.concat([s[2] for s in file_stats], axis=0)
frags.index.name = "idx"
fragalyzer = FragmentCharacterizer()
frags = fragalyzer.assign_frag_properties(frags)
frags_path = set_path / FRAGMENTS_FILE
if not frags_path.exists():
logger.info(
f'Edit fragment table at "{frags_path}" to rename fragments'
)
write_tsv_or_parquet(frags, frags_path)
else:
new_frags_path = set_path / ("new." + FRAGMENTS_FILE)
logger.info(f'A fragments file table already exists at "{frags_path}"')
logger.info(f'A new file has been written at "{new_frags_path}".')
logger.info("Edit and rename it to rename fragments.")
write_tsv_or_parquet(frags, new_frags_path)
| 5,061 |
def classify_audio(model, callback,
labels_file=None,
inference_overlap_ratio=0.1,
buffer_size_secs=2.0,
buffer_write_size_secs=0.1,
audio_device_index=None):
"""
Continuously classifies audio samples from the microphone, yielding results
to your own callback function.
Your callback function receives the top classification result for every
inference performed. Although the audio sample size is fixed based on the
model input size, you can adjust the rate of inference with
``inference_overlap_ratio``. A larger overlap means the model runs inference
more frequently but with larger amounts of sample data shared between
inferences, which can result in duplicate results.
Args:
model (str): Path to a ``.tflite`` file.
callback: A function that takes two arguments (in order): a string for
the classification label, and a float for the prediction score.
The function must return a boolean: True to continue running
inference, or False to stop.
labels_file (str): Path to a labels file (required only if the model
does not include metadata labels). If provided, this overrides the
labels file provided in the model metadata.
inference_overlap_ratio (float): The amount of audio that should overlap
between each sample used for inference. May be 0.0 up to (but not
including) 1.0. For example, if set to 0.5 and the model takes a
one-second sample as input, the model will run an inference every
half second, or if set to 0, then there is no overlap and
it will run once each second.
buffer_size_secs (float): The length of audio to hold in the audio
buffer.
buffer_write_size_secs (float): The length of audio to capture into the
buffer with each sampling from the microphone.
audio_device_index (int): The audio input device index to use.
"""
if not model:
raise ValueError('model must be specified')
if buffer_size_secs <= 0.0:
raise ValueError('buffer_size_secs must be positive')
if buffer_write_size_secs <= 0.0:
raise ValueError('buffer_write_size_secs must be positive')
if inference_overlap_ratio < 0.0 or \
inference_overlap_ratio >= 1.0:
raise ValueError('inference_overlap_ratio must be in [0.0 .. 1.0)')
sample_rate_hz, channels = model_audio_properties(model)
if labels_file is not None:
labels = dataset.read_label_file(labels_file)
else:
labels = utils.read_labels_from_metadata(model)
print('Say one of the following:')
for value in labels.values():
print(' %s' % value)
interpreter = tflite.Interpreter(model_path=model)
interpreter.allocate_tensors()
# Input tensor
input_details = interpreter.get_input_details()
waveform_input_index = input_details[0]['index']
_, num_audio_frames = input_details[0]['shape']
waveform = np.zeros(num_audio_frames, dtype=np.float32)
# Output tensor
output_details = interpreter.get_output_details()
scores_output_index = output_details[0]['index']
ring_buffer_size = int(buffer_size_secs * sample_rate_hz)
frames_per_buffer = int(buffer_write_size_secs * sample_rate_hz)
remove_size = int((1.0 - inference_overlap_ratio) * len(waveform))
rb = ring_buffer.ConcurrentRingBuffer(
np.zeros(ring_buffer_size, dtype=np.float32))
def stream_callback(in_data, frame_count, time_info, status):
try:
rb.write(np.frombuffer(in_data, dtype=np.float32), block=False)
except ring_buffer.Overflow:
print('WARNING: Dropping input audio buffer', file=sys.stderr)
return None, pyaudio.paContinue
with pyaudio_stream(format=pyaudio.paFloat32,
channels=channels,
rate=sample_rate_hz,
frames_per_buffer=frames_per_buffer,
stream_callback=stream_callback,
input=True,
input_device_index=audio_device_index) as stream:
keep_listening = True
while keep_listening:
rb.read(waveform, remove_size=remove_size)
interpreter.set_tensor(waveform_input_index, [waveform])
interpreter.invoke()
scores = interpreter.get_tensor(scores_output_index)
scores = np.mean(scores, axis=0)
prediction = np.argmax(scores)
keep_listening = callback(labels[prediction], scores[prediction])
| 5,062 |
def is_leap_year(year: int) -> bool:
"""Returns whether the given year is a leap year"""
if year % 100 == 0:
return year % 400 == 0
else:
return year % 4 == 0
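# A few quick checks covering the century rule:
assert is_leap_year(2024)
assert not is_leap_year(1900)  # divisible by 100 but not by 400
assert is_leap_year(2000)      # divisible by 400
assert not is_leap_year(2023)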
| 5,063 |
def find_manifests(pkgnames, verbose=True):
""" return a dictionary keyed by pkgname with the found manifest's full path """
(abspath, dirname) = (os.path.abspath, os.path.dirname)
(ret,stdout,stderr) = spawn("git rev-parse --show-toplevel")
root = stdout[0] if ret == 0 else os.getcwd()
jsonfiles = all_json_files(root)
def ensure_json(pkgname):
return pkgname if pkgname.endswith(".json") else "{}.json".format(pkgname)
def match(pkg, jsonfile):
return jsonfile.endswith(ensure_json(pkg)) and is_manifest(jsonfile, verbose)
return {p:j for p in pkgnames for j in jsonfiles if match(p,j)}
| 5,064 |
def prepare_nginx_certs(cert_key_path, cert_path):
"""
Prepare the certs file with proper ownership
1. Remove nginx cert files in secret dir
2. Copy cert files on host filesystem to secret dir
3. Change the permission to 644 and ownership to 10000:10000
"""
host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
shutil.rmtree(host_ngx_real_cert_dir)
os.makedirs(host_ngx_real_cert_dir, mode=0o755)
real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
shutil.copy2(host_ngx_cert_key_path, real_key_path)
shutil.copy2(host_ngx_cert_path, real_crt_path)
os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
| 5,065 |
def is_valid_distribution(qk: np.ndarray, axis: int) -> bool:
"""valid is e.g.: [], [1.0], [0.5, 0.5]"""
"""not valid is e.g.: [-1.0], [0.6, 0.6], [np.nan], [np.nan, 0.6], [1.2]"""
assert 0 <= axis < len(qk.shape)
if qk.shape[axis] == 0:
return True
if np.any(qk < 0.0):
return False
if np.any(qk > 1.0):
return False
result = np.all(np.sum(qk, axis=axis) == 1)
return result
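# A few quick checks (exact binary fractions are used so the sum-to-one comparison
# is exact, matching the strict equality in the implementation above):
assert is_valid_distribution(np.array([0.5, 0.5]), axis=0)
assert is_valid_distribution(np.array([1.0]), axis=0)
assert is_valid_distribution(np.array([]), axis=0)  # empty along axis is valid
assert not is_valid_distribution(np.array([0.6, 0.6]), axis=0)
assert not is_valid_distribution(np.array([-1.0]), axis=0)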
| 5,066 |
def test_did_to_bytes():
"""Tests did to bytes conversion."""
id_test = secrets.token_hex(32)
did_test = "did:op:{}".format(id_test)
id_bytes = Web3.toBytes(hexstr=id_test)
assert did_to_id_bytes(did_test) == id_bytes
assert did_to_id_bytes(id_bytes) == id_bytes
with pytest.raises(ValueError):
assert did_to_id_bytes(id_test) == id_bytes
with pytest.raises(ValueError):
assert did_to_id_bytes("0x" + id_test)
with pytest.raises(ValueError):
did_to_id_bytes("did:opx:Somebadtexstwithnohexvalue0x123456789abcdecfg")
with pytest.raises(ValueError):
did_to_id_bytes("")
with pytest.raises(TypeError):
did_to_id_bytes(None)
with pytest.raises(TypeError):
did_to_id_bytes({})
with pytest.raises(TypeError):
did_to_id_bytes(42)
| 5,067 |
def srt(data, cube, **kwargs):
"""
Define Solar Rotational Tomography model with optional masking of
data and map areas. Can also define priors.
Parameters
----------
data: InfoArray
data cube
cube: FitsArray
map cube
obj_rmin: float
Object minimal radius. Areas below obj_rmin are masked out.
obj_rmax: float
Object maximal radius. Areas above obj_rmax are masked out.
data_rmin: float
Data minimal radius. Areas below data_rmin are masked out.
data_rmax: float
Data maximal radius. Areas above data_rmax are masked out.
mask_negative: boolean
If true, negative values in the data are masked out.
Returns
-------
P : The projector with masking
D : Smoothness priors
obj_mask : object mask array
data_mask : data mask array
"""
# Model : it is Solar rotational tomography, so obstacle="sun".
data_mask = solar.define_data_mask(data, **kwargs)
P = siddon_lo(data.header, cube.header, mask=data_mask, obstacle="sun")
D = smoothness_prior(cube, kwargs.get("height_prior", False))
P, D, obj_mask = _apply_object_mask(P, D, cube, **kwargs)
return P, D, obj_mask, data_mask
| 5,068 |
def get(status_id):
"""Fetches a status of previously submitted PushFunds request.
Returns a status of :func:`~pyvdp.visadirect.fundstransfer.MultiPushFundsTransactionsModel` request by transaction
identifier, returned with 202 response.
:param str status_id: **Required**. Transaction status identifier.
:return: Dictionary with VDP API response.
**Usage:**
.. code:: python
from pyvdp.visadirect.fundstransfer import multipushfundstransactions
status_id = "1491819372_186_81_l73c003_VDP_ARM"
            result = multipushfundstransactions.get(status_id)
print(result)
"""
query_string = '/' + status_id
c = VisaDirectDispatcher(resource='visadirect',
api='fundstransfer',
method='multipushfundstransactions',
http_verb='GET',
query_string=query_string)
return c.send()
| 5,069 |
def onInit():
""" Do everything that is needed to initialize processing (e.g.
open files, create handles, connect to systems...)
"""
print "onInit"
| 5,070 |
def do_image_tag_update(gc, args):
"""Update an image with the given tag."""
if not (args.image_id and args.tag_value):
utils.exit('Unable to update tag. Specify image_id and tag_value')
else:
gc.image_tags.update(args.image_id, args.tag_value)
image = gc.images.get(args.image_id)
image = [image]
columns = ['ID', 'Tags']
utils.print_list(image, columns)
| 5,071 |
def getLocalDir(jobdir, dirname=''):
"""
Assemble destination directory for job results.
Raises:
TargetDirExistsError: Destination for job results already exists.
"""
if dirname:
dstDir = os.path.join(dirname, jobdir)
else:
dstDir = os.path.join(os.getcwd(), jobdir)
if not os.path.exists(dstDir):
return dstDir
else:
raise TargetDirExistsError(dstDir)
| 5,072 |
def eval_pop_thread(args):
"""
Evaluates solutions, returns a list of floats, between 0 and 1
(probabilities of survival and reproduction).
"""
m_solutions, m_state_hash_table, id_mi = args[0], args[1], args[2]
step = int(N_POP/N_PROC)
prob_surv = np.zeros(step)
for index_sol in range(len(m_solutions)):
print("Solution ", index_sol, " Id: ", id_mi)
sol = m_solutions[index_sol]
tmp_points = 0
max_sol = np.max(sol)
for state_key in m_state_hash_table:
state = m_state_hash_table[state_key]
tmp_w = compute_heuristic(state_key, 'WHITE', sol)
tmp_b = compute_heuristic(state_key, 'BLACK', sol)
if tmp_w < 0 and state['value']['white'] / state['games'] > 0.5:
tmp_points += 1
elif tmp_w > 0 and state['value']['black'] / state['games'] > 0.5:
tmp_points += 1
elif 0+ERROR_ZERO * max_sol >= tmp_w >= 0-ERROR_ZERO * max_sol and \
state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5:
tmp_points += 1
if tmp_b < 0 and state['value']['black'] / state['games'] > 0.5:
tmp_points += 1
elif tmp_b > 0 and state['value']['white'] / state['games'] > 0.5:
tmp_points += 1
elif 0 + ERROR_ZERO * max_sol >= tmp_b >= 0-ERROR_ZERO * max_sol and \
state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5:
tmp_points += 1
tmp_points /= 2
prob_surv[index_sol] = tmp_points
return prob_surv
| 5,073 |
def test_sophos_firewall_ip_host_group_list_command(requests_mock):
"""
Scenario: List all IP host groups.
Given:
- User has provided valid credentials.
When:
- sophos_firewall_ip_host_group_list is called.
Then:
- Ensure number of items is correct.
- Ensure outputs prefix is correct.
- Ensure a sample value from the API matches what is generated in the context.
"""
from sophos_firewall import Client, sophos_firewall_ip_host_group_list_command
mock_response = load_mock_response('ip_host_group_list.xml')
requests_mock.get(REQUEST_URL, text=mock_response)
client = Client(base_url=BASE_URL, verify=False, auth=('uname', 'passwd'), proxy=False)
result = sophos_firewall_ip_host_group_list_command(client, 0, 10)
assert result.outputs_prefix == 'SophosFirewall.IPHostGroup'
assert len(result.outputs) == 2
assert result.outputs[0].get('Name') == 'ip_hosts'
| 5,074 |
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
| 5,075 |
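A standalone sketch of the exponentially weighted average used above to smooth per-batch inertia; names and values here are illustrative, not taken from scikit-learn:
def ewa_update(previous, new_value, batch_size, n_samples):
    # alpha weights the newest batch; capped at 1, as in the function above
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    return new_value if previous is None else previous * (1 - alpha) + new_value * alpha

ewa = None
for batch_inertia in [10.0, 9.0, 8.5, 8.4]:   # toy per-batch inertia values
    ewa = ewa_update(ewa, batch_inertia, batch_size=100, n_samples=1000)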
def setfig(fig,**kwargs):
"""Tim's handy plot tool
"""
if fig:
pl.figure(fig,**kwargs)
pl.clf()
elif fig==0:
pass
else:
pl.figure(**kwargs)
| 5,076 |
def test_incremental_file_insert(tmp_path):
"""Test inserting text into a file."""
temp_file = tmp_path / "output.txt"
test_file = fixture_dir / "pipeline_dag_test.md"
temp_file.write_text(test_file.read_text())
writer = file_processing.IncrementalFileInsert(str(temp_file), r"(?im)^## \d+\.\d+\.\d+")
writer("This is new\n")
assert temp_file.read_text() == "This is new\n\n## 0.0.1 (2022-01-01)\n\nThis stuff stays.\n"
| 5,077 |
def update_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):
"""
Updates the information for an ActiveMQ user.
See also: AWS API Documentation
Exceptions
:example: response = client.update_user(
BrokerId='string',
ConsoleAccess=True|False,
Groups=[
'string',
],
Password='string',
Username='string'
)
:type BrokerId: string
:param BrokerId: [REQUIRED] The unique ID that Amazon MQ generates for the broker.
:type ConsoleAccess: boolean
    :param ConsoleAccess: Enables access to the ActiveMQ Web Console for the ActiveMQ user.
:type Groups: list
:param Groups: The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.\n\n(string) --\n\n
:type Password: string
:param Password: The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas.
:type Username: string
:param Username: [REQUIRED] Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) -- HTTP Status Code 200: OK.
Exceptions
MQ.Client.exceptions.NotFoundException
MQ.Client.exceptions.BadRequestException
MQ.Client.exceptions.InternalServerErrorException
MQ.Client.exceptions.ConflictException
MQ.Client.exceptions.ForbiddenException
:return: {}
:returns:
(dict) -- HTTP Status Code 200: OK.
"""
pass
| 5,078 |
def rho_MC(delta, rhoeq=4.39e-38):
"""
returns the characteristic density of an
axion minicluster in [solar masses/km^3]
forming from an overdensity with
overdensity parameter delta.
rhoeq is the matter density at matter
radiation equality in [solar masses/km^3]
"""
return 140 * (1 + delta) * delta**3 * rhoeq
| 5,079 |
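A quick numeric check of the formula above (purely illustrative):
# delta = 1 gives 140 * 2 * 1 * rhoeq = 280 * rhoeq ~ 1.23e-35 solar masses / km^3
print(rho_MC(1.0))
# larger overdensities give much denser miniclusters (cubic in delta)
print(rho_MC(10.0))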
def _parse_parameter_from_value(
string: str,
parameter_to_wordlist_mapping: Dict[Union[TimeResolution, PeriodType, Parameter], List[List[str]]]
) -> Optional[Union[TimeResolution, PeriodType, Parameter]]:
"""
Function to parse a parameter from a given string based on a list of parameter enumerations and corresponding list
of words.
Args:
string: string containing the circa name of the parameter
parameter_to_wordlist_mapping: mapping of parameter and list of words
Returns:
None or one of the found enumerations
"""
string_split = string.split("_")
for parameter, wordlist in parameter_to_wordlist_mapping.items():
cond1 = len(wordlist) == len(string_split)
cond2 = _find_any_one_word_from_wordlist(string_split, wordlist)
if cond1 and cond2:
return parameter
return None
| 5,080 |
def start(mainGuiClass, **kwargs):
"""This method starts the webserver with a specific App subclass."""
debug = kwargs.pop('debug', False)
standalone = kwargs.pop('standalone', False)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
format='%(name)-16s %(levelname)-8s %(message)s')
logging.getLogger('remi').setLevel(
level=logging.DEBUG if debug else logging.INFO)
if standalone:
s = StandaloneServer(mainGuiClass, start=True, **kwargs)
else:
s = Server(mainGuiClass, start=True, **kwargs)
| 5,081 |
def test_list_ncname_min_length_4_nistxml_sv_iv_list_ncname_min_length_5_4(mode, save_output, output_format):
"""
Type list/NCName is restricted by facet minLength with value 10.
"""
assert_bindings(
schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-minLength-5.xsd",
instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-minLength-5-4.xml",
class_name="NistschemaSvIvListNcnameMinLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,082 |
def AffineMomentsF(I, returnShape=False):
"""
Input: - I: A 2D image
Output: - Out: A (1x6) vector containing 6 moment features
"""
# ************************************************************************
# Modified for MRI feature extraction by the Department of Diagnostic
# and Interventional Radiology, University Hospital of Tuebingen, Germany
# and the Institute of Signal Processing and System Theory University of
# Stuttgart, Germany. Last modified: November 2016
#
# This implementation is part of ImFEATbox, a toolbox for image feature
# extraction and analysis. Available online at:
# https://github.com/annikaliebgott/ImFEATbox
#
# Contact: annika.liebgott@iss.uni-stuttgart.de
# ************************************************************************
#
# Implementation based on: Tomas Suk, Jan Flusser, "Combined Blur and
# Affine Moment Invariants and their use in
# Pattern Recognition", Pattern Recognition,
# vol. 36, 2003.
#
# Implemented by: Asad Ali. Email: m.aliasad@yahoo.com
if returnShape:
return (6,1)
# x,y = np.nonzero(I[:,:,1]) TODO: how to handle color image?
x,y = np.nonzero(I)
pixelValues = I[x,y]
m00 = np.sum(pixelValues)
x = x - np.sum(x*pixelValues)/m00
y = y - np.sum(y*pixelValues)/m00
## calculate moments
# second order central moments
m20 = CentralMoments(x,y,2,0,pixelValues)
m02 = CentralMoments(x,y,0,2,pixelValues)
m11 = CentralMoments(x,y,1,1,pixelValues)
# third order central moments
m30 = CentralMoments(x,y,3,0,pixelValues)
m03 = CentralMoments(x,y,0,3,pixelValues)
m21 = CentralMoments(x,y,2,1,pixelValues)
m12 = CentralMoments(x,y,1,2,pixelValues)
# fouth order central moments
m40 = CentralMoments(x,y,4,0,pixelValues)
m04 = CentralMoments(x,y,0,4,pixelValues)
m31 = CentralMoments(x,y,3,1,pixelValues)
m13 = CentralMoments(x,y,1,3,pixelValues)
m22 = CentralMoments(x,y,2,2,pixelValues)
# fifth order central moments
m50 = CentralMoments(x,y,5,0,pixelValues)
m05 = CentralMoments(x,y,0,5,pixelValues)
m41 = CentralMoments(x,y,4,1,pixelValues)
m14 = CentralMoments(x,y,1,4,pixelValues)
m32 = CentralMoments(x,y,3,2,pixelValues)
m23 = CentralMoments(x,y,2,3,pixelValues)
# seventh order central moments
m70 = CentralMoments(x,y,7,0,pixelValues)
m07 = CentralMoments(x,y,0,7,pixelValues)
m16 = CentralMoments(x,y,1,6,pixelValues)
m61 = CentralMoments(x,y,6,1,pixelValues)
m52 = CentralMoments(x,y,5,2,pixelValues)
m25 = CentralMoments(x,y,2,5,pixelValues)
m43 = CentralMoments(x,y,4,3,pixelValues)
m34 = CentralMoments(x,y,3,4,pixelValues)
# for blur invariance we recompute certain values
m50 = m50 - (10*m30*m20/m00)
m41 = m41 - (2*(3*m21*m20 + 2*m30*m11)/m00)
m32 = m32 - ((3*m12*m20 + m30*m02 + 6*m21*m11)/m00)
m23 = m23 - ((3*m21*m02 + m03*m20 + 6*m12*m11)/m00)
m14 = m14 - (2*(3*m12*m02 + 2*m03*m11)/m00)
m05 = m05 - (10*m03*m02/m00)
# for blur invariance seventh order moments recomputed
m70 = m70 - 7 * (3*m50*m20 + 5*m30*m40)/m00 + (210*m30*m20**2 / m00**2)
m61 = m61 - (6*m50*m11 + 15*m41*m20 + 15*m40*m21 + 20*m31*m30)/m00 + 30*(3*m21*m20**2 + 4*m30*m20*m11)/m00**2
m52 = m52 - (m50*m02 +10*m30*m22 + 10*m32*m20 + 20*m31*m21 +10*m41*m11 + 5*m40*m12)/m00 + 10* (3*m12*m20**2 + 2*m30*m20*m02 + 4*m30*m11**2 + 12*m21*m20*m11)/m00**2
    m43 = m43 - (m40*m03 + 18*m21*m22 + 12*m31*m12 + 4*m30*m13 + 3*m41*m02 + 12*m32*m11 + 6*m23*m20)/m00 + 6*(m03*m20**2 + 4*m30*m11*m02 + 12*m21*m11**2 + 12*m12*m20*m11 + 6*m21*m02*m20)/m00**2
m34 = m34 - (m04*m30 + 18*m12*m22 + 12*m13*m21 + 4*m03*m31 + 3*m14*m20 + 12*m23*m11 + 6*m32*m02)/m00 + 6 *(m30*m02**2 + 4*m03*m11*m20 + 12*m12*m11**2 + 12*m21*m02*m11 + 6*m12*m20*m02)/m00**2
m25 = m25 - (m05*m20 + 10*m03*m22 + 10*m23*m02 + 20*m13*m12 + 10*m14*m11 + 5*m04*m21)/m00 + 10*(3*m21*m02**2 + 2*m03*m02*m20 +4*m03*m11**2 + 12*m12*m02*m11)/m00**2
m16 = m16 - (6*m05*m11 + 15*m14*m02 + 15*m04*m12 + 20*m13*m03)/m00 + 30*(3*m12*m02**2 + 4*m03*m02*m11)/m00**2
m07 = m07 - 7*(3*m05*m02 + 5*m03*m04)/m00 + (210*m03*m02**2 / m00**2)
# first invariant computed from the determinant of the polynomial
I1 = (m30**2*m03**2 - 6*m30*m21*m12*m03 + 4*m30*m12**3 + 4*m21**3*m03 - 3*m21**2*m12**2) / m00**10
I2 = (m50**2*m05**2 - 10*m50*m41*m14*m05 + 4*m50*m32*m23*m05 + 16*m50*m32*m14**2 - 12*m50*m23**2*m14 + 16*m41**2*m23*m05 + 9*m41**2*m14**2 - 12*m41*m32**2*m05 - 76*m41*m32*m23*m14 + 48*m41*m23**3 + 48*m32**3*m14 - 32*m32**2*m23**2)/m00**14
I3 = (m30**2*m12*m05 - m30**2*m03*m14 - m30*m21**2*m05 - 2*m30*m21*m12*m14 + 4*m30*m21*m03*m23 + 2*m30*m12**2*m23 - 4*m30*m12*m03*m32 + m30*m03**2*m41 + 3*m21**3*m14 - 6*m21**2*m12*m23 - 2*m21**2*m03*m32 + 6*m21*m12**2*m32 + 2*m21*m12*m03*m41 - m21*m03**2*m50 - 3*m12**3*m41 + m12**2*m03*m50) / m00**11
I4 = (2*m30*m12*m41*m05 - 8*m30*m12*m32*m14 + 6*m30*m12*m23**2 - m30*m03*m50*m05 + 3*m30*m03*m41*m14 - 2*m30*m03*m32*m23 - 2*m21**2*m41*m05 + 8*m21**2*m32*m14 - 6*m21**2*m23**2 + m21*m12*m50*m05 - 3*m21*m12*m41*m14 + 2*m21*m12*m32*m23 + 2*m21*m03*m50*m14 - 8*m21*m03*m41*m23 + 6*m21*m03*m32**2 - 2*m12**2*m50*m14 + 8*m12**2*m41*m23 - 6*m12**2*m32**2)/m00**12
I5 = (m30*m41*m23*m05 - m30*m41*m14**2 - m30*m32**2*m05 + 2*m30*m32*m23*m14 - m30*m23**3 - m21*m50*m23*m05 + m21*m50*m14**2 + m21*m41*m32*m05 - m21*m41*m23*m14 - m21*m32**2*m14 + m21*m32*m23**2 + m12*m50*m32*m05 - m12*m50*m23*m14 - m12*m41**2*m05 + m12*m41*m32*m14 + m12*m41*m23**2 - m12*m32**2*m23 - m03*m50*m32*m14 + m03*m50*m23**2 + m03*m41**2*m14 - 2*m03*m41*m32*m23 + m03*m32**3)/m00**13
I6 = (m70**2*m07**2 - 14*m70*m61*m16*m07 + 18*m70*m52*m25*m07 + 24*m70*m52*m16**2 - 10*m70*m43*m34*m07 - 60*m70*m43*m25*m16 + 40*m70*m34**2*m16 + 24*m61**2*m25*m07 + 25*m61**2*m16**2 - 60*m61*m52*m34*m07 - 234*m61*m52*m25*m16 + 40*m61*m43**2*m07 + 50*m61*m43*m34*m16 + 360*m61*m43*m25**2 - 240*m61*m34**2*m25 + 360*m52**2*m34*m16 + 81*m52**2*m25**2 - 240*m52*m43**2*m16 - 990*m52*m43*m34*m25 + 600*m52*m34**3 + 600*m43**3*m25 - 375*m43**2*m34**2)/m00**18
## return feature vector
Out = np.array([I1, I2, I3, I4, I5, I6])
return Out
# Calculate Central Moments
| 5,083 |
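The CentralMoments helper is referenced throughout but its definition is cut off after the trailing comment; a minimal sketch consistent with how it is called above (the caller centres x and y first, so a plain weighted moment suffices) could be:
def CentralMoments(x, y, p, q, pixelValues):
    # weighted moment of order (p, q); x and y are already centred by the caller
    return np.sum((x ** p) * (y ** q) * pixelValues)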
def render(scene):
"""
:param scene: Scene description
:return: [H, W, 3] image
"""
# Construct rays from the camera's eye position through the screen coordinates
camera = scene['camera']
eye, ray_dir, H, W = generate_rays(camera)
# Ray-object intersections
scene_objects = scene['objects']
obj_intersections, ray_dist, normals, material_idx = ray_object_intersections(eye, ray_dir, scene_objects)
# Valid distances
pixel_dist = ray_dist
valid_pixels = (camera['near'] <= ray_dist) & (ray_dist <= camera['far'])
pixel_dist[~valid_pixels] = np.inf # Will have to use gather operation for TF and pytorch
# Nearest object needs to be compared for valid regions only
nearest_obj = np.argmin(pixel_dist, axis=0)
C = np.arange(0, nearest_obj.size) # pixel idx
# Create depth image for visualization
# use nearest_obj for gather/select the pixel color
im_depth = pixel_dist[nearest_obj, C].reshape(H, W)
##############################
# Fragment processing
##############################
# Lighting
color_table = scene['colors']
light_pos = scene['lights']['pos']
light_clr_idx = scene['lights']['color_idx']
light_colors = color_table[light_clr_idx]
# Generate the fragments
"""
Get the normal and material for the visible objects.
"""
frag_normals = normals[nearest_obj, C]
frag_pos = obj_intersections[nearest_obj, C]
frag_albedo = scene['materials']['albedo'][material_idx[nearest_obj]]
# Fragment shading
light_dir = light_pos[np.newaxis, :] - frag_pos[:, np.newaxis, :]
light_dir_norm = np.sqrt(np.sum(light_dir ** 2, axis=-1))[..., np.newaxis]
    light_dir_norm[(light_dir_norm <= 0) | np.isinf(light_dir_norm)] = 1
light_dir = ops.nonzero_divide(light_dir, light_dir_norm)
im_color = np.sum(frag_normals[:, np.newaxis, :] * light_dir, axis=-1)[..., np.newaxis] * \
light_colors[np.newaxis, ...] * frag_albedo[:, np.newaxis, :]
im = np.sum(im_color, axis=1).reshape(H, W, 3)
im[(im_depth < camera['near']) | (im_depth > camera['far'])] = 0
# clip negative values
im[im < 0] = 0
# Tonemapping
if 'tonemap' in scene:
im = tonemap(im, **scene['tonemap'])
return {'image': im,
'depth': im_depth,
'ray_dist': ray_dist,
'obj_dist': pixel_dist,
'nearest': nearest_obj.reshape(H, W),
'ray_dir': ray_dir,
'valid_pixels': valid_pixels
}
| 5,084 |
def state_fidelity(state1, state2):
"""Return the state fidelity between two quantum states.
Either input may be a state vector, or a density matrix. The state
fidelity (F) for two density matrices is defined as::
F(rho1, rho2) = Tr[sqrt(sqrt(rho1).rho2.sqrt(rho1))] ^ 2
For a pure state and mixed state the fidelity is given by::
F(|psi1>, rho2) = <psi1|rho2|psi1>
For two pure states the fidelity is given by::
F(|psi1>, |psi2>) = |<psi1|psi2>|^2
Args:
state1 (array_like): a quantum state vector or density matrix.
state2 (array_like): a quantum state vector or density matrix.
Returns:
array_like: The state fidelity F(state1, state2).
"""
# convert input to numpy arrays
s1 = np.array(state1)
s2 = np.array(state2)
# fidelity of two state vectors
if s1.ndim == 1 and s2.ndim == 1:
return np.abs(s2.conj().dot(s1)) ** 2
# fidelity of vector and density matrix
elif s1.ndim == 1:
# psi = s1, rho = s2
return np.abs(s1.conj().dot(s2).dot(s1))
elif s2.ndim == 1:
# psi = s2, rho = s1
return np.abs(s2.conj().dot(s1).dot(s2))
# fidelity of two density matrices
s1sq = _funm_svd(s1, np.sqrt)
s2sq = _funm_svd(s2, np.sqrt)
return np.linalg.norm(s1sq.dot(s2sq), ord='nuc') ** 2
| 5,085 |
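A small worked example of the pure-state case F = |<psi1|psi2>|^2, assuming numpy is imported as np and the function above is in scope:
psi0 = np.array([1.0, 0.0])                    # |0>
psi_plus = np.array([1.0, 1.0]) / np.sqrt(2)   # |+> = (|0> + |1>)/sqrt(2)
print(state_fidelity(psi0, psi0))       # 1.0, identical states
print(state_fidelity(psi0, psi_plus))   # 0.5, since |<0|+>|^2 = 1/2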
def linear_regression(x,y,title='',xlabel='X',ylabel='Y'):
"""Does simple linear regression"""
b = estimate_coef(x, y)
print("Estimated eqauation:\ny = %s + %s*x + e"%(b[0], b[1]))
x_a,y_a = give_time_series(x,y)
plot_regression_line(x_a, y_a, b, title=title,xlabel=xlabel,ylabel=ylabel)
| 5,086 |
def process_state(request):
"""Procesa una request GET o POST para consultar datos de provincias.
En caso de ocurrir un error de parseo, se retorna una respuesta HTTP 400.
Args:
request (flask.Request): Request GET o POST de flask.
Returns:
flask.Response: respuesta HTTP
"""
return _process_entity(request, N.STATES, params.PARAMS_STATES, {
N.ID: 'ids',
N.NAME: 'name',
N.INTERSECTION: 'geo_shape_ids',
N.EXACT: 'exact',
N.ORDER: 'order',
N.FIELDS: 'fields',
N.OFFSET: 'offset',
N.MAX: 'size'
})
| 5,087 |
def is_ascii(string):
"""Return True is string contains only is us-ascii encoded characters."""
def is_ascii_char(char):
return 0 <= ord(char) <= 127
return all(is_ascii_char(char) for char in string)
| 5,088 |
def _get_predictions_from_data(
model: Union[Model, SKLEARN_MODELS],
data: Union[
tf.data.Dataset,
Tuple[Inputs, Outputs],
Tuple[Inputs, Outputs, Paths],
],
batch_size: Optional[int],
tensor_maps_in: Optional[List[TensorMap]],
tensor_maps_out: Optional[List[TensorMap]],
) -> Tuple[Predictions, Outputs, Optional[Paths]]:
"""
Get model predictions, output data, and paths from data source. Data must not
be infinite.
:param model: Model
:param data: finite tensorflow Dataset or tuple of inputs, outputs, and
optionally paths
:param batch_size: Number of samples to use in a batch, required if data is a
tuple input and output numpy arrays
:return: Tuple of predictions as a list of numpy arrays, a dictionary of
output data, and optionally paths
"""
if isinstance(data, tuple):
if len(data) == 2:
input_data, output_data = data
paths = None
elif len(data) == 3:
input_data, output_data, paths = data
else:
raise ValueError(
f"Expected 2 or 3 elements to dataset tuple, got {len(data)}",
)
if batch_size is None:
raise ValueError(
"When providing dataset as tuple of inputs and outputs, batch_size "
"is required, got {batch_size}",
)
y_predictions = model.predict(x=input_data, batch_size=batch_size)
elif isinstance(data, tf.data.Dataset):
y_prediction_batches = defaultdict(list)
output_data_batches = defaultdict(list)
id_batches = []
if isinstance(model, Model):
for batch in data:
output_data_batch = batch[BATCH_OUTPUT_INDEX]
for output_name, output_tensor in output_data_batch.items():
output_data_batches[output_name].append(output_tensor.numpy())
batch_y_predictions = model.predict(batch[BATCH_INPUT_INDEX])
if not isinstance(batch_y_predictions, list):
batch_y_predictions = [batch_y_predictions]
for prediction_idx, batch_y_prediction in enumerate(
batch_y_predictions,
):
y_prediction_batches[prediction_idx].append(batch_y_prediction)
if len(batch) == 3:
id_batches.append(batch[BATCH_IDS_INDEX].numpy().astype(str))
y_predictions = [
np.concatenate(y_prediction_batches[prediction_idx])
for prediction_idx in sorted(y_prediction_batches)
]
elif isinstance(model, SKLEARN_MODELS.__args__):
data = get_dicts_of_arrays_from_dataset(dataset=data)
assert all(tm.axes == 1 for tm in tensor_maps_in + tensor_maps_out)
assert len(tensor_maps_out) == 1
# Isolate arrays from datasets for desired tensor maps
X = get_array_from_dict_of_arrays(
tensor_maps=tensor_maps_in,
data=data[BATCH_INPUT_INDEX],
drop_redundant_columns=False,
)
y_predictions = model.predict_proba(X)
for output_name, output_tensor in data[BATCH_OUTPUT_INDEX].items():
output_data_batches[output_name].append(output_tensor)
if len(data) == 3:
id_batches.append(data[BATCH_IDS_INDEX])
else:
raise NotImplementedError(
f"Cannot perform inference on model of type {type(model).__name}",
)
# Iterate over batches and concatenate into dict of arrays
output_data = {
output_name: np.concatenate(output_data_batches[output_name])
for output_name in output_data_batches
}
paths = None if len(id_batches) == 0 else np.concatenate(id_batches).tolist()
else:
raise NotImplementedError(
"Cannot get data for inference from data of type "
"{type(data).__name__}: {data}",
)
if not isinstance(y_predictions, list):
y_predictions = [y_predictions]
return y_predictions, output_data, paths
| 5,089 |
def process_ud_treebank(treebank, udbase_dir, tokenizer_dir, short_name, short_language, augment=True):
"""
Process a normal UD treebank with train/dev/test splits
SL-SSJ and other datasets with inline modifications all use this code path as well.
"""
prepare_ud_dataset(treebank, udbase_dir, tokenizer_dir, short_name, short_language, "train", augment)
prepare_ud_dataset(treebank, udbase_dir, tokenizer_dir, short_name, short_language, "dev", augment)
prepare_ud_dataset(treebank, udbase_dir, tokenizer_dir, short_name, short_language, "test", augment)
| 5,090 |
def execute():
"""Set default module for standard Web Template, if none."""
vmraid.reload_doc('website', 'doctype', 'Web Template Field')
vmraid.reload_doc('website', 'doctype', 'web_template')
standard_templates = vmraid.get_list('Web Template', {'standard': 1})
for template in standard_templates:
doc = vmraid.get_doc('Web Template', template.name)
if not doc.module:
doc.module = 'Website'
doc.save()
| 5,091 |
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height):
"""
Read and return the viewport data
"""
VIEW_PATH = '../../Viewport/'
view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1')
if dataset == 1:
max_frame = int(view_info[-1][0]*1.0*fps/milisec)
for i in range(len(view_info)-1):
frame = int(view_info[i][0]*1.0*fps/milisec)
frame += int(offset*1.0*fps/milisec)
frame_nos.append(frame)
if(frame > max_frame):
break
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
elif dataset == 2:
for k in range(len(view_info)-1):
if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60:
max_frame = int(view_info[k][0]*1.0*fps/milisec)
break
for k in range(len(view_info)-1):
if view_info[k][0]<=offset and view_info[k+1][0]>offset:
min_index = k+1
break
prev_frame = 0
for i in range(min_index,len(view_info)-1):
frame = int((view_info[i][0])*1.0*fps/milisec)
if frame == prev_frame:
continue
if(frame > max_frame):
break
frame_nos.append(frame)
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
prev_frame = frame
return data, frame_nos, max_frame
| 5,092 |
def MinimizeParameters(metaTemplate, parametersLines, noOfAttributes):
""" Reduces the number of parameters required by replacing different parameters with a single parameter if they agree on all devices."""
lineParamMap = {}
for block in metaTemplate.blocks:
for line in block.lines:
for attribute in range(noOfAttributes):
if "P" in line[attribute]:
lineParamMap.setdefault(
line[LINENUM], set()).add(line[attribute])
for device in parametersLines.lineMapping:
myParam = set()
for lineNumber in parametersLines.lineMapping[device]:
if lineNumber in lineParamMap:
myParam.update(lineParamMap.get(lineNumber))
extraParams = set(parametersLines.parameters[device].keys()) - myParam
[parametersLines.parameters[device].pop(
extra, None) for extra in extraParams]
common = parametersLines.commonValueParams()
for x in common:
ModifyErase(metaTemplate, parametersLines, x[0], x[1:], noOfAttributes)
parametersLines.predicateGenerator(
metaTemplate.blocks[-1].lines[-1][LINENUM])
parametersLines.groupAndSortPredicates(metaTemplate)
RemapParameters(metaTemplate, parametersLines, noOfAttributes)
| 5,093 |
def test_binarytree_repr_as_expected():
""" After instantiating with a list, does the BinaryTree repr look right
"""
input = [13, 42, 7]
expected = '<BinaryTree | Root: 13>'
r = BinaryTree(input)
actual = repr(r)
assert expected == actual
| 5,094 |
def tail_owner_changed(tail, owner_func):
"""This hook is for when a chunk is moved to another function and is for older versions of IDA.
We simply iterate through the new chunk, decrease all of its tags in its
previous function's context, and increase their reference within the new
function's context.
"""
# XXX: this is for older versions of IDA
# this is easy as we just need to walk through tail and add it
# to owner_func
for ea in database.address.iterate(interface.range.bounds(tail)):
for k in database.tag(ea):
internal.comment.contents.dec(ea, k)
internal.comment.contents.inc(ea, k, target=owner_func)
logging.debug(u"{:s}.tail_owner_changed({:#x}, {:#x}) : Exchanging (increasing) reference count for contents tag {!s} and (decreasing) reference count for contents tag {!s}.".format(__name__, interface.range.start(tail), owner_func, utils.string.repr(k), utils.string.repr(k)))
continue
return
| 5,095 |
def getListGroups(config):
"""
Get list of groups
"""
print("Retrieve list of group")
data = None
grpList = None
__grpList = gitlabGroupList()
if (DUMMY_DATA):
curDir = os.path.dirname(os.path.abspath(__file__))
testFile = getFullFilePath(GROUPS_TEST_FILE)
with open (testFile, 'rt') as f:
data = f.read()
f.close()
else:
# retrieve data from server
url = getApiUrl(config, "groups")
logD("URL " + url)
token = config.getToken()
hdrs = {"PRIVATE-TOKEN":config.getToken()}
__totalPage = 0
__page = 1
while True:
logD("Page %d" % (__page))
params = {'page': __page}
logD("header %s" % hdrs)
resp = requests.get(url, headers=hdrs, params=params)
logD("resp status_code %s" % resp.status_code)
if (resp.status_code == 200):
data = resp.content
logD (resp.headers)
if (len(resp.headers.get('X-Next-Page')) > 0):
__page = int(resp.headers.get('X-Next-Page'))
else:
__page = 0
logD("next page %d" % (__page))
else:
__page = 0
break
if (data is not None) and (len(data) > 0):
logD("data %s" % data)
__grpList.parseData(data)
__totalPage += 1
if (config.getMaxGroup() is not None) and (__grpList.getLen() >= config.getMaxGroup()):
print("Reach max %s/%s" % (__grpList.getLen(), config.getMaxGroup()))
break
if (__page == 0): #ok, reach end, out
break
if (__totalPage > 500): # 500 pages? no way, something wrong, out
print("SOMETHING WRONG, total is to big, out")
break
print("Total pages %d" % (__totalPage))
return __grpList
| 5,096 |
def call(args, version):
"""Converts callList into functionString."""
# Find keyword
keywords = [i for i in args if i in Variables.keywords(version)]
# Too many keywords is a syntax error.
if len(keywords) > 1:
raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args))
# No keywords creates a tuple object.
elif len(keywords) == 0:
return "(" + ",".join(formatS(i, version) for i in args) + ")"
keyword = keywords[0]
# Get and fix data for this keyword.
data = copy.copy(Variables.default)
data.update(Variables.keywords(version)[keyword])
# Create dict of values
current = args.index(keyword)
nodes = copy.copy(data["default"])
for index in range(len(args)):
value = "$" if index >= current else "-$"
value += str(abs(index - current))
if args[index] != keyword:
nodes[value] = args[index]
# Force strings into quoted arguments.
for string in data["string"]:
nodes[string] = "'" + str(nodes[string]).replace("'", "\\'") + "'"
# Claim keyword arguments.
kwargs = {}
for key, value in data["kwargs"].items():
if value in nodes:
new_value = nodes[value]
del nodes[value]
else:
new_value = value
kwargs[key] = formatS(new_value, version)
arguments = []
# Insert positional arguments
for key in data["args"]:
if key in nodes:
arguments.append(formatS(nodes[key], version))
del nodes[key]
else:
arguments.append(formatS(key, version))
# Insert ... arguments.
if data["all"]:
for key in sorted(nodes.keys(), key=lambda x: int(x.replace("$", ""))):
arguments.append(formatS(nodes[key], version))
del nodes[key]
if len(nodes) > 0:
raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(" ".join(args)))
# Insert keyword arguments.
for key in sorted(kwargs.keys()):
arguments.append(str(key) + "=" + str(kwargs[key]))
return data["f"] + "(" + ",".join(arguments) + ")"
| 5,097 |
def get_object_locations(obj_refs: List[ObjectRef], timeout_ms: int = -1
) -> Dict[ObjectRef, Dict[str, Any]]:
"""Lookup the locations for a list of objects.
It returns a dict maps from an object to its location. The dict excludes
those objects whose location lookup failed.
Args:
object_refs (List[ObjectRef]): List of object refs.
        timeout_ms (int): The maximum amount of time in milliseconds to wait
before returning. Wait infinitely if it's negative.
Returns:
A dict maps from an object to its location. The dict excludes those
objects whose location lookup failed.
The location is stored as a dict with following attributes:
- node_ids (List[str]): The hex IDs of the nodes that have a
copy of this object.
- object_size (int): The size of data + metadata in bytes.
Raises:
RuntimeError: if the processes were not started by ray.init().
ray.exceptions.GetTimeoutError: if it couldn't finish the
request in time.
"""
if not ray.is_initialized():
raise RuntimeError("Ray hasn't been initialized.")
return ray.worker.global_worker.core_worker.get_object_locations(
obj_refs, timeout_ms)
| 5,098 |
def remove_host(plateform=None, name=None, environment=None):
""" Remove Host Object from Platform Object attribute hosts and return updated Platform Object.
:param: plateform: host's plateform (same as type yaml file) passed by user
:param: name: host's name passed by user
    :param: environment: host's environment passed by user
:type: plateform: list of one str
:type: name: list of one str
:type: environment: list of one str
:return: Updated Plateform
:rtype: Plateform Object
.. seealso:: heimdall.conf.hosts.getPlateformObject(), heimdall.core.plateform.Plateform
"""
from conf.hosts import getPlateformObject
from core.exceptions import EnvironmentDoesNotExist
p = getPlateformObject(plateform[0])
try:
if not p.check_environment(environment[0]):
raise EnvironmentDoesNotExist('Environment %s in plateform %s does not exists!' % (environment[0], p.name),
p.name)
except EnvironmentDoesNotExist as ede:
        print(ede)
exit(ede.code)
if name[0] == -1: # remove all
p.environment[environment[0]] = []
else:
[p.remove_host(host) for host in p.environment[environment[0]] for n in name if host.name == n]
return p
| 5,099 |