content | id
---|---
def grid_square_neighbors_1d_from(shape_slim):
"""
From a (y,x) grid of coordinates, determine the 8 neighbors of every coordinate on the grid which has 8
neighboring (y,x) coordinates.
Neighbor indexes use the 1D index of the pixel on the masked grid counting from the top-left right and down.
For example:
x x x x x x x x x x
x x x x x x x x x x This is an example mask.Mask2D, where:
x x x x x x x x x x
x x x 0 1 2 3 x x x x = `True` (Pixel is masked and excluded from the grid)
x x x 4 5 6 7 x x x o = `False` (Pixel is not masked and included in the grid)
x x x 8 9 10 11 x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
On the grid above, the grid cells at 1D indexes 5 and 6 have 8 neighboring pixels and their entries in the
grid_neighbors_1d array will be:
grid_neighbors_1d[0,:] = [0, 1, 2, 4, 6, 8, 9, 10]
grid_neighbors_1d[1,:] = [1, 2, 3, 5, 7, 9, 10, 11]
The other pixels will be included in the grid_neighbors_1d array, but correspond to `False` entries in
grid_has_neighbors and be omitted from calculations that use the neighbor array.
Parameters
----------
shape_slim : int
The total number of pixels on the uniform square grid; its square root gives the length of each edge.
"""
shape_of_edge = int(np.sqrt(shape_slim))
has_neighbors = np.full(shape=shape_slim, fill_value=False)
neighbors_1d = np.full(shape=(shape_slim, 8), fill_value=-1.0)
index = 0
for y in range(shape_of_edge):
for x in range(shape_of_edge):
if y > 0 and x > 0 and y < shape_of_edge - 1 and x < shape_of_edge - 1:
neighbors_1d[index, 0] = index - shape_of_edge - 1
neighbors_1d[index, 1] = index - shape_of_edge
neighbors_1d[index, 2] = index - shape_of_edge + 1
neighbors_1d[index, 3] = index - 1
neighbors_1d[index, 4] = index + 1
neighbors_1d[index, 5] = index + shape_of_edge - 1
neighbors_1d[index, 6] = index + shape_of_edge
neighbors_1d[index, 7] = index + shape_of_edge + 1
has_neighbors[index] = True
index += 1
return neighbors_1d, has_neighbors
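# Illustrative usage (a sketch): on a 4x4 uniform grid flattened to shape_slim=16 pixels,
# only the four interior pixels have a full set of 8 neighbors.
example_neighbors, example_has_neighbors = grid_square_neighbors_1d_from(shape_slim=16)
assert list(np.flatnonzero(example_has_neighbors)) == [5, 6, 9, 10]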
| 5,400 |
def get_total_entries(df, pdbid, cdr):
"""
Get the total number of entries of the particular CDR and PDBID in the database
:param df: dataframe.DataFrame
:rtype: int
"""
return len(get_all_entries(df, pdbid, cdr))
| 5,401 |
def _gf2mulxinvmod(a,m):
"""
Computes ``a * x^(-1) mod m``.
*NOTE*: Does *not* check whether `a` is smaller in degree than `m`.
Parameters
----------
a, m : integer
Polynomial coefficient bit vectors.
Polynomial `a` should be smaller degree than `m`.
Returns
-------
c : integer
Polynomial coefficient bit vector of ``c = a * x^(-1) mod m``.
"""
c = (a ^ ((a&1)*m)) >> 1
return c
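# Quick sanity check (illustrative): in GF(2)[x] modulo m = x^2 + x + 1 (0b111),
# the inverse of x is x + 1, so these divisions by x should hold.
assert _gf2mulxinvmod(0b01, 0b111) == 0b11  # 1 * x^(-1) = x + 1
assert _gf2mulxinvmod(0b10, 0b111) == 0b01  # x * x^(-1) = 1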
| 5,402 |
def parse_Transpose(onnx_node, weights, graph):
"""
parse Transpose to Permute
:param onnx_node:
:param weights:
:param graph:
:return:
"""
onnx_node['visited'] = True
onnx_node['ak_type'] = 'Permute'
ak_attr = onnx_node['ak_attr']
data = onnx_node['onnx_attr']['perm']
shape = []
if len(data) == 5 and data[0] == 0:
shape = [data[1]-1, data[2]-1, data[3]-1, data[4]-1]
# elif len(data) >= 5:
# shape = data
# print ('Permute does not support 5 dims permute ', data)
# # exit(0)
else:
shape = data
# print('data: ', data)
# print('shape: ', shape)
ak_attr['shape'] = shape
| 5,403 |
def find_cuda_family_config(repository_ctx, script_path, cuda_libraries):
"""Returns CUDA config dictionary from running find_cuda_config.py"""
python_bin = repository_ctx.which("python3")
exec_result = execute(repository_ctx, [python_bin, script_path] + cuda_libraries)
if exec_result.return_code:
errmsg = err_out(exec_result)
auto_configure_fail("Failed to run find_cuda_config.py: {}".format(errmsg))
# Parse the dict from stdout.
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()])
| 5,404 |
def _prediction_feature_weights(booster, dmatrix, n_targets,
feature_names, xgb_feature_names):
""" For each target, return score and numpy array with feature weights
on this prediction, following an idea from
http://blog.datadive.net/interpreting-random-forests/
"""
# XGBClassifier does not have pred_leaf argument, so use booster
leaf_ids, = booster.predict(dmatrix, pred_leaf=True)
xgb_feature_names = {f: i for i, f in enumerate(xgb_feature_names)}
tree_dumps = booster.get_dump(with_stats=True)
assert len(tree_dumps) == len(leaf_ids)
target_feature_weights = partial(
_target_feature_weights,
feature_names=feature_names, xgb_feature_names=xgb_feature_names)
if n_targets > 1:
# For multiclass, XGBoost stores dumps and leaf_ids in a 1d array,
# so we need to split them.
scores_weights = [
target_feature_weights(
leaf_ids[target_idx::n_targets],
tree_dumps[target_idx::n_targets],
) for target_idx in range(n_targets)]
else:
scores_weights = [target_feature_weights(leaf_ids, tree_dumps)]
return scores_weights
| 5,405 |
def group_by_repo(repository_full_name_column_name: str,
repos: Sequence[Collection[str]],
df: pd.DataFrame,
) -> List[np.ndarray]:
"""Group items by the value of their "repository_full_name" column."""
if df.empty:
return [np.array([], dtype=int)] * len(repos)
df_repos = df[repository_full_name_column_name].values.astype("S")
repos = [
np.array(repo_group if not isinstance(repo_group, set) else list(repo_group), dtype="S")
for repo_group in repos
]
unique_repos, imap = np.unique(np.concatenate(repos), return_inverse=True)
if len(unique_repos) <= len(repos):
matches = np.array([df_repos == repo for repo in unique_repos])
pos = 0
result = []
for repo_group in repos:
step = len(repo_group)
cols = imap[pos:pos + step]
group = np.flatnonzero(np.sum(matches[cols], axis=0, dtype=bool))
pos += step
result.append(group)
else:
result = [
np.flatnonzero(np.in1d(df_repos, repo_group))
for repo_group in repos
]
return result
| 5,406 |
def test_count_complete(opt, server_url):
"""Starts two worlds even though only one is requested by using the
count_complete flag.
"""
global completed_threads
print('{} Starting'.format(COUNT_COMPLETE_TEST))
opt['task'] = COUNT_COMPLETE_TEST
opt['count_complete'] = True
opt['num_conversations'] = 1
hit_id = FAKE_HIT_ID.format(COUNT_COMPLETE_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 2)
last_command = None
message_num_1 = 0
message_num_2 = 0
expected_messages = [TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback_1(packet):
nonlocal last_command
nonlocal message_num_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_1], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_1],
message_num_1,
packet.data['text']
)
message_num_1 += 1
test_agent_1 = \
MockAgent(opt, hit_id, assign_id_1, worker_id_1, task_group_id)
message_handler = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Run through onboarding
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_1)
test_agent_1.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {}' \
'when expecting 3'.format(len(assign_state_1.messages))
# Start the second agent while the first is still waiting
def msg_callback_2(packet):
nonlocal last_command
nonlocal message_num_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_2], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_2],
message_num_2,
packet.data['text']
)
message_num_2 += 1
test_agent_2 = \
MockAgent(opt, hit_id, assign_id_2, worker_id_2, task_group_id)
message_handler = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Run through onboarding
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_2)
test_agent_2.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {}' \
'when expecting 3'.format(len(assign_state_2.messages))
test_agent_2.send_message('Hello2', dummy)
test_agent_2.always_beat = False
# Finish agent 1's task
test_agent_1.send_message('Hello2', dummy)
test_agent_1.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
# Wait for both to disconnect
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert len(assign_state_1.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert len(assign_state_2.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager.started_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert mturk_manager.completed_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert message_num_1 == 2, 'Not all messages were successfully processed'
assert message_num_2 == 2, 'Not all messages were successfully processed'
completed_threads[COUNT_COMPLETE_TEST] = True
pass
| 5,407 |
def _row_or_col_is_header(s_count, v_count):
"""
Utility function for subdivide
Heuristic for whether a row/col is a header or not.
"""
if s_count == 1 and v_count == 1:
return False
else:
return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.
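# Illustrative check (assuming s_count counts string-like cells and v_count counts value
# cells): three strings against one value clears the 2/3 threshold, a 1/1 split does not.
assert _row_or_col_is_header(s_count=3, v_count=1)
assert not _row_or_col_is_header(s_count=1, v_count=1)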
| 5,408 |
def remove_tau(aut):
"""
Modify automaton in-place, removing the 'tau' event.
Will fail if there is more than one outgoing 'tau' event.
@param aut: Existing automaton.
@type aut: L{BaseAutomaton}
"""
coll = aut.collection
tau_event = coll.events['tau']
# Count edges from initial state
tau_edge_count = 0 #: Number of 'tau' edges.
tau_edge_dest = None #: A destination of a 'tau' edge.
for edge in aut.initial.get_outgoing():
assert edge.label is tau_event
tau_edge_count = tau_edge_count + 1
tau_edge_dest = edge.succ
if tau_edge_count > 1:
msg = "Cannot remove 'tau' event, there are %d 'tau' edges from " \
"initial state while expected exactly one." % tau_edge_count
raise exceptions.ModelError(msg)
assert tau_edge_dest != aut.initial
aut.remove_state(aut.initial)
aut.set_initial(tau_edge_dest)
# Check there are no other 'tau' events in the automaton.
for state in aut.get_states():
for edge in state.get_outgoing():
assert edge.label is not tau_event
aut.alphabet.remove(tau_event)
| 5,409 |
def test_can_change_vport_attributes(vports):
"""
vport attributes can be changed from client
:given: session: a couple of vports
:when: perform vport attributes operation
:then: should edit a vport
"""
vport_1, vport_2 = vports
vport_1.TxMode = 'sequential'
vport_1.TransmitIgnoreLinkStatus = True
vport_1.L1Config.Ethernet.Media = 'fiber'
vport_1.L1Config.Ethernet.AutoNegotiate = False
vport_1.L1Config.CurrentType = 'krakenFourHundredGigLan'
| 5,410 |
def remove_deb_packages(packages):
"""Remove debian packages listed in space-separated string"""
print ' ---- Remove debian packages ---- \n', packages, '\n'
sudo('apt-get -y remove %s' % (packages))
| 5,411 |
def values_hash(array, step=0):
"""
Return consistent hash of array values
:param array array: (n,) array with or without structure
:param uint64 step: optional step number to modify hash values
:returns: (n,) uint64 array
"""
cls, cast_dtype, view_dtype = _get_optimal_cast(array)
array = cls._cast(array, cast_dtype, view_dtype)
return cls._hash(array, UINT64(step))
| 5,412 |
async def get_molecule_image(optimization_id: str):
"""Render the molecule associated with a particular bespoke optimization to an
SVG file."""
task = _get_task(optimization_id=optimization_id)
svg_content = smiles_to_image(urllib.parse.unquote(task.input_schema.smiles))
svg_response = Response(svg_content, media_type="image/svg+xml")
return svg_response
| 5,413 |
def total_variation_divergence(logits, targets, reduction='mean'):
"""
Loss.
:param logits: predicted logits
:type logits: torch.autograd.Variable
:param targets: target distributions
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert len(list(logits.size())) == len(list(targets.size()))
assert logits.size()[0] == targets.size()[0]
assert logits.size()[1] == targets.size()[1]
assert logits.size()[1] > 1
divergences = torch.sum(
torch.abs(torch.nn.functional.softmax(logits, dim=1) - targets), dim=1)
if reduction == 'mean':
return torch.mean(divergences)
elif reduction == 'sum':
return torch.sum(divergences)
else:
return divergences
| 5,414 |
def initialize_parameters(n_in, n_out, ini_type='plain'):
"""
Helper function to initialize some form of random weights and Zero biases
Args:
n_in: size of input layer
n_out: size of output/number of neurons
ini_type: set initialization type for weights
Returns:
params: a dictionary containing W and b
"""
params = dict() # initialize empty dictionary of neural net parameters W and b
if ini_type == 'plain':
params['W'] = np.random.randn(n_out, n_in) *0.01 # set weights 'W' to small random gaussian
elif ini_type == 'xavier':
params['W'] = np.random.randn(n_out, n_in) / (np.sqrt(n_in)) # set variance of W to 1/n
elif ini_type == 'he':
# Good when ReLU used in hidden layers
# Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
# Kaiming He et al. (https://arxiv.org/abs/1502.01852)
# http: // cs231n.github.io / neural - networks - 2 / # init
params['W'] = np.random.randn(n_out, n_in) * np.sqrt(2/n_in) # set variance of W to 2/n
params['b'] = np.zeros((n_out, 1)) # set bias 'b' to zeros
return params
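# Illustrative usage (a sketch): a layer mapping 4 inputs to 3 neurons with He initialization.
example_params = initialize_parameters(n_in=4, n_out=3, ini_type='he')
assert example_params['W'].shape == (3, 4) and example_params['b'].shape == (3, 1)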
| 5,415 |
def logcdf(samples, data, prior_bounds, weights=None, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION, num_proc=DEFAULT_NUM_PROC):
"""estimates the log(cdf) at all points in samples based on data and integration in "direction".
Does this directly by estimating the CDF from the weighted samples WITHOUT building a KDE"""
### this should be relatively quick (just an ordered summation), so we do it once
data, cweights = stats.samples2cdf(data, weights=weights)
if direction=='increasing':
pass ### we already integrate up from the lower values to higher values
elif direction=='decreasing':
cweights = 1. - cweights ### reverse the order of the integral
else:
raise ValueError('direction=%s not understood!'%direction)
logcdfs = np.empty(len(samples), dtype=float)
if num_proc==1: ### do everything on this one core
logcdfs[:] = _logcdf_worker(samples, data, cweights, prior_bounds)
else: ### parallelize
# partition work amongst the requested number of cores
Nsamp = len(samples)
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_logcdf_worker, args=(samples[truth], data, cweights, prior_bounds), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
logcdfs[truth] = conni.recv()
return logcdfs
| 5,416 |
def gen_build_rules(generator):
"""
Generate yocto build rules for ninja
"""
# Create build dir by calling poky/oe-init-build-env script
cmd = " && ".join([
"cd $yocto_dir",
"source poky/oe-init-build-env $work_dir",
])
generator.rule("yocto_init_env",
command=f'bash -c "{cmd}"',
description="Initialize Yocto build environment")
generator.newline()
# Add bitbake layers by calling bitbake-layers script
cmd = " && ".join([
"cd $yocto_dir",
"source poky/oe-init-build-env $work_dir",
"bitbake-layers add-layer $layers",
"touch $out",
])
generator.rule("yocto_add_layers",
command=f'bash -c "{cmd}"',
description="Add yocto layers",
pool="console")
generator.newline()
# Append local.conf with our own configuration
cmd = " && ".join([
"cd $yocto_dir",
"for x in $conf; do echo $$x >> $work_dir/conf/local.conf; done",
"touch $out",
])
generator.rule("yocto_update_conf",
command=cmd,
description="Update local.conf")
generator.newline()
# Invoke bitbake. This rule uses "console" pool so we can see the bitbake output.
cmd = " && ".join([
"cd $yocto_dir",
"source poky/oe-init-build-env $work_dir",
"bitbake $target",
])
generator.rule("yocto_build",
command=f'bash -c "{cmd}"',
description="Yocto Build: $name",
pool="console")
| 5,417 |
def post_gist(description, files):
"""Post a gist of the analysis"""
username, password = get_auth()
sess = requests.Session()
sess.auth = (username, password)
params = {
'description': description,
'files': files,
'public': False,
}
headers = {
'Content-Type': 'application/json',
'Accept': '*/*',
'User-Agent': 'stolaf-cs-toolkit/v1',
}
req = sess.post('https://api.github.com/gists',
headers=headers,
data=json.dumps(params))
result = req.json()
return result.get('html_url', '"' + result.get('message', 'Error') + '"')
| 5,418 |
def dlna_handle_notify_last_change(state_var):
"""
Handle changes to LastChange state variable.
This expands all changed state variables in the LastChange state variable.
Note that the callback is called twice:
- for the original event;
- for the expanded event, via this function.
"""
if state_var.name != 'LastChange':
raise UpnpError('Call this only on state variable LastChange')
service = state_var.service
changed_state_variables = []
el_event = ET.fromstring(state_var.value)
_LOGGER.debug("Event payload: %s" % state_var.value)
for el_instance in el_event:
if not el_instance.tag.endswith("}InstanceID"):
continue
if el_instance.attrib['val'] != '0':
_LOGGER.warning('Only InstanceID 0 is supported')
continue
for el_state_var in el_instance:
name = el_state_var.tag.split('}')[1]
state_var = service.state_variable(name)
if state_var is None:
_LOGGER.debug("State variable %s does not exist, ignoring", name)
continue
value = el_state_var.attrib['val']
try:
state_var.upnp_value = value
except vol.error.MultipleInvalid:
_LOGGER.error('Got invalid value for %s: %s', state_var, value)
changed_state_variables.append(state_var)
service.notify_changed_state_variables(changed_state_variables)
| 5,419 |
def compute_exposure_params(reference, tone_mapper="aces", t_max=0.85, t_min=0.85):
"""
Computes start and stop exposure for HDR-FLIP based on given tone mapper and reference image.
Refer to the Visualizing Errors in Rendered High Dynamic Range Images
paper for details about the formulas
:param reference: float tensor (with CxHxW layout) containing reference image (nonnegative values)
:param tone_mapper: (optional) string describing the tone mapper assumed by HDR-FLIP
:param t_max: (optional) float describing the t value used to find the start exposure
:param t_min: (optional) float describing the t value used to find the stop exposure
:return: two floats describing start and stop exposure, respectively, to use for HDR-FLIP
"""
if tone_mapper == "reinhard":
k0 = 0
k1 = 1
k2 = 0
k3 = 0
k4 = 1
k5 = 1
x_max = t_max * k5 / (k1 - t_max * k4)
x_min = t_min * k5 / (k1 - t_min * k4)
elif tone_mapper == "hable":
# Source: https://64.github.io/tonemapping/
A = 0.15
B = 0.50
C = 0.10
D = 0.20
E = 0.02
F = 0.30
k0 = A * F - A * E
k1 = C * B * F - B * E
k2 = 0
k3 = A * F
k4 = B * F
k5 = D * F * F
W = 11.2
nom = k0 * np.power(W, 2) + k1 * W + k2
denom = k3 * np.power(W, 2) + k4 * W + k5
white_scale = denom / nom # = 1 / (nom / denom)
# Include white scale and exposure bias in rational polynomial coefficients
k0 = 4 * k0 * white_scale
k1 = 2 * k1 * white_scale
k2 = k2 * white_scale
k3 = 4 * k3
k4 = 2 * k4
#k5 = k5 # k5 is not changed
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
else: #tone_mapper == "aces":
# Source: ACES approximation: https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
# Include pre-exposure cancelation in constants
k0 = 0.6 * 0.6 * 2.51
k1 = 0.6 * 0.03
k2 = 0
k3 = 0.6 * 0.6 * 2.43
k4 = 0.6 * 0.59
k5 = 0.14
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
# Convert reference to luminance
lum_coeff_r = 0.2126
lum_coeff_g = 0.7152
lum_coeff_b = 0.0722
Y_reference = reference[0:1, :, :] * lum_coeff_r + reference[1:2, :, :] * lum_coeff_g + reference[2:3, :, :] * lum_coeff_b
# Compute start exposure
Y_hi = np.amax(Y_reference)
if Y_hi == 0:
return 0, 0
start_exposure = np.log2(x_max / Y_hi)
# Compute stop exposure
Y_lo = np.percentile(Y_reference, 50)
stop_exposure = np.log2(x_min / Y_lo)
return start_exposure, stop_exposure
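# Illustrative call (a sketch; the random reference image is only a stand-in): compute the
# start/stop exposures, in stops, for a CxHxW HDR image under the default ACES tone mapper.
example_reference = np.abs(np.random.randn(3, 64, 64)).astype(np.float32)
example_start, example_stop = compute_exposure_params(example_reference, tone_mapper="aces")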
| 5,420 |
def gen_filenames(only_new=False):
"""Returns a list of filenames referenced in sys.modules and translation
files.
"""
global _cached_modules, _cached_filenames
module_values = set(module_white_list())
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames
new_modules = module_values - _cached_modules
new_filenames = [filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')]
if only_new:
filelist = new_filenames
else:
filelist = _cached_filenames + new_filenames + _error_files
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
return filenames
| 5,421 |
def scores_generic_graph(
num_vertices: int,
edges: NpArrayEdges,
weights: NpArrayEdgesFloat,
cond: Literal["or", "both", "out", "in"] = "or",
is_directed: bool = False,
) -> NpArrayEdgesFloat:
"""
Args:
num_vertices: int
number of vertices
edges: np.array
edges
weights: np.array
edge weights
cond: str
"out", "in", "both", "or"
Returns:
np.array:
**alphas** edge scores
"""
w_adj, adj = construct_sp_matrices(
weights, edges, num_vertices, is_directed=is_directed
)
def calc_degree(adj: Any, i: int) -> NpArrayEdgesFloat:
return np.asarray(adj.sum(axis=i)).flatten().astype(np.float64)
iin = edges[:, 1]
iout = edges[:, 0]
wdegree_out = calc_degree(w_adj, 0)[iout]
degree_out = calc_degree(adj, 0)[iout]
wdegree_in = calc_degree(w_adj, 1)[iin]
degree_in = calc_degree(adj, 1)[iin]
if cond == "out":
alphas = stick_break_scores(wdegree_out, degree_out, edges, weights)
elif cond == "in":
alphas = stick_break_scores(wdegree_in, degree_in, edges, weights)
else:
alphas_out = stick_break_scores(wdegree_out, degree_out, edges, weights)
alphas_in = stick_break_scores(wdegree_in, degree_in, edges, weights)
if cond == "both":
alphas = np.maximum(alphas_out, alphas_in)
elif cond == "or":
alphas = np.minimum(alphas_out, alphas_in)
return alphas
| 5,422 |
def handle_storage_class(vol):
"""
vol: dict (sent from the frontend)
If the frontend sent the special values `{none}` or `{empty}` then the
backend will need to set the corresponding storage_class value that the
python client expects.
"""
if "class" not in vol:
return None
if vol["class"] == "{none}":
return ""
if vol["class"] == "{empty}":
return None
else:
return vol["class"]
| 5,423 |
def spherical_to_cartesian(radius, theta, phi):
""" Convert from spherical coordinates to cartesian.
Parameters
-------
radius: float
radial coordinate
theta: float
axial coordinate
phi: float
azimuthal coordinate
Returns
-------
list: cartesian vector
"""
cartesian = [radius * np.sin(theta) * np.cos(phi), radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta)]
return cartesian
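# Quick check (illustrative): theta = 0 points straight along +z, regardless of phi.
assert np.allclose(spherical_to_cartesian(1.0, 0.0, 0.0), [0.0, 0.0, 1.0])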
| 5,424 |
def compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids):
"""
:param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if 0 not in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
| 5,425 |
def _sharpness(prediction):
"""TODO: Implement for discrete inputs as entropy."""
_, chol_std = prediction
scale = torch.diagonal(chol_std, dim1=-1, dim2=-2)
return scale.square().mean()
| 5,426 |
def db_get_property_info(cls, prop: str):
"""
:param cls:
:param prop:
:return:
"""
objects_str = [getattr(obj, prop) for obj in cls.query.all()]
print("NUMBER OF OBJECTS: %s" % len(objects_str))
maxi = max(objects_str, key=len)
print("MAX LENGTH: %s (for '%s')" % (len(maxi), maxi))
mini = min(objects_str, key=len)
print("MIN LENGTH: %s (for '%s')" % (len(mini), mini))
| 5,427 |
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices
Parameters
==========
dummies : list of dummy indices
`dummies[2k], dummies[2k+1]` are paired indices
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
in base form the dummy indices are always in consecutive positions
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(range(2, 8), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res
| 5,428 |
def _save_student_model(net, model_prefix):
"""
save student model if the net is the network contains student
"""
student_model_prefix = model_prefix + "_student.pdparams"
if hasattr(net, "_layers"):
net = net._layers
if hasattr(net, "student"):
paddle.save(net.student.state_dict(), student_model_prefix)
logger.info("Already save student model in {}".format(
student_model_prefix))
| 5,429 |
def mc_tracing(func):
"""
This decorator is used below and logs certain statistics about the formula evaluation.
It measures execution time, and how many nodes are found that satisfy each subformula.
"""
@wraps(func)
def wrapper(*args):
formula = args[1]
start = time.time()
logger.debug(f"{func.__name__} for formula {str(formula)}")
retval = func(*args)
logger.debug(f"{func.__name__} found {len(retval)} nodes (in {time.time() - start} seconds) for formula {str(formula)}")
# assert isinstance(retval, (set, list, nx.classes.reportviews.NodeView)), f"{func.__name__} did not return a set, list or nx.NodeView, but a {type(retval)}"
return retval
return wrapper
| 5,430 |
def test_vt100_cursor_movements(text: Text, expected: list[Instruction[Attribute]]):
"""Ensure the classical VT100 cursor movements are supported."""
assert _instr(text) == expected
| 5,431 |
def game():
"""
Five-guess algorithm steps are directly from the Mastermind wikipedia page:
https://en.wikipedia.org/wiki/Mastermind_(board_game)#Five-guess_algorithm
"""
# 1. Create the set S of 1296 possible codes
# (1111, 1112 ... 6665, 6666)
possible_codes = init_possible_codes.copy()
guesses = set()
# 2. Start with initial guess 1122
turn_num = 1
guess = (1, 1, 2, 2)
while True:
# 3. Play the guess to get a response of coloured and white pegs.
guesses.add(guess)
fb = turn(guess, turn_num)
# 4. If the response is four colored pegs,
# the game is won, the algorithm terminates.
if fb.blacks == 4:
print()
break
# 5. Otherwise, remove from S any code that would not give the same
# response if it (the guess) were the code.
possible_codes = reduce_possible_codes(possible_codes, guess, fb)
# 6. Apply minimax technique to find a next guess.
guess = next_guess(possible_codes, guesses)
# 7. Repeat from step 3
turn_num += 1
| 5,432 |
def test_list_decimal_length_1_nistxml_sv_iv_list_decimal_length_2_2(mode, save_output, output_format):
"""
Type list/decimal is restricted by facet length with value 6.
"""
assert_bindings(
schema="nistData/list/decimal/Schema+Instance/NISTSchema-SV-IV-list-decimal-length-2.xsd",
instance="nistData/list/decimal/Schema+Instance/NISTXML-SV-IV-list-decimal-length-2-2.xml",
class_name="NistschemaSvIvListDecimalLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,433 |
def test_intersection_with_stability_selection_one_threshold():
"""Tests whether intersection correctly performs a soft intersection."""
coefs = np.array([
[[2, 1, -1, 0, 4],
[4, 0, 2, -1, 5],
[1, 2, 3, 4, 5]],
[[2, 0, 0, 0, 0],
[3, 1, 1, 0, 3],
[6, 7, 8, 9, 10]],
[[2, 0, 0, 0, 0],
[2, -1, 3, 0, 2],
[2, 4, 6, 8, 9]]])
true_intersection = np.array([
[True, False, False, False, False],
[True, True, True, False, True],
[True, True, True, True, True]])
selection_thresholds = np.array([2])
estimated_intersection = intersection(
coefs=coefs,
selection_thresholds=selection_thresholds)
# we sort the supports since they might not be in the same order
assert_array_equal(
np.sort(true_intersection, axis=0),
np.sort(estimated_intersection, axis=0))
| 5,434 |
def medview_imaging_interface_demo():
"""
Demo interface used to demo the program
Can be used to access the MedView user enroller system
and the login system
"""
while True:
print('-----------------------------------------------------')
print('MedView Imaging System Demo')
print('Please choose an action by selecting the number only:')
print('1. MedView user enrolment system')
print('2. MedView user login system')
print('3. Quit Demo')
action = input('Enter an action: ')
if action == '1':
user_enrolment_interface()
elif action == '2':
login_interface()
elif action == '3':
print('Thank you for using MedView Imaging System Demo, Goodbye')
print('-----------------------------------------------------')
break
| 5,435 |
def get_uniform_comparator(comparator):
""" convert comparator alias to uniform name
"""
if comparator in ["eq", "equals", "==", "is"]:
return "equals"
elif comparator in ["lt", "less_than"]:
return "less_than"
elif comparator in ["le", "less_than_or_equals"]:
return "less_than_or_equals"
elif comparator in ["gt", "greater_than"]:
return "greater_than"
elif comparator in ["ge", "greater_than_or_equals"]:
return "greater_than_or_equals"
elif comparator in ["ne", "not_equals"]:
return "not_equals"
elif comparator in ["str_eq", "string_equals"]:
return "string_equals"
elif comparator in ["len_eq", "length_equals", "count_eq"]:
return "length_equals"
elif comparator in ["len_gt", "count_gt", "length_greater_than", "count_greater_than"]:
return "length_greater_than"
elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals", \
"count_greater_than_or_equals"]:
return "length_greater_than_or_equals"
elif comparator in ["len_lt", "count_lt", "length_less_than", "count_less_than"]:
return "length_less_than"
elif comparator in ["len_le", "count_le", "length_less_than_or_equals", \
"count_less_than_or_equals"]:
return "length_less_than_or_equals"
else:
return comparator
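# Illustrative examples (a sketch): aliases collapse to one canonical comparator name.
assert get_uniform_comparator("==") == "equals"
assert get_uniform_comparator("len_gt") == "length_greater_than"
assert get_uniform_comparator("custom_check") == "custom_check"  # unknown names pass through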
| 5,436 |
def project_list(config, server):
"""プロジェクト一覧を表示する"""
server_, entity_bucket = build_entity_bucket(config, server)
repos_factory = build_repository_factory(server_)
command.project_list(
project_repository=repos_factory.create_project_repository()
)
| 5,437 |
def test_conll2000_dataset_get_datasetsize():
"""
Feature: CoNLL2000ChunkingDataset.
Description: test param check of CoNLL2000ChunkingDataset.
Expectation: throw correct error and message.
"""
data = ds.CoNLL2000Dataset(DATA_DIR, usage="test", shuffle=False)
size = data.get_dataset_size()
assert size == 12
| 5,438 |
def inorder_traversal(root):
"""Function to traverse a binary tree inorder
Args:
root (Node): The root of a binary tree
Returns:
(list): List containing all the values of the tree from an inorder search
"""
res = []
if root:
res = inorder_traversal(root.left)
res.append(root.data)
res = res + inorder_traversal(root.right)
return res
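# Illustrative usage (a sketch; assumes a Node class exposing `left`, `right` and `data`):
# root = Node(1); root.left = Node(2); root.right = Node(3)
# inorder_traversal(root)  # -> [2, 1, 3]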
| 5,439 |
def unlock():
"""Releases a lock file.
Raises:
OSError: If lock file cannot be released.
"""
if os.path.isfile(LOCK_FILE):
os.remove(LOCK_FILE)
Log.info("Removed temporary lock")
| 5,440 |
def test_search_print_results_should_contain_latest_versions(caplog):
"""
Test that printed search results contain the latest package versions
"""
hits = [
{
'name': 'testlib1',
'summary': 'Test library 1.',
'versions': ['1.0.5', '1.0.3']
},
{
'name': 'testlib2',
'summary': 'Test library 1.',
'versions': ['2.0.1', '2.0.3']
}
]
print_results(hits)
log_messages = sorted([r.getMessage() for r in caplog.records])
assert log_messages[0].startswith('testlib1 (1.0.5)')
assert log_messages[1].startswith('testlib2 (2.0.3)')
| 5,441 |
def get_extractor_metadata(clowder_md, extractor_name, extractor_version=None):
"""Crawl Clowder metadata object for particular extractor metadata and return if found.
If extractor_version specified, returned metadata must match."""
for sub_metadata in clowder_md:
if 'agent' in sub_metadata:
agent_data = sub_metadata['agent']
if 'name' in agent_data and agent_data['name'].find(extractor_name) > -1:
if not extractor_version:
return sub_metadata['content']
else:
# TODO: Eventually check this in preferred way
if 'extractor_version' in sub_metadata['content']:
existing_ver = str(sub_metadata['content']['extractor_version'])
if existing_ver == extractor_version:
return sub_metadata['content']
return None
| 5,442 |
def intensity_scale(X_f, X_o, name, thrs, scales=None, wavelet="Haar"):
"""
Compute an intensity-scale verification score.
Parameters
----------
X_f: array_like
Array of shape (m, n) containing the forecast field.
X_o: array_like
Array of shape (m, n) containing the verification observation field.
name: string
A string indicating the name of the spatial verification score
to be used:
+------------+--------------------------------------------------------+
| Name | Description |
+============+========================================================+
| FSS | Fractions skill score |
+------------+--------------------------------------------------------+
| BMSE | Binary mean squared error |
+------------+--------------------------------------------------------+
thrs: float or array_like
Scalar or 1-D array of intensity thresholds for which to compute the
verification.
scales: float or array_like, optional
Scalar or 1-D array of spatial scales in pixels,
required if ``name="FSS"``.
wavelet: str, optional
The name of the wavelet function to use in the BMSE.
Defaults to the Haar wavelet, as described in Casati et al. 2004.
See the documentation of PyWavelets for a list of available options.
Returns
-------
out: array_like
The two-dimensional array containing the intensity-scale skill scores
for each spatial scale and intensity threshold.
References
----------
:cite:`CRS2004`, :cite:`RL2008`, :cite:`EWWM2013`
See also
--------
pysteps.verification.spatialscores.binary_mse,
pysteps.verification.spatialscores.fss
"""
intscale = intensity_scale_init(name, thrs, scales, wavelet)
intensity_scale_accum(intscale, X_f, X_o)
return intensity_scale_compute(intscale)
| 5,443 |
def make_count(bits, default_count=50):
"""
Return items count from URL bits if last bit is positive integer.
>>> make_count(['Emacs'])
50
>>> make_count(['20'])
20
>>> make_count(['бред', '15'])
15
"""
count = default_count
if len(bits) > 0:
last_bit = bits[len(bits)-1]
if last_bit.isdigit():
count = int(last_bit)
return count
| 5,444 |
def synthesize_ntf_minmax(order=32, osr=32, H_inf=1.5, f0=0, zf=False,
**options):
"""
Alias of :func:`ntf_fir_minmax`
.. deprecated:: 0.11.0
Function is now available from the :mod:`NTFdesign` module with
name :func:`ntf_fir_minmax`
"""
warn("Function superseded by ntf_fir_minmax in "
"NTFdesign module", PyDsmDeprecationWarning)
return ntf_fir_minmax(order, osr, H_inf, f0, zf, **options)
| 5,445 |
def r2f(value):
"""
converts temperature in R(degrees Rankine) to F(degrees Fahrenheit)
:param value: temperature in R(degrees Rankine)
:return: temperature in F(degrees Fahrenheit)
"""
return const.convert_temperature(value, 'R', 'F')
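# Sanity check (illustrative; assumes `const` is scipy.constants): the freezing point of
# water, 491.67 degrees Rankine, converts to 32 degrees Fahrenheit.
assert abs(r2f(491.67) - 32.0) < 1e-6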
| 5,446 |
def test_load_translations_files(hass):
"""Test the load translation files function."""
# Test one valid and one invalid file
file1 = hass.config.path(
'custom_components', 'switch', '.translations', 'test.en.json')
file2 = hass.config.path(
'custom_components', 'switch', '.translations', 'invalid.json')
assert translation.load_translations_files({
'switch.test': file1,
'invalid': file2
}) == {
'switch.test': {
'state': {
'string1': 'Value 1',
'string2': 'Value 2',
}
},
'invalid': {},
}
| 5,447 |
def summary(
infile, outfile, max_taxa_per_query,
taxdump, chunksize=100000, threads=1):
"""
Parses merged output from binning process into a csv with
rows that have more than max_taxa_per_query hits removed.
Returns a list of unique taxa that are present in the remaining rows.
"""
logging.info(
"Parsing merged binning file: {}".format(
os.path.abspath(infile)))
ncbi = get_ete_ncbi(taxdump)
summary_table = parse_merged_file_in_chunks(
taxdump, infile, chunksize, max_taxa_per_query, threads)
summary_table = format_summary_table(summary_table, ncbi)
summary_table.to_csv(outfile, index=False, float_format="%.0f")
logging.info(
"Finished writing summary to: {}".format(
os.path.abspath(outfile)))
| 5,448 |
def get_currently_playing_track():
"""Returns currently playing track as a file
No request params.
"""
try:
pt, _, _ = Track.get_currently_playing()
path = pt.track.path
return send_file( os.path.join( '..', path ) )
except DoesNotExist:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis: Trenutno se ne emitira ništa.', 404 )
except IndexError:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis: Lista za reprodukciju je završila prije vremena.', 404 )
except:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis.', 404 )
| 5,449 |
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
"""
run the training of DDPG
:param env_id: (str) the environment ID
:param seed: (int) the initial random seed
:param noise_type: (str) the wanted noises ('adaptive-param', 'normal' or 'ou'), can use multiple noise type by
separating them with commas
:param layer_norm: (bool) use layer normalization
:param evaluation: (bool) enable evaluation of DDPG training
:param kwargs: (dict) extra keywords for the training.train function
"""
# Configure things.
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
# Create envs.
env = gym.make(env_id)
env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
if evaluation and rank == 0:
eval_env = gym.make(env_id)
eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))
env = bench.Monitor(env, None)
else:
eval_env = None
# Parse noise_type
action_noise = None
param_noise = None
nb_actions = env.action_space.shape[-1]
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mean=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(nb_actions),
sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
# Seed everything to make things reproducible.
seed = seed + 1000000 * rank
logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))
tf.reset_default_graph()
set_global_seeds(seed)
env.seed(seed)
if eval_env is not None:
eval_env.seed(seed)
# Disable logging for rank != 0 to avoid noise.
start_time = 0
if rank == 0:
start_time = time.time()
if layer_norm:
policy = LnMlpPolicy
else:
policy = MlpPolicy
num_timesteps = kwargs['num_timesteps']
del kwargs['num_timesteps']
model = DDPG(policy=policy, env=env, memory_policy=Memory, eval_env=eval_env, param_noise=param_noise,
action_noise=action_noise, memory_limit=int(1e6), verbose=2, **kwargs)
model.learn(total_timesteps=num_timesteps)
env.close()
if eval_env is not None:
eval_env.close()
if rank == 0:
logger.info('total runtime: {}s'.format(time.time() - start_time))
| 5,450 |
def get_statistics_percentiles(d_min, stats):
"""
For a given set of statistics, determine their percentile ranking compared
to other crystal structures at similar resolution.
"""
if (d_min is None):
return dict([ (s, None) for s in stats.keys() ])
try :
db = load_db()
except Exception as e :
return {}
d_min_mvd = flex.double([ float(x) for x in db['high_resolution'] ])
sel_perm = flex.sort_permutation(d_min_mvd)
d_min_mvd = d_min_mvd.select(sel_perm)
def find_value_in_list(values, value):
i = 0
j = len(values) - 1
while (i != j):
k = i + (j - i) // 2
if (value and value <= values[k]):
j = k
else :
i = k + 1
return i
index = find_value_in_list(d_min_mvd, d_min)
sel_around = flex.bool(d_min_mvd.size(), False)
index_tmp = index
while (index_tmp > 0):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other < d_min - 0.1):
break
sel_around[index_tmp] = True
index_tmp -= 1
index_tmp = index
while (index_tmp < d_min_mvd.size()):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other > d_min + 0.1):
break
sel_around[index_tmp] = True
index_tmp += 1
#print "%d structures around %g" % (sel_around.count(True), d_min)
percentiles = {}
for stat_name in stats.keys():
stat = stats[stat_name]
if (not stat_name in db):
percentiles[stat_name] = None
continue
values = db[stat_name].select(sel_perm).select(sel_around)
fvalues = flex.double()
for value in values :
try :
fvalues.append(float(value))
except ValueError :
pass
assert fvalues.size() != 0
fvalues_sorted = fvalues.select(flex.sort_permutation(fvalues))
stat_index = find_value_in_list(fvalues_sorted, stat)
# FIXME I think for some of these statistics we need to reverse this -
# i.e. if higher statistics are better
stat_perc = 100 * (1 - (stat_index / fvalues.size()))
percentiles[stat_name] = stat_perc
#print stat_name, stat_index, fvalues.size(), stat_perc
#flex.histogram(fvalues, n_slots=10).show(prefix=" ")
return percentiles
| 5,451 |
def normalize_number(value: str, number_format: str) -> str:
"""
Transform a string that essentially represents a number to the corresponding number with the given number format.
Return a string that includes the transformed number. If the given number format does not match any supported one, return the given string.
:param value: the string
:param number_format: number format with which the value is normalized
:return: the normalized string
"""
if number_format == 'COMMA_POINT' or number_format == 'Comma Point':
nor_str = re.sub(pattern=',', repl='', string=value)
elif number_format == 'POINT_COMMA' or number_format == 'Point Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\.', repl='', string=value))
elif number_format == 'SPACE_POINT' or number_format == 'Space Point':
nor_str = re.sub(pattern='\s', repl='', string=value)
elif number_format == 'SPACE_COMMA' or number_format == 'Space Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\s', repl='', string=value))
elif number_format == 'NONE_COMMA' or number_format == 'None Comma':
nor_str = re.sub(pattern=',', repl='.', string=value)
else:
nor_str = value
return nor_str
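# Illustrative examples: the same quantity written in two locale conventions normalizes
# to the same plain decimal string.
assert normalize_number("1,234.56", "COMMA_POINT") == "1234.56"
assert normalize_number("1.234,56", "POINT_COMMA") == "1234.56"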
| 5,452 |
def execute_command(command):
"""Execute the sequence of key presses."""
for key in command:
keyboard.press(key)
keyboard.release_all()
| 5,453 |
def get_returning_users(returning_count):
"""
Returns a list of returning users
:return:
"""
# Read the exclusion file
if os.path.exists(stats_dir + 'exclusion.lst'):
exclusion_file = open(stats_dir + 'exclusion.lst', 'r')
exclusion_list = exclusion_file.readlines()
exclusion_list = [u.strip() for u in exclusion_list]
else:
exclusion_list = []
# Read the user database
cmd_out = subprocess.getstatusoutput("sqlite3 " + data_dir + "jupyterhub.sqlite \"select name from users order by last_activity\"")[1]
all_users = cmd_out.split('\n')
# Exclude members of the lab
users_minus_exclusions = [user for user in all_users if user not in exclusion_list]
return users_minus_exclusions[:returning_count]
| 5,454 |
def benchmark(Algorithm_, Network_, test):
"""
Benchmarks the Algorithm on a given class of Networks. Samples variable network size, and plots results.
@param Algorithm_: a subclass of Synchronous_Algorithm, the algorithm to test.
@param Network_: a subclass of Network, the network on which to benchmark the algorithm.
@param test: a function that may throw an assertion error
"""
def averages(x,y):
"""
Groups x's with the same value, averages corresponding y values.
@param x: A sorted list of x values
@param y: A list of corresponding y values
@return: (x grouped by value, corresponding mean y values)
Example:
averages([1,1,2,2,2,3], [5,6,3,5,1,8]) --> ([1, 2, 3], [5.5, 3.0, 8.0])
"""
new_x = [x[0]]
new_y = []
cur_x = new_x[0]
cur_ys = []
for x_i, y_i in zip(x,y):
if x_i == cur_x:
cur_ys.append(y_i)
else:
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
new_x.append( x_i )
cur_ys = [y_i]
cur_x = x_i
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
return new_x, new_y
def plot(x, y, title):
"""Plots the points (x[i],y[i]) for all i, fig."""
fig, ax = plt.subplots()
x_ave,y_ave = averages(x,y)
ax.scatter(x, y, label="data", color='b')
ax.scatter(x_ave, y_ave, label="means", color='r')
ax.set_xlim( xmin=0 )
ax.set_ylim( ymin=0 )
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title(title)
ax.set_xlabel(Network_.__name__ +' size')
data = sample(Algorithm_, Network_, test)
if data is None: return
size, comm, time = data
if issubclass(Algorithm_, Synchronous_Algorithm):
plot(size, time, Algorithm_.__name__ + ' Time Complexity')
plot(size, comm, Algorithm_.__name__ + ' Communication Complexity')
| 5,455 |
def add_ip_to_host(port=8000):
"""
Add local IPv4 and public IP addresses to ALLOWED_HOSTS.
Args:
    port: port which handles the request
Returns:
    None
"""
IP_PRIVATE = getoutput('hostname -I').strip()
try:
IP_PUBLIC = urllib.request.urlopen(
'https://ident.me').read().decode('utf8')
ALLOWED_HOSTS.append(IP_PUBLIC)
except URLError:
print('Not connected to internet, the developement server will not be accessible from outside')
finally:
ALLOWED_HOSTS.append(IP_PRIVATE)
print('You may connect at any of the following:')
[print(f'http://{i}:{port}') for i in ALLOWED_HOSTS]
# Just add a blank file after the allowed addresses
print()
| 5,456 |
def scale(obj, scale_ratio):
"""
:param obj: trimesh or file path
:param scale_ratio: float, scale all axis equally
:return:
author: weiwei
date: 20201116
"""
if isinstance(obj, trm.Trimesh):
tmpmesh = obj.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh
elif isinstance(obj, str):
originalmesh = trm.load(obj)
tmpmesh = originalmesh.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh
| 5,457 |
def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name,
allow_python_preds):
"""Verifies input arguments for the case function.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for the case operation.
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
Returns:
a tuple <list of scalar bool tensors, list of callables>.
"""
del name
if not isinstance(pred_fn_pairs, (list, tuple, dict)):
raise TypeError('fns must be a list, tuple, or dict')
if isinstance(pred_fn_pairs, collections.OrderedDict):
pred_fn_pairs = pred_fn_pairs.items()
elif isinstance(pred_fn_pairs, dict):
# No name to sort on in eager mode. Use dictionary traversal order,
# which is nondeterministic in versions of Python < 3.6
if not exclusive:
raise ValueError('Unordered dictionaries are not supported for the '
'`pred_fn_pairs` argument when `exclusive=False` and '
'eager mode is enabled.')
pred_fn_pairs = list(pred_fn_pairs.items())
for pred_fn_pair in pred_fn_pairs:
if not isinstance(pred_fn_pair, tuple) or len(pred_fn_pair) != 2:
raise TypeError('Each entry in pred_fn_pairs must be a 2-tuple')
pred, fn = pred_fn_pair
if ops.is_tensor(pred):
if pred.dtype != dtype.bool:
raise TypeError('pred must be Tensor of type bool: %s' % pred.name)
elif not allow_python_preds:
raise TypeError('pred must be a Tensor, got: %s' % pred)
elif not isinstance(pred, bool):
raise TypeError('pred must be a Tensor or bool, got: %s' % pred)
if not callable(fn):
raise TypeError('fn for pred %s must be callable.' % pred.name)
predicates, actions = zip(*pred_fn_pairs)
return predicates, actions
| 5,458 |
def get_policy(policy_name: str) -> Policy:
"""Returns a mixed precision policy parsed from a string."""
# Loose grammar supporting:
# - "c=f16" (params full, compute+output in f16),
# - "p=f16,c=f16" (params, compute and output in f16).
# - "p=f16,c=bf16" (params in f16, compute in bf16, output in bf16)
# For values that are not specified params defaults to f32, compute follows
# params and output follows compute (e.g. 'c=f16' -> 'p=f32,c=f16,o=f16').
param_dtype = jnp.float32
compute_dtype = output_dtype = None
if "=" in policy_name:
for part in policy_name.split(","):
key, value = part.split("=", 2)
value = parse_dtype(value)
if key == "p" or key == "params":
param_dtype = value
elif key == "c" or key == "compute":
compute_dtype = value
elif key == "o" or key == "output":
output_dtype = value
else:
raise ValueError(f"Unknown key '{key}' in '{policy_name}' should be "
"'params', 'compute' or 'output'.")
if compute_dtype is None:
compute_dtype = param_dtype
if output_dtype is None:
output_dtype = compute_dtype
else:
# Assume policy name is a dtype (e.g. 'f32' or 'half') that all components
# of the policy should contain.
param_dtype = compute_dtype = output_dtype = parse_dtype(policy_name)
return Policy(param_dtype=param_dtype, compute_dtype=compute_dtype,
output_dtype=output_dtype)
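# Illustrative usage, reusing the grammar shown in the docstring above: params stay in f16,
# compute runs in bf16, and the output follows compute.
example_policy = get_policy("p=f16,c=bf16")
# example_policy.param_dtype -> f16, .compute_dtype -> bf16, .output_dtype -> bf16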
| 5,459 |
def reset():
"""Reset password page. User launch this page via the link in
the find password email."""
if g.user:
return redirect('/')
token = request.values.get('token')
if not token:
flash(_('Token is missing.'), 'error')
return redirect('/')
user = verify_auth_token(token, expires=1)
if not user:
flash(_('Invalid or expired token.'), 'error')
return redirect(url_for('.find'))
form = ResetForm()
if form.validate_on_submit():
user.change_password(form.password.data).save()
login_user(user)
flash(_('Your password is updated.'), 'info')
return redirect(url_for('.setting'))
return render_template('account/reset.html', form=form, token=token)
| 5,460 |
def usort_file(path: Path, dry_run: bool = False, diff: bool = False) -> Result:
"""
Sorts one file, optionally writing the result back.
Returns: a Result object.
Note: Not intended to be run concurrently, as the timings are stored in a
global.
"""
result = Result(path)
result.timings = []
with save_timings(result.timings):
try:
config = Config.find(path)
src_contents = path.read_bytes()
dst_contents, encoding = usort_bytes(src_contents, config, path)
if src_contents != dst_contents:
result.changed = True
if diff:
result.diff = unified_diff(
src_contents.decode(encoding),
dst_contents.decode(encoding),
path.as_posix(),
)
if not dry_run:
path.write_bytes(dst_contents)
result.written = True
except Exception as e:
result.error = e
return result
| 5,461 |
def datetimeobj_a__d_b_Y_H_M_S_z(value):
"""Convert timestamp string to a datetime object.
Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
converted by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbreviated month is invalid.
"""
a, d, b, Y, t, z = value.split()
H, M, S = t.split(":")
return datetime.datetime(
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S),
tzinfo=dateutil.tz.tzoffset(None, _offset(z))
)
| 5,462 |
def fn_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
"""Approximation of False rejection rate using Sigmoid."""
return tf.reduce_sum(
tf.multiply(tf.sigmoid(-1. * temperature * (preds - threshold)), labels))
| 5,463 |
def remove_disks_in_vm_provisioning(session, vm_ref):
"""Re-write the xml for provisioning disks to set a SR"""
other_config = session.xenapi.VM.get_other_config(vm_ref)
del other_config['disks']
session.xenapi.VM.set_other_config(vm_ref, other_config)
| 5,464 |
def get_images(filters: Optional[Sequence[pulumi.InputType['GetImagesFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetImagesSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImagesResult:
"""
Get information on images for use in other resources (e.g. creating a Droplet
based on a snapshot), with the ability to filter and sort the results. If no filters are specified,
all images will be returned.
This data source is useful if the image in question is not managed by the provider or you need to utilize any
of the image's data.
Note: You can use the `getImage` data source to obtain metadata
about a single image if you already know the `slug`, unique `name`, or `id` to retrieve.
:param Sequence[pulumi.InputType['GetImagesFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetImagesSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getImages:getImages', __args__, opts=opts, typ=GetImagesResult).value
return AwaitableGetImagesResult(
filters=__ret__.filters,
id=__ret__.id,
images=__ret__.images,
sorts=__ret__.sorts)
| 5,465 |
def polevl(x, coef):
"""Taken from http://numba.pydata.org/numba-doc/0.12.2/examples.html"""
N = len(coef)
ans = coef[0]
i = 1
while i < N:
ans = ans * x + coef[i]
i += 1
return ans
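# Worked example (not part of the original source): coefficients are given from the
# highest order downwards, so [1, 0, -3] is x**2 - 3 and polevl(2.0, ...) is 1.0.
assert polevl(2.0, [1, 0, -3]) == 1.0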
| 5,466 |
def is_regular_file(path):
"""Check whether 'path' is a regular file, especially not a symlink."""
return os.path.isfile(path) and not os.path.islink(path)
| 5,467 |
def door():
"""this creates the door"""
dave = turtle.Turtle()
dave.penup()
dave.forward(250) # this is setting the position at the bottom of square
dave.right(90)
dave.forward(250)
dave.right(90)
dave.forward(80)
dave.pendown()
dave.pencolor("purple")
dave.right(90)
dave.forward(100) # this is actually creating the door
dave.left(90)
dave.forward(100)
dave.left(90)
dave.forward(100)
| 5,468 |
def plot_displacement(A, B, save=False, labels=None):
"""
A and B are both num_samples x num_dimensions
for now, num_dimensions must = 2
"""
assert A.shape == B.shape
assert A.shape[1] == 2
    if labels is not None:
assert len(labels) == A.shape[0]
delta = B - A
delta_dir = delta/np.linalg.norm(delta, axis=1).reshape(-1, 1)
fig = plt.figure()
# set size
xmin = min(min(A[:, 0]), min(B[:, 0]))
xmax = max(max(A[:, 0]), max(B[:, 0]))
ymin = min(min(A[:, 1]), min(B[:, 1]))
ymax = max(max(A[:, 1]), max(B[:, 1]))
plt.xlim(1.1*xmin, 1.1*xmax)
plt.ylim(1.1*ymin, 1.1*ymax)
# create
# add displacement arrows, possibly labels
offset = 0.05
    for i in range(A.shape[0]):
plt.arrow(A[i, 0]+offset*delta_dir[i, 0], A[i, 1]+offset*delta_dir[i, 1],
delta[i, 0]-2*offset*delta_dir[i, 0], delta[i, 1]-2*offset*delta_dir[i, 1],
length_includes_head=True, alpha=0.5, color='grey',
head_width=0.08, head_length=0.08, width=0.009)
        if labels is not None:
plt.annotate(labels[i], xy=A[i, :], xytext=A[i, :], color='red')
plt.annotate(labels[i], xy=B[i, :], xytext=B[i, :], color='blue')
if labels is None:
# without labels, just plot points
plt.scatter(A[:, 0], A[:, 1], s=35, c='red', linewidths=0)
plt.scatter(B[:, 0], B[:, 1], s=35, c='blue', linewidths=0)
plt.axhline(0, color='grey', linestyle='--')
plt.axvline(0, color='grey', linestyle='--')
# show
if save:
plt.savefig('fig.png')
else:
plt.show()
return True
| 5,469 |
def flip():
""" flip() -> None
Update the full display Surface to the screen
"""
check_video()
screen = sdl.SDL_GetVideoSurface()
if not screen:
raise SDLError("Display mode not set")
if screen.flags & sdl.SDL_OPENGL:
sdl.SDL_GL_SwapBuffers()
status = 0
else:
status = sdl.SDL_Flip(screen)
if status == -1:
raise SDLError.from_sdl_error()
| 5,470 |
def t_dp(tdb, rh):
""" Calculates the dew point temperature.
Parameters
----------
tdb: float
dry-bulb air temperature, [°C]
rh: float
relative humidity, [%]
Returns
-------
t_dp: float
dew point temperature, [°C]
"""
c = 257.14
b = 18.678
a = 6.1121
d = 234.5
gamma_m = math.log(rh / 100 * math.exp((b - tdb / d) * (tdb / (c + tdb))))
return round(c * gamma_m / (b - gamma_m), 1)
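# Illustrative usage (not part of the original source): for 25 °C air at 50 % relative
# humidity these Magnus-type constants give a dew point of roughly 13.8 °C.
print(t_dp(tdb=25, rh=50))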
| 5,471 |
def extract_archive(filepath):
"""
Returns the path of the archive
:param str filepath: Path to file to extract or read
:return: path of the archive
:rtype: str
"""
# Checks if file path is a directory
if os.path.isdir(filepath):
path = os.path.abspath(filepath)
print("Archive already extracted. Viewing from {}...".format(path))
return path
# Checks if the filepath is a zipfile and continues to extract if it is
# if not it raises an error
elif not zipfile.is_zipfile(filepath):
# Misuse of TypeError? :P
raise TypeError("{} is not a zipfile".format(filepath))
archive_sha = SHA1_file(
filepath=filepath,
# Add version of slackviewer to hash as well so we can invalidate the cached copy
# if there are new features added
extra=to_bytes(slackviewer.__version__)
)
extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
if os.path.exists(extracted_path):
print("{} already exists".format(extracted_path))
else:
# Extract zip
with zipfile.ZipFile(filepath) as zip:
print("{} extracting to {}...".format(filepath, extracted_path))
zip.extractall(path=extracted_path)
print("{} extracted to {}".format(filepath, extracted_path))
# Add additional file with archive info
create_archive_info(filepath, extracted_path, archive_sha)
return extracted_path
| 5,472 |
def LinkConfig(reset=0, loopback=0, scrambling=1):
"""Link Configuration of TS1/TS2 Ordered Sets."""
value = ( reset << 0)
value |= ( loopback << 2)
value |= ((not scrambling) << 3)
return value
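# Worked examples (not part of the original source) showing the bit layout:
# bit 0 = reset, bit 2 = loopback, bit 3 = "scrambling disabled".
assert LinkConfig() == 0                    # defaults: scrambling on, nothing else set
assert LinkConfig(scrambling=0) == 0b1000   # disabling scrambling sets bit 3
assert LinkConfig(reset=1, loopback=1) == 0b0101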
| 5,473 |
def test_tedlium_release():
"""
Feature: TedliumDataset
    Description: test the release argument of TedliumDataset
    Expectation: an invalid release value raises an error with the expected message
"""
def test_config(release):
try:
ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, release)
except (ValueError, TypeError, RuntimeError) as e:
return str(e)
return None
# test the release
assert "release is not within the valid set of ['release1', 'release2', 'release3']" in test_config("invalid")
assert "Argument release with value None is not of type [<class 'str'>]" in test_config(None)
assert "Argument release with value ['list'] is not of type [<class 'str'>]" in test_config(["list"])
| 5,474 |
def test_serialize_bulk_courses(mocker):
"""
Test that serialize_bulk_courses calls serialize_course_for_bulk for every existing course
"""
mock_serialize_course = mocker.patch("search.serializers.serialize_course_for_bulk")
courses = CourseFactory.create_batch(5)
list(serialize_bulk_courses([course.id for course in Course.objects.all()]))
for course in courses:
mock_serialize_course.assert_any_call(course)
| 5,475 |
def get_data(n, input_dim, y_dim, attention_column=1):
"""
    Data generation. x is purely random except that its first value equals the target y.
In practice, the network should learn that the target = x[attention_column].
Therefore, most of its attention should be focused on the value addressed by attention_column.
:param n: the number of samples to retrieve.
    :param input_dim: the number of dimensions of each element in the series.
    :param y_dim: the number of target dimensions.
    :param attention_column: the column linked to the target. Everything else is purely random.
:return: x: model inputs, y: model targets
"""
x = np.random.standard_normal(size=(n, input_dim))
y = np.random.randint(low=0, high=2, size=(n, y_dim))
for i in range(y_dim):
x[:, i * 3] = y[:, i]
return x, y
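# Illustrative usage (not part of the original source): every target column y[:, i]
# is copied into x[:, 3 * i], so those are the columns an attention model should find.
example_x, example_y = get_data(n=4, input_dim=10, y_dim=3)
# example_x.shape == (4, 10) and example_y.shape == (4, 3)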
| 5,476 |
def generate_tautomer_hydrogen_definitions(hydrogens, residue_name, isomer_index):
"""
    Builds an hxml (hydrogen definitions) XML tree that is used to add hydrogens for a specific tautomer to the heavy-atom skeleton.
Parameters
----------
hydrogens: list of tuple
Tuple contains two atom names: (hydrogen-atom-name, heavy-atom-atom-name)
residue_name : str
name of the residue to fill the Residues entry in the xml tree
isomer_index : int
"""
hydrogen_definitions_tree = etree.fromstring("<Residues/>")
hydrogen_file_residue = etree.fromstring("<Residue/>")
hydrogen_file_residue.set("name", residue_name)
for name, parent in hydrogens:
h_xml = etree.fromstring("<H/>")
h_xml.set("name", name)
h_xml.set("parent", parent)
hydrogen_file_residue.append(h_xml)
hydrogen_definitions_tree.append(hydrogen_file_residue)
return hydrogen_definitions_tree
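# Illustrative usage (not part of the original source); the hydrogen/heavy-atom
# pairs below are hypothetical and only meant to show the expected input shape.
example_hydrogens = [("HE2", "NE2"), ("HD1", "ND1")]
example_tree = generate_tautomer_hydrogen_definitions(example_hydrogens, "HIS", isomer_index=0)
# etree.tostring(example_tree) yields a <Residues> element containing one
# <Residue name="HIS"> with two <H> children.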
| 5,477 |
def test_single_key_lookup_feature_to_config():
"""Single key lookup feature config generation should work"""
user_key = TypedKey(full_name="mockdata.user", key_column="user_id", key_column_type=ValueType.INT32, description="An user identifier")
item_key = TypedKey(full_name="mockdata.item", key_column="item_id", key_column_type=ValueType.INT32, description="An item identifier")
user_item = Feature(name="user_items", feature_type=INT32_VECTOR, key=user_key)
item_price = Feature(name="item_price", feature_type=FLOAT_VECTOR, key=item_key)
# A lookup feature
lookup_feature = LookupFeature(name="user_avg_item_price",
feature_type=FLOAT,
key=user_key,
base_feature=user_item,
expansion_feature=item_price,
aggregation=Aggregation.AVG)
lookup_feature_config = """
user_avg_item_price: {
key: [user_id]
join: {
base: {key: [user_id], feature: user_items}
expansion: {key: [item_id], feature: item_price}
}
aggregation: AVG
type: {
type: TENSOR
tensorCategory: DENSE
dimensionType: []
valType: FLOAT
}
}"""
assert_config_equals(lookup_feature.to_feature_config(), lookup_feature_config)
| 5,478 |
def waitid(*args, **kwargs): # real signature unknown
"""
Returns the result of waiting for a process or processes.
idtype
        Must be one of P_PID, P_PGID or P_ALL.
id
The id to wait on.
options
Constructed from the ORing of one or more of WEXITED, WSTOPPED
or WCONTINUED and additionally may be ORed with WNOHANG or WNOWAIT.
Returns either waitid_result or None if WNOHANG is specified and there are
no children in a waitable state.
"""
pass
| 5,479 |
def login(client=None, **defaults):
"""
@param host:
@param port:
@param identityName:
@param password:
@param serviceName:
@param perspectiveName:
@returntype: Deferred RemoteReference of Perspective
"""
d = defer.Deferred()
LoginDialog(client, d, defaults)
return d
| 5,480 |
def insertInstrument():
""" Insert a new instrument or edit an existing instrument on a DAQBroker database. Guest users are not allowed to
    create instruments. Created instruments are stored in the database along with their data sources and channels.
    .. :quickref: Create/Edit instrument; Creates or edits a DAQBroker instrument
:param: Name : (String) unique instrument name
:param: instid : (Integer) unique instrument identifier. Used to edit an existing instrument
    :param: description : (String) description of the instrument
:param: email : (String) contact information for the instrument operator
:param: Files : (Optional) JSON encoded list of instrument data source objects. Each Contains the following keys:
| ``name`` : (String) name of the data source
| ``metaid`` : (Integer) unique data source identifier. Only used to edit existing data sources
| ``type`` : (Integer) type of instrument data source
| ``node`` : (String) unique network node identifier
| ``remarks`` : (String) JSON encoded object of extra data source information
| ``channels`` : (Optional) JSON encoded list of data channel objects. Each contains the following keys:
| ``Name`` : (String) data channel name
| ``channelid`` : (Integer) unique channel identifier. -1 if the channel is new. Positive integer
if the channel already exists
| ``description`` : (String) data channel description
| ``units`` : (String) data channel physical units
| ``channeltype`` : (Integer) type of data channel
| ``0`` : Number
| ``1`` : Text
| ``2`` : Custom
| ``active`` : (Boolean) channel is shown on interface
| ``fileorder`` : (Integer) Used to order channels in a data source
| ``alias`` : (String) Original data channel name. Kept constant when name changes
| ``remarks`` : (String) JSON encoded object with extra information
            | ``oldName`` : (String) Old channel name. Used to detect changes in the channel name
| ``channeltypeOld`` : (Integer) Old channel type. Used to detect changes in the channel type
"""
processRequest = request.get_json()
Session = sessionmaker(bind=current_user.engineObj)
session = Session()
conn = current_user.engineObj.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
try:
if 'instid' in processRequest:
newInst = False
instid = processRequest['instid']
instrument = session.query(daqbrokerDatabase.instruments).filter_by(instid=instid).first()
else:
newInst = True
maxInst = session.query(func.max(daqbrokerDatabase.instruments.instid)).one_or_none()
# print(maxInst==None)
if maxInst[0]:
maxInstid = maxInst[0]
else:
maxInstid = 0
instid = maxInstid + 1
instrument = daqbrokerDatabase.instruments(
Name=processRequest['Name'],
instid=instid,
active=False,
description=processRequest['description'],
username=current_user.username,
email=processRequest['email'],
insttype=0,
log=None)
# Now I have an object called "instrument" that I can use to add sources
# and metadatas and to those metadatas I should be able to add channels.
for file in processRequest['files']:
if 'metaid' in file:
metadata = session.query(daqbrokerDatabase.instmeta).filter_by(metaid=file["metaid"]).first()
metadata.clock = time.time() * 1000
                metadata.name = file['name']
                metadata.type = file['type']
                metadata.node = file['node']
                metadata.remarks = json.dumps(file['remarks'])
else:
maxMeta = session.query(func.max(daqbrokerDatabase.instmeta.metaid)).first()
if maxMeta[0]:
maxMetaid = maxMeta[0]
else:
maxMetaid = 0
metaid = maxMetaid + 1
metadata = daqbrokerDatabase.instmeta(
clock=time.time() * 1000,
name=file['name'],
metaid=metaid,
type=file["type"],
node=file["node"],
remarks=json.dumps(
file['remarks']),
sentRequest=False,
lastAction=0,
lasterrortime=0,
lasterror='',
lockSync=False)
instrument.sources.append(metadata)
channelid = None
if 'channels' in file:
channelsInsert = []
for channel in file['channels']:
if int(channel['channelid']) < 0: # New channel - have to insert
maxChannel = session.query(func.max(daqbrokerDatabase.channels.channelid)).first()
if not channelid:
if maxChannel[0]:
maxChannelid = maxChannel[0]
else:
maxChannelid = 0
channelid = maxChannelid + 1
else:
channelid = channelid + 1
if 'remarks' in channel:
if len(channel["remarks"].keys())>0:
theRemarks = json.dumps(channel["remarks"])
else:
theRemarks = json.dumps({})
else:
theRemarks = json.dumps({})
theChannel = daqbrokerDatabase.channels(
Name=channel["Name"],
channelid=channelid,
channeltype=int(
channel["channeltype"]),
valuetype=0,
units=channel['units'],
description=channel['description'],
active=int(
channel['active']) == 1,
remarks=theRemarks,
lastclock=0,
lastValue=None,
firstClock=0,
fileorder=channel['fileorder'],
alias=channel['alias'])
metadata.channels.append(theChannel)
channelsInsert.append({'name': channel["Name"], 'type': int(channel["channeltype"])})
if not newInst:
extra = ''
if int(channel['channeltype']) == 1:
newType = daqbrokerDatabase.Float
extra = "\"" + channel["Name"] + "\"::double precision"
column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_data", column)
elif int(channel['channeltype']) == 2:
newType = daqbrokerDatabase.Text
column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_data", column)
elif int(channel['channeltype']) == 3:
extra = "\"" + channel["Name"] + "\"::double precision"
                                newType = daqbrokerDatabase.Float
                                column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_custom", column)
elif not newInst:
theChannel = session.query(
daqbrokerDatabase.channels).filter_by(
channelid=channel['channelid']).first()
theChannel.Name = channel["Name"]
theChannel.channeltype = int(channel["channeltype"])
theChannel.units = channel['units']
theChannel.description = channel['description']
theChannel.active = int(channel['active']) == 1
theChannel.fileorder = channel['fileorder']
theChannel.alias = channel['alias']
if (not channel['channeltypeOld'] == channel['channeltype']) or (
not channel['oldName'] == str(channel['Name'])):
if not channel['oldName'] == str(channel['Name']):
newName = str(channel['Name'])
oldName = channel['oldName']
else:
oldName = str(channel['Name'])
newName = None
if not channel['channeltypeOld'] == channel['channeltype']:
if channel['channeltype'] == 1 or channel['channeltype'] == 3:
newType = daqbrokerDatabase.Float
extra = "\"" + oldName + "\"::double precision"
else:
newType = daqbrokerDatabase.Text
extra = None
else:
newType = None
if not channel['channeltypeOld'] == channel['channeltype'] and channel['channeltype'] == 3:
if not newName:
theName = oldName
else:
theName = newName
if not newType:
theType = daqbrokerDatabase.Float
else:
theType = newType
column = daqbrokerDatabase.Column(theName, theType)
op.drop_column(processRequest['Name'] + "_data", oldName)
op.add_column(processRequest['Name'] + "_custom", column)
elif not channel['channeltypeOld'] == channel['channeltype'] and channel['channeltypeOld'] != 3:
if not newName:
theName = oldName
else:
theName = newName
if not newType:
if channel['channeltypeOld'] == 1:
theType = daqbrokerDatabase.Float
else:
theType = daqbrokerDatabase.Text
else:
theType = newType
column = daqbrokerDatabase.Column(theName, theType)
op.drop_column(processRequest['Name'] + "_custom", oldName)
op.add_column(processRequest['Name'] + "_data", column)
else:
if channel['channeltype'] == 1 or channel['channeltype'] == 2:
if extra:
op.alter_column(
processRequest['Name'] + "_data",
oldName,
new_column_name=newName,
type_=newType,
postgresql_using=extra)
else:
op.alter_column(
processRequest['Name'] + "_data", oldName, new_column_name=newName, type_=newType)
else:
if extra=='':
op.alter_column(
processRequest['Name'] + "_custom", oldName, new_column_name=newName, type_=newType)
else:
op.alter_column(
processRequest['Name'] + "_data",
oldName,
new_column_name=newName,
type_=newType,
postgresql_using=extra)
elif newInst:
raise InvalidUsage("Cannot issue edit channels on new instrument", status_code=401)
if newInst:
daqbrokerDatabase.createInstrumentTable(processRequest['Name'], channelsInsert, True)
session.add(instrument)
daqbrokerDatabase.daqbroker_database.metadata.create_all(current_user.engineObj)
session.commit()
conn.close()
current_user.updateDB()
return jsonify('done')
except Exception as e:
traceback.print_exc()
session.rollback()
# for statement in deleteStatements:
# connection.execute(statement)
raise InvalidUsage(str(e), status_code=500)
| 5,481 |
def find_distance_to_major_settlement(country, major_settlements, settlement):
"""
Finds the distance to the nearest major settlement.
"""
nearest = nearest_points(settlement['geometry'], major_settlements.unary_union)[1]
geom = LineString([
(
settlement['geometry'].coords[0][0],
settlement['geometry'].coords[0][1]
),
(
nearest.coords[0][0],
nearest.coords[0][1]
),
])
distance_km = round(geom.length / 1e3)
return distance_km
| 5,482 |
def x_for_half_max_y(xs, ys):
"""Return the x value for which the corresponding y value is half
of the maximum y value. If there is no exact corresponding x value,
one is calculated by linear interpolation from the two
surrounding values.
:param xs: x values
:param ys: y values corresponding to the x values
:return:
"""
if len(xs) != len(ys):
raise ValueError("xs and ys must be of equal length")
half_max_y = max(ys) / 2
for i in range(len(xs)-1):
if ys[i+1] >= half_max_y:
x_dist = xs[i+1] - xs[i]
y_dist = ys[i+1] - ys[i]
y_offset = half_max_y - ys[i]
if y_offset == 0:
return xs[i]
else:
x_offset = y_offset / y_dist * x_dist
return xs[i] + x_offset
return None
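# Worked example (not part of the original source): the curve below peaks at y = 2,
# so half max is 1, which is first reached exactly at x = 1.
assert x_for_half_max_y([0, 1, 2, 3, 4], [0, 1, 2, 1, 0]) == 1.0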
| 5,483 |
def get_dict_from_dotenv_file(filename: Union[Path, str]) -> Dict[str, str]:
"""
:param filename: .env file where values are extracted.
:return: a dict with keys and values extracted from the .env file.
"""
result_dict = {}
error_message = 'file {filename}: the line n°{index} is not correct: "{line}"'
with open(filename) as f:
for index, line in enumerate(f):
stripped_line = line.strip()
# we don't take into account comments
if stripped_line.startswith('#'):
continue
# we don't take into account empty lines
if not stripped_line:
continue
parts = stripped_line.split('#') # we remove inline comments if there are any
# we remove set or export command if there are any
new_line = SET_EXPORT_EXPRESSION.sub('', parts[0].strip())
# we get key and value
parts = new_line.split('=')
parts = _sanitize_key_and_value(parts)
if len(parts) != 2 or ITEM_EXPRESSION.match(parts[0]) is None \
or ITEM_EXPRESSION.match(parts[1]) is None:
line_number = index + 1
raise DecodeError(message=error_message.format(filename=filename, index=line_number, line=new_line))
result_dict[parts[0]] = parts[1]
return result_dict
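# Illustrative usage (not part of the original source); the exact accepted syntax
# depends on the module-level SET_EXPORT_EXPRESSION and ITEM_EXPRESSION patterns.
import pathlib
import tempfile

_env_path = pathlib.Path(tempfile.mkdtemp()) / ".env"
_env_path.write_text("# database settings\nexport DB_HOST=localhost\nDB_PORT=5432\n")
# Expected (assuming the patterns accept these tokens): {'DB_HOST': 'localhost', 'DB_PORT': '5432'}
print(get_dict_from_dotenv_file(_env_path))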
| 5,484 |
def calculate_note_numbers(note_list, key_override = None):
"""
Takes in a list of notes, and replaces the key signature (second
element of each note tuple) with the note's jianpu number.
Parameters
----------
note_list : list of tuples
List of notes to calculate jianpu numbers for.
key_override : str
If this is provided, all notes will be assumed to be in the
given key.
"""
note_list_numbered = []
for note in note_list:
if note[0] != -1:
if(note[1] == 'C' or key_override == 'C'):
offset = 0
elif(note[1] == 'C#' or key_override == 'C#'
or note[1] == 'Db' or key_override == 'Db'):
offset = 1
elif(note[1] == 'D' or key_override == 'D'):
offset = 2
elif(note[1] == 'D#' or key_override == 'D#'
or note[1] == 'Eb' or key_override == 'Eb'):
offset = 3
elif(note[1] == 'E' or key_override == 'E'):
offset = 4
elif(note[1] == 'F' or key_override == 'F'):
offset = 5
elif(note[1] == 'F#' or key_override == 'F#'
or note[1] == 'Gb' or key_override == 'Gb'):
offset = 6
elif(note[1] == 'G' or key_override == 'G'):
offset = 7
elif(note[1] == 'G#' or key_override == 'G#'
or note[1] == 'Ab' or key_override == 'Ab'):
offset = 8
elif(note[1] == 'A' or key_override == 'A'):
offset = 9
elif(note[1] == 'A#' or key_override == 'A#'
or note[1] == 'Bb' or key_override == 'Bb'):
offset = 10
elif(note[1] == 'B' or key_override == 'B'):
offset = 11
num = (note[0]-offset) - ((note[0]-offset)//12)*12
num_to_jianpu = { 0:1,
1:1.5,
2:2,
3:2.5,
4:3,
5:4,
6:4.5,
7:5,
8:5.5,
9:6,
10:6.5,
11:7}
jianpu = num_to_jianpu[num]
note_list_numbered.append((note[0], jianpu, note[2], note[3]))
else:
note_list_numbered.append(note)
return note_list_numbered
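# Worked example (not part of the original source): MIDI note 60 (middle C) in the
# key of C maps to jianpu number 1; the trailing tuple fields (layout assumed here)
# are passed through unchanged.
assert calculate_note_numbers([(60, 'C', 1.0, 0)]) == [(60, 1, 1.0, 0)]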
| 5,485 |
def _from_Gryzinski(DATA):
"""
This function computes the cross section and energy values from the files
that store information following the Gryzinski Model
"""
import numpy as np
a_0 = DATA['a_0']['VALUES']
epsilon_i_H = DATA['epsilon_i_H']['VALUES']
epsilon_i = DATA['epsilon_i']['VALUES']
xi = DATA['xi']['VALUES']
final_E = DATA['Final_E']['VALUES']
Energy_range = np.linspace(epsilon_i, final_E, 200)
u = Energy_range/epsilon_i
gg = (1+2/3*(1-1/(2*u))*np.log(np.e+(u-1)**(1/2)))
g = ((u-1)/u**2)*((u/(u+1))**(3/2))*((1-1/u)**(1/2))*gg
Cross_sections = 4*np.pi*(a_0**2)*((epsilon_i_H/epsilon_i)**2)*xi*g
    return Energy_range, Cross_sections
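# Minimal sketch (not part of the original source) with illustrative values only:
# a_0 is the Bohr radius in m, epsilon_i_H the hydrogen ionisation energy in eV, and
# the remaining entries are hypothetical parameters for a single target species.
example_data = {
    'a_0': {'VALUES': 5.29e-11},
    'epsilon_i_H': {'VALUES': 13.6},
    'epsilon_i': {'VALUES': 15.4},
    'xi': {'VALUES': 2},
    'Final_E': {'VALUES': 1000.0},
}
example_energies, example_cross_sections = _from_Gryzinski(example_data)
# example_energies spans epsilon_i .. Final_E over 200 points; example_cross_sections has the same shape.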
| 5,486 |
def export_tfjs(keras_or_saved_model, output_dir, **kwargs):
"""Exports saved model to tfjs.
https://www.tensorflow.org/js/guide/conversion?hl=en
Args:
keras_or_saved_model: Keras or saved model.
output_dir: Output TF.js model dir.
**kwargs: Other options.
"""
# For Keras model, creates a saved model first in a temp dir. Otherwise,
# convert directly.
is_keras = isinstance(keras_or_saved_model, tf.keras.Model)
with _create_temp_dir(is_keras) as temp_dir_name:
if is_keras:
keras_or_saved_model.save(
temp_dir_name, include_optimizer=False, save_format='tf')
path = temp_dir_name
else:
path = keras_or_saved_model
tfjs_converter.dispatch_keras_saved_model_to_tensorflowjs_conversion(
path, output_dir, **kwargs)
| 5,487 |
def stations_within_radius(stations, centre, r):
"""Returns an alphabetically-ordered list of the names of all the stations (in a list of stations objects) within a radius r (in km) of a central point
(which must be a Lat/Long coordinate)"""
from haversine import haversine, Unit
# creates empty list
name_list = []
# extracts the necessary data from the list of stations
for i in range(len(stations)):
station_entry = stations[i]
s_coord = station_entry.coord
s_distance = haversine(s_coord, centre)
# Determines if the station is within the radius
if s_distance <= r:
s_name = station_entry.name
name_list.append(s_name)
#sorts the list
name_list.sort()
return name_list
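# Illustrative usage (not part of the original source); a minimal stand-in for the
# station objects, which only need `.name` and `.coord` (lat, lon) attributes here.
from collections import namedtuple

_Station = namedtuple("_Station", ["name", "coord"])
_example_stations = [
    _Station("Cambridge Jesus Lock", (52.2127, 0.1207)),
    _Station("London Tower Pier", (51.5076, -0.0781)),
]
# Only the Cambridge station lies within 50 km of central Cambridge:
print(stations_within_radius(_example_stations, (52.2053, 0.1218), 50))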
| 5,488 |
def read_data_with_plugins(
path: Union[str, Sequence[str]],
plugin: Optional[str] = None,
plugin_manager: PluginManager = napari_plugin_manager,
) -> List[LayerData]:
"""Iterate reader hooks and return first non-None LayerData or None.
This function returns as soon as the path has been read successfully,
    while catching any plugin exceptions, storing them for later retrieval,
providing useful error messages, and relooping until either layer data is
returned, or no valid readers are found.
Exceptions will be caught and stored as PluginErrors
(in plugins.exceptions.PLUGIN_ERRORS)
Parameters
----------
path : str
The path (file, directory, url) to open
plugin : str, optional
Name of a plugin to use. If provided, will force ``path`` to be read
with the specified ``plugin``. If the requested plugin cannot read
``path``, a PluginCallError will be raised.
plugin_manager : plugins.PluginManager, optional
Instance of a napari PluginManager. by default the main napari
plugin_manager will be used.
Returns
-------
    LayerData : list of tuples
LayerData that can be passed to :func:`Viewer._add_layer_from_data()
<napari.components.add_layers_mixin.AddLayersMixin._add_layer_from_data>`.
        ``LayerData`` is a list of tuples, where each tuple is one of
        ``(data,)``, ``(data, meta)``, or ``(data, meta, layer_type)`` .
        If no plugin can read the path (or they all error), a ``ValueError`` is raised.
Raises
------
PluginCallError
If ``plugin`` is specified but raises an Exception while reading.
"""
hook_caller = plugin_manager.hook.napari_get_reader
if plugin:
if plugin not in plugin_manager.plugins:
names = {i.plugin_name for i in hook_caller.get_hookimpls()}
raise ValueError(
f"There is no registered plugin named '{plugin}'.\n"
f"Names of plugins offering readers are: {names}"
)
reader = hook_caller._call_plugin(plugin, path=path)
if not callable(reader):
raise ValueError(f'Plugin {plugin!r} does not support file {path}')
return reader(path) or []
errors: List[PluginCallError] = []
path = abspath_or_url(path)
skip_impls: List[HookImplementation] = []
layer_data = None
while True:
result = hook_caller.call_with_result_obj(
path=path, _skip_impls=skip_impls
)
reader = result.result # will raise exceptions if any occurred
if not reader:
# we're all out of reader plugins
break
try:
layer_data = reader(path) # try to read data
if layer_data:
break
except Exception as exc:
# collect the error and log it, but don't raise it.
err = PluginCallError(result.implementation, cause=exc)
err.log(logger=logger)
errors.append(err)
# don't try this impl again
skip_impls.append(result.implementation)
if not layer_data:
# if layer_data is empty, it means no plugin could read path
# we just want to provide some useful feedback, which includes
# whether or not paths were passed to plugins as a list.
if isinstance(path, (tuple, list)):
path_repr = f"[{path[0]}, ...] as stack"
else:
path_repr = repr(path)
# TODO: change to a warning notification in a later PR
raise ValueError(f'No plugin found capable of reading {path_repr}.')
if errors:
names = set([repr(e.plugin_name) for e in errors])
err_msg = f"({len(errors)}) error{'s' if len(errors) > 1 else ''} "
err_msg += f"occurred in plugins: {', '.join(names)}. "
err_msg += 'See full error logs in "Plugins → Plugin Errors..."'
logger.error(err_msg)
return layer_data or []
| 5,489 |
def reset_task_size(nbytes: int) -> None:
"""Reset the default task size used for parallel IO operations.
Parameters
----------
nbytes : int
The number of threads to use.
"""
libkvikio.task_size_reset(nbytes)
| 5,490 |
def ticket_message_url(request, structure_slug, ticket_id): # pragma: no cover
"""
Makes URL redirect to add ticket message by user role
:type structure_slug: String
:type ticket_id: String
:param structure_slug: structure slug
:param ticket_id: ticket code
:return: redirect
"""
structure = get_object_or_404(OrganizationalStructure,
slug=structure_slug)
user_type = get_user_type(request.user, structure)
return redirect('uni_ticket:{}_ticket_message'.format(user_type),
structure_slug, ticket_id)
| 5,491 |
def load(
fin: Path,
azelfn: Path = None,
treq: list[datetime] = None,
wavelenreq: list[str] = None,
wavelength_altitude_km: dict[str, float] = None,
) -> dict[str, T.Any]:
"""
reads FITS images and spatial az/el calibration for allsky camera
Bdecl is in degrees, from IGRF model
"""
fin = Path(fin).expanduser()
if fin.is_file() and fin.suffix in (".h5", ".hdf5"):
return load_hdf5(fin, treq, wavelenreq)
flist = _slicereq(fin, treq, wavelenreq)
if not flist:
raise FileNotFoundError(f"No files found in {fin}")
# %% load data from good files, discarding bad
imgs = _sift(flist)
# %% camera location
imgs = _camloc(imgs, flist[0].parent)
# %% az / el
imgs = _azel(azelfn, imgs)
# %% projections
imgs = _project(imgs, wavelength_altitude_km)
return imgs
| 5,492 |
def loadData(data_source, loc, run, indexes, ntry=0, __text__=None, __prog__=None):
"""
Loads the data from a remote source. Has hooks for progress bars.
"""
if __text__ is not None:
__text__.emit("Decoding File")
if data_source.getName() == "Local WRF-ARW":
url = data_source.getURLList(outlet="Local")[0].replace("file://", "")
decoder = ARWDecoder
dec = decoder((url, loc[0], loc[1]))
else:
decoder, url = data_source.getDecoderAndURL(loc, run, outlet_num=ntry)
logging.info("Using decoder: " + str(decoder))
logging.info("Data URL: " + url)
dec = decoder(url)
if __text__ is not None:
__text__.emit("Creating Profiles")
profs = dec.getProfiles(indexes=indexes)
return profs
| 5,493 |
def get_access_policy_output(access_policy_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccessPolicyResult]:
"""
Returns an access policy based on the name.
"""
...
| 5,494 |
def comp_psip_skin(self, u):
"""psip_skin for skin effect computation
Parameters
----------
    self : Conductor
        A Conductor object
    u : float
        Skin effect parameter (reduced conductor height, see Pyrhonen p. 257)
    Returns
    -------
    y : float
        psip_skin coefficient
"""
y = (1 / u) * (sinh(u) + sin(u)) / (cosh(u) + cos(u)) # p257 Pyrhonen
# y[u==0]=1
return y
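# Quick numerical check (not part of the original source): for a small skin-effect
# parameter the correction factor tends to 1; `self` is unused by the formula.
from math import isclose
assert isclose(comp_psip_skin(None, 0.1), 1.0, rel_tol=1e-3)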
| 5,495 |
def save_wav_file(filename, wav_data, sess, sample_rate=16000):
"""Saves audio sample data to a .wav audio file.
Args:
filename: Path to save the file to.
    wav_data: 2D array of float PCM-encoded audio data.
    sess: TensorFlow session argument (unused; a new session is created internally).
    sample_rate: Samples per second to encode in the file.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
sample_rate_placeholder = tf.compat.v1.placeholder(tf.int32, [])
wav_data_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 1])
wav_encoder = tf.audio.encode_wav(wav_data_placeholder,
sample_rate_placeholder)
wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)
sess.run(
wav_saver,
feed_dict={
wav_filename_placeholder: filename,
sample_rate_placeholder: sample_rate,
wav_data_placeholder: np.reshape(wav_data, (-1, 1))
})
| 5,496 |
def homepage(var=None):
    """
    Return the homepage html template, passing a random value to the template.
    The value is drawn per call rather than once at import time.
    """
    if var is None:
        var = random.randint(0, 1000)
    return render_template("index.html", var=var)
| 5,497 |
def tier(value):
"""
A special function of ordinals which does not
correspond to any mathematically useful function.
Maps ordinals to small objects, effectively compressing the range.
Used to speed up comparisons when the operands are very different sizes.
In the current version, this is a map from ordinals to 2-tuples of integers,
however, this is subject to change at any time, so please do not retain
long lived records of what tier an ordinal number is.
"""
if isinstance(value, numbers.Real):
value = ordinal(value)
if isinstance(value, ordinal):
return value._tier
raise ValueError('Value is not of a known type representing a mathematical ordinal.')
| 5,498 |
def get_next_value(
sequence_name="default",
initial_value=1,
reset_value=None,
*,
nowait=False,
using=None,
overrite=None,
):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
db_table = connection.ops.quote_name(Sequence._meta.db_table)
if (
connection.vendor == "postgresql"
# Remove when dropping Django 2.2. Django 3.0 requires PostgreSQL 9.5.
and getattr(connection, "pg_version", 0) >= 90500
and reset_value is None
and not nowait
):
# PostgreSQL ≥ 9.5 supports "upsert".
# This is about 3x faster as the naive implementation.
with connection.cursor() as cursor:
cursor.execute(
POSTGRESQL_UPSERT.format(db_table=db_table),
[sequence_name, initial_value],
            )
result = cursor.fetchone()
return result[0]
elif connection.vendor == "mysql" and reset_value is None and not nowait:
# MySQL supports "upsert" but not "returning".
# This is about 2x faster as the naive implementation.
with transaction.atomic(using=using, savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
MYSQL_UPSERT.format(db_table=db_table),
[sequence_name, initial_value],
)
cursor.execute(
SELECT.format(db_table=db_table),
[sequence_name],
)
result = cursor.fetchone()
return result[0]
else:
# Default, ORM-based implementation for all other cases.
with transaction.atomic(using=using, savepoint=False):
sequences = Sequence.objects.select_for_update(nowait=nowait)
sequence, created = sequences.get_or_create(
name=sequence_name,
defaults={"last": initial_value},
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
if overrite is not None:
sequence.last = overrite
sequence.save()
return sequence.last
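# Illustrative usage (not part of the original source); requires a configured Django
# project with this app's Sequence model migrated, so it is shown as a sketch only:
#
#     invoice_number = get_next_value("invoice")                 # 1, 2, 3, ... per call
#     counter = get_next_value("daily", initial_value=1, reset_value=1000)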
| 5,499 |