content stringlengths 22-815k | id int64 0-4.91M |
---|---|
def delete_user_feed(user_id, feed_url_to_delete):
"""Удаление отслеживаемого источника пользователя из базы данных"""
try:
for source_name in sources_names:
cursor.execute(f'DELETE FROM {source_name}_sources WHERE user_id = ? and feed_url = ?',
(user_id, feed_url_to_delete))
conn.commit()
except sqlite3.Error as error:
print("Ошибка", error)
| 5,300 |
def extend_blob_sentiment_database(company_name, client_address):
"""
Calculate the 1-day, 3-day and 7-day TextBlob sentiment scores from the daily sentiment totals.
Perform this operation only after the daily sentiment scores have been obtained.
:param company_name: the name of the company. Used as the entry in the database.
:param client_address: the address of the database.
"""
client = MongoClient(client_address)
sentiment_db = client.sentiment_current
news_dates = []
news_scores = []
all_date = sentiment_db[company_name].distinct("date")
progress_full = len(all_date)
progress_count = 0
for date in all_date:
# calculate past 1 day sentiment scores
one_day_news_score = 0
one_day_news_count = sys.float_info.epsilon
for i in range(1, 2):
current_day = sentiment_db[company_name].find_one({"date": get_date_offset(date, i)})
if current_day:
one_day_news_score += current_day["today_overall_sentiment_score"]
one_day_news_count += current_day["today_news_count"]
updated_sentiment_score = {"$set": {"1_day_sentiment_score": one_day_news_score / one_day_news_count,
"1_day_overall_sentiment_score": one_day_news_score,
"1_day_news_count": one_day_news_count}}
sentiment_db[company_name].update_one({"date": date}, updated_sentiment_score)
# calculate past 3 day sentiment scores
three_day_news_score = 0
three_day_news_count = sys.float_info.epsilon
for i in range(1, 4):
current_day = sentiment_db[company_name].find_one({"date": get_date_offset(date, i)})
if current_day:
three_day_news_score += current_day["today_overall_sentiment_score"]
three_day_news_count += current_day["today_news_count"]
updated_sentiment_score = {"$set": {"3_day_sentiment_score": three_day_news_score / three_day_news_count,
"3_day_overall_sentiment_score": three_day_news_score,
"3_day_news_count": three_day_news_count}}
sentiment_db[company_name].update_one({"date": date}, updated_sentiment_score)
# calculate past 7 day sentiment scores
seven_day_news_score = 0
seven_day_news_count = sys.float_info.epsilon
for i in range(1, 8):
current_day = sentiment_db[company_name].find_one({"date": get_date_offset(date, i)})
if current_day:
seven_day_news_score += current_day["today_overall_sentiment_score"]
seven_day_news_count += current_day["today_news_count"]
updated_sentiment_score = {"$set": {"7_day_sentiment_score": seven_day_news_score / seven_day_news_count,
"7_day_overall_sentiment_score": seven_day_news_score,
"7_day_news_count": seven_day_news_count}}
sentiment_db[company_name].update_one({"date": date}, updated_sentiment_score)
progress_count += 1
print("extend", company_name, "progress:", progress_count, "/", progress_full)
client.close()
| 5,301 |
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
In this method, all pairs are chosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Reference: "Large Margin Rank Boundaries for Ordinal Regression",
R. Herbrich, T. Graepel, K. Obermayer.
Authors: Fabian Pedregosa <fabian@fseoane.net>
Alexandre Gramfort <alexandre.gramfort@inria.fr>
Args:
X: (np.array), shape (n_samples, n_features)
The data
y: (np.array), shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns:
X_trans: (np.array), shape (k, n_features)
Data as pairs, where k = n_samples * (n_samples-1) / 2 if grouping
values were not passed. If grouping variables exist, then returns
values computed for each group.
y_trans: (np.array), shape (k,)
Output class labels, where classes have values {-1, +1}
If y was shape (n_samples, 2), then returns (k, 2) with groups on
the second dimension.
"""
X_new, y_new, y_group = [], [], []
y_ndim = y.ndim
if y_ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
y_group.append(y[i, 1])
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = -y_new[-1]
X_new[-1] = -X_new[-1]
if y_ndim == 1:
return np.asarray(X_new), np.asarray(y_new).ravel()
elif y_ndim == 2:
return np.asarray(X_new), np.vstack((np.asarray(y_new), np.asarray(y_group))).T
| 5,302 |
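A minimal usage sketch for transform_pairwise above (not part of the original snippet); it assumes numpy and itertools are imported and the function is in scope. The second column of y carries a made-up query/group id, so pairs are only formed within a group.
import itertools
import numpy as np

# Toy ranking data: 4 samples, 2 features; y[:, 0] is relevance, y[:, 1] the group id.
X = np.array([[1.0, 0.0],
              [2.0, 1.0],
              [0.5, 3.0],
              [1.5, 2.0]])
y = np.array([[1, 0],
              [2, 0],
              [1, 1],
              [3, 1]])

X_pairs, y_pairs = transform_pairwise(X, y)
print(X_pairs.shape)  # (2, 2): one difference vector per within-group pair
print(y_pairs)        # column 0 holds the +1/-1 labels, column 1 the group ids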
def run_process(args, palette):
"""Process qrc files."""
# Generate qrc file based on the content of the resources folder
id_ = palette.ID
# Create palette and resources png images
print('Generating {} palette image ...'.format(id_))
create_palette_image(palette=palette)
print('Generating {} images ...'.format(id_))
create_images(palette=palette)
print('Generating {} qrc ...'.format(id_))
generate_qrc_file(palette=palette)
print('Converting .qrc to _rc.py and/or .rcc ...')
if not args.qrc_dir:
main_dir = os.path.join(PACKAGE_PATH, palette.ID)
os.chdir(main_dir)
for qrc_file in glob.glob('*.qrc'):
# get name without extension
filename = os.path.splitext(qrc_file)[0]
print(filename, '...')
ext = '_rc.py'
ext_c = '.rcc'
# Create variables SCSS files and compile SCSS files to QSS
print('Compiling SCSS/SASS files to QSS ...')
create_qss(palette=palette)
# creating names
py_file_pyqt5 = 'pyqt5_' + filename + ext
py_file_pyqt = 'pyqt_' + filename + ext
py_file_pyside = 'pyside_' + filename + ext
py_file_pyside2 = 'pyside2_' + filename + ext
py_file_qtpy = '' + filename + ext
py_file_pyqtgraph = 'pyqtgraph_' + filename + ext
# calling external commands
if args.create in ['pyqt', 'pyqtgraph', 'all']:
print("Compiling for PyQt4 ...")
try:
call(['pyrcc4', '-py3', qrc_file, '-o', py_file_pyqt], shell=True)
except FileNotFoundError:
print("You must install pyrcc4")
if args.create in ['pyqt5', 'qtpy', 'all']:
print("Compiling for PyQt5 ...")
try:
call(['pyrcc5', qrc_file, '-o', py_file_pyqt5], shell=True)
except FileNotFoundError:
print("You must install pyrcc5")
if args.create in ['pyside', 'all']:
print("Compiling for PySide ...")
try:
call(['pyside-rcc', '-py3', qrc_file, '-o', py_file_pyside], shell=True)
except FileNotFoundError:
print("You must install pyside-rcc")
if args.create in ['pyside2', 'all']:
print("Compiling for PySide 2...")
try:
call(['pyside2-rcc', qrc_file, '-o', py_file_pyside2], shell=True)
except FileNotFoundError:
print("You must install pyside2-rcc")
if args.create in ['qtpy', 'all']:
print("Compiling for QtPy ...")
# special case - qtpy - syntax is PyQt5
with open(py_file_pyqt5, 'r') as file:
filedata = file.read()
# replace the target string
filedata = filedata.replace('from PyQt5', 'from qtpy')
with open(py_file_qtpy, 'w+') as file:
# write the file out again
file.write(filedata)
if args.create not in ['pyqt5']:
os.remove(py_file_pyqt5)
if args.create in ['pyqtgraph', 'all']:
print("Compiling for PyQtGraph ...")
# special case - pyqtgraph - syntax is PyQt4
with open(py_file_pyqt, 'r') as file:
filedata = file.read()
# replace the target string
filedata = filedata.replace('from PyQt4', 'from pyqtgraph.Qt')
with open(py_file_pyqtgraph, 'w+') as file:
# write the file out again
file.write(filedata)
| 5,303 |
def handle_in(distance):
"""Within range
:param distance: Distance
"""
print("in range", distance)
| 5,304 |
def delete_volume(volid, region):
"""
Delete a volume
"""
Dryrun = True
if GOLIVE.lower() == 'true':
Dryrun = False
else:
print('Running in Dryrun mode')
ec2 = connect('ec2', region)
response = ec2.delete_volume(
VolumeId=volid,
DryRun=Dryrun
)
return
| 5,305 |
def format_project_title(rank: int, project_id: str, status: str) -> str:
"""Formats a project title for display in Slack.
Args:
rank: The rank in the list. Will be prepended to the title.
project_id: The project ID.
status: The status of the project. This is used to determine which
emoji is used to prefix the title string.
Returns:
A formatted title string.
"""
project_link = generate_gcp_project_link(project_id)
if status == SETTINGS.STATUS_WARNING:
return f':warning: *{rank}. {project_link}*'
return f':white_check_mark: *{rank}. {project_link}*'
| 5,306 |
def fake_3dimage_vis():
"""
:return: a Nifti1Image (3D) in RAS+ space
Following characteristics:
- shape[LR] = 7
- shape[PA] = 8
- shape[IS] = 9
Visual thing using voxel art...
"""
shape = (7,8,9)
data = np.zeros(shape, dtype=np.float32, order="F")
# "L"
indices = np.array([
(0,1,6),
(0,1,5),
(0,1,4),
(0,1,3),
(0,1,2),
(0,1,1),
(0,2,1),
(0,3,1),
(0,4,1),
(0,5,1),
]).T
data[indices[0], indices[1], indices[2]] = 7
# "P"
indices = np.array([
(1,0,6),
(1,0,5),
(1,0,4),
(1,0,3),
(1,0,2),
(1,0,1),
(2,0,6),
(3,0,5),
(3,0,4),
(2,0,3),
(1,0,2),
]).T
data[indices[0], indices[1], indices[2]] = 9
# "I"
indices = np.array([
(3,1,0),
(2,1,0),
(1,1,0),
(4,1,0),
(5,1,0),
(3,1,0),
(3,2,0),
(3,3,0),
(3,4,0),
(3,5,0),
(3,6,0),
(3,7,0),
(2,7,0),
(1,7,0),
(4,7,0),
(5,7,0),
]).T
data[indices[0], indices[1], indices[2]] = 9
affine = np.eye(4)
return nibabel.nifti1.Nifti1Image(data, affine)
| 5,307 |
def watch_dependencies(dependency, func, time_execution=15000, registry=None, app=current_app):
"""
Register a dependency gauge metric that reports whether the dependency is up
"""
if not registry:
registry = app.extensions.get("registry", CollectorRegistry())
app.extensions["registry"] = registry
# pylint: disable=invalid-name
DEPENDENCY_UP = Gauge(
'dependency_up',
'records if a dependency is up or down. 1 for up, 0 for down',
["name"],
registry=registry
)
def register_dependency():
DEPENDENCY_UP.labels(dependency).set(func())
scheduler = BackgroundScheduler()
scheduler.add_job(
func=register_dependency,
trigger="interval",
seconds=time_execution/1000,
max_instances=1,
name='dependency',
misfire_grace_time=2,
replace_existing=True
)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(scheduler.shutdown)
return scheduler
| 5,308 |
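A hedged usage sketch for watch_dependencies above; the Flask app, the Redis client and the probe function are illustrative stand-ins, and the snippet's own imports (Gauge, CollectorRegistry, BackgroundScheduler, atexit) are assumed to be available.
import redis
from flask import Flask

app = Flask(__name__)
redis_client = redis.Redis(host="localhost", port=6379)

def redis_is_up():
    # Return 1 when Redis answers a ping, 0 otherwise.
    try:
        return 1 if redis_client.ping() else 0
    except redis.exceptions.ConnectionError:
        return 0

# Probe Redis every 15 seconds and expose it as dependency_up{name="redis"}.
scheduler = watch_dependencies("redis", redis_is_up, time_execution=15000, app=app)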
def view_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key for view func.
Uses the URL instead of the request argument, which is not properly serializable.
"""
if hasattr(args[0], 'build_absolute_uri'):
uri = args[0].build_absolute_uri()
else:
uri = args[0]
return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(uri, extra))
| 5,309 |
def get_filter(sampling_freq, f_pass, f_stop, taps):
"""Get FIR filter coefficients using the Remez exchange algorithm.
Args:
sampling_freq (float): Sampling frequency, in the same units as f_pass and f_stop.
f_pass (float): Passband edge.
f_stop (float): Stopband edge.
taps (int): Number of taps or coefficients in the resulting filter.
Returns:
(numpy.ndarray): Computed filter coefficients.
"""
return ffd.remez(taps, [0, f_pass/sampling_freq, f_stop/sampling_freq, 0.5], [0, 1])
| 5,310 |
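An illustrative sketch for get_filter above, assuming ffd is an alias for scipy.signal (e.g. from scipy import signal as ffd); the frequencies and tap count below are made-up values.
import numpy as np
from scipy import signal as ffd  # assumed alias for the ffd used above

fs = 1000.0
coeffs = get_filter(fs, f_pass=100.0, f_stop=150.0, taps=65)

# Apply the FIR filter to a noisy test signal.
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(t.size)
filtered = ffd.lfilter(coeffs, 1.0, x)
print(coeffs.shape, filtered.shape)  # (65,) (1000,)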
def errorcode_from_error(e):
"""
Get the error code from a particular error/exception caused by PostgreSQL.
"""
return e.orig.pgcode
| 5,311 |
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW28
A SlotW28 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha = float(arcsin(self.W0 / (2 * Rbo)))
slot_pitch = 2 * pi / self.Zs
# comp point coordinate (in complex)
Z0 = Rbo * exp(1j * 0)
Z8 = Z0 * exp(-1j * alpha)
if self.is_outwards():
Z7 = Z8 + self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
# Z7 = x7 + 1j*y7
# Z6 = x + 1j * W3/2
# C2,Z6 _|_ Z6,Z5 => Re(C2) = Re(Z6)
# ||Z6,zc2|| = R1 => Zc2 = x + 1j*(W3/2+R1)
# ||Z7,zc2||² = R1² => (x7-x)²+ (y7-(W3/2+R1))² = R1²
# x² - 2*x7 x + (x7²+(y7-(W3/2+R1))²-R1²) = 0
# D = 4*x7² - 4*(x7²+(y7-(W3/2+R1))²-R1²) = -4((y7-(W3/2+R1))²-R1²)
# x = x7 + sqrt(-4((y7-(W3/2+R1))²-R1²))/2
Z6 = (
Z7.real
+ sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 + self.H3
rot_sign = 1
else: # inward slot
Z7 = Z8 - self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
Z6 = (
Z7.real
- sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 - self.H3
rot_sign = -1
# Tooth ref to slot
Z1, Z2, Z3, Z4 = (
Z8 * exp(-1j * slot_pitch / 2),
Z7 * exp(-1j * slot_pitch / 2),
Z6 * exp(-1j * slot_pitch / 2),
Z5 * exp(-1j * slot_pitch / 2),
)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
# symmetry
point_dict["Z5"] = Z4.conjugate()
point_dict["Z6"] = Z3.conjugate()
point_dict["Z7"] = Z2.conjugate()
point_dict["Z8"] = Z1.conjugate()
# Center
A = Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards())
point_dict["Zc1"] = A.get_center()
point_dict["Zc2"] = (point_dict["Z4"] + point_dict["Z5"]) / 2
point_dict["Zc3"] = point_dict["Zc1"].conjugate()
return point_dict
| 5,312 |
def save_expected_plot(series: pd.Series, colour="C0") -> IO:
"""Return an image of the plot with the given `series` and `colour`."""
fig, ax = plt.subplots()
ax.add_line(mpl_lines.Line2D(series.index, series.values, color=colour))
return _save_fig(fig, ax)
| 5,313 |
def GRU_architecture(
GRU_layers,
GRU_neurons,
Dense_layers,
Dense_neurons,
add_Dropout,
Dropout_rate,
data_shape,
):
"""
Parameters
----------
GRU_layers : int
Number of GRU layers.
GRU_neurons : list
List with the numbers of GRU cells in each GRU layer.
Dense_layers : int
Number of Dense layers after GRU layers.
Dense_neurons : list
List with the numbers of neurons in each fully-connected layer.
add_Dropout : bool
Specifies whether dropout regularization should be applied.
Dropout_rate : float
Dropout rate, a number between 0 and 1.
data_shape : tuple
Shape of the training data.
Returns
-------
model : keras.engine.training.Model
Model with the specified architecture.
"""
# data_shape[1] - lag, data_shape[2] - number of signals
input_layer = Input((data_shape[1], data_shape[2]))
# If there is only one GRU layer, then return_sequences should be False
if GRU_layers == 1:
layers_gru = GRU(
GRU_neurons[0],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(input_layer)
# For many GRU layers return_sequences should be True, to connect layers with each other
else:
layers_gru = input_layer
# Adding GRU layers
for grul in range(0, GRU_layers - 1):
layers_gru = GRU(
GRU_neurons[grul],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=True,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding last GRU layer
layers_gru = GRU(
GRU_neurons[-1],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding Dense layers if asked
for densel in range(Dense_layers):
layers_gru = Dense(Dense_neurons[densel], activation="relu")(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding output layer
output = Dense(1, activation="linear")(layers_gru)
model = Model(inputs=input_layer, outputs=output)
return model
| 5,314 |
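A hedged usage sketch for GRU_architecture above; it assumes the Keras symbols used in the snippet (Input, GRU, Dense, Dropout, Model) are already imported, e.g. from tensorflow.keras, and the data shapes are illustrative.
import numpy as np

# 500 training windows with a lag of 10 time steps over 3 input signals.
X_train = np.random.rand(500, 10, 3).astype("float32")
y_train = np.random.rand(500, 1).astype("float32")

model = GRU_architecture(
    GRU_layers=2,
    GRU_neurons=[32, 16],
    Dense_layers=1,
    Dense_neurons=[8],
    add_Dropout=True,
    Dropout_rate=0.2,
    data_shape=X_train.shape,
)
model.compile(optimizer="adam", loss="mse")
model.fit(X_train, y_train, epochs=2, batch_size=32, verbose=0)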
def _responds_plaintext(response):
"""sichert zu, dass die Antwort plaintext war."""
response.responds_http_status(200)
response.responds_content_type('text/plain')
| 5,315 |
def test_message():
"""Test converting value and coords to message."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
data[-1] = [0, 0]
layer = Points(data)
msg = layer.get_message()
assert type(msg) == str
| 5,316 |
def create_lock(name):
"""Creates a file in the /locks folder by the given name"""
lock_path = get_lock_path(name)
if not check_lock(lock_path):
return touch_file(lock_path)
else:
return False
| 5,317 |
def gen_rigid_tform_rot(image, spacing, angle):
"""
generate a SimpleElastix transformation parameter Map to rotate image by angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
angle : float
angle of rotation in degrees, rotates counter-clockwise if positive
Returns
-------
SimpleITK.ParameterMap of rotation transformation (EulerTransform)
"""
tform = BASE_RIG_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=angle)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
c_x, c_y = (image.GetSize()[0] - 1) / 2, (image.GetSize()[1] - 1) / 2
c_x_phy, c_y_phy = image.TransformContinuousIndexToPhysicalPoint(
(c_x, c_y)
)
t_x = rot_cent_pt[0] - c_x_phy
t_y = rot_cent_pt[1] - c_y_phy
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(int(np.ceil(bound_w))), str(int(np.ceil(bound_h)))]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
tform["TransformParameters"] = [
str(np.radians(angle)),
str(-1 * t_x),
str(-1 * t_y),
]
return tform
| 5,318 |
def get_param_response(param_name, dict_data, num=0, default=None):
"""
:param param_name: the parameter to extract from the API response
:param dict_data: the API response (dict or list)
:param num: when the value is a list, the index of the element to take
:param default: the value returned when the parameter cannot be found
:return: the extracted parameter value
"""
if isinstance(dict_data, dict):
for k, v in dict_data.items():
if k == param_name:
return v
else:
if isinstance(v, dict):
ret = get_param_response(param_name, v)
if ret is not default:
return ret
if isinstance(v, list):
if num:
try:
if isinstance(v[num], dict):
ret = get_param_response(param_name, v[num])
if ret is not default:
return ret
except IndexError:
return {'error': ErrorCode.index_error}
else:
for i in v:
if isinstance(i, dict):
ret = get_param_response(param_name, i)
if ret is not default:
return ret
if isinstance(v, str):
try:
value = eval(v)
ret = get_param_response(param_name, value)
if ret is not default:
return ret
except Exception:
pass
elif isinstance(dict_data, list):
for content in dict_data:
ret = get_param_response(param_name, content)
if ret is not default:
return ret
return default
| 5,319 |
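A small self-contained example for get_param_response above; the response dict is invented for illustration.
sample_response = {
    "code": 0,
    "data": {
        "user": {"user_id": 42, "name": "alice"},
        "orders": [
            {"order_id": "A001", "amount": 10},
            {"order_id": "A002", "amount": 25},
        ],
    },
}

print(get_param_response("user_id", sample_response))                # 42 (found via recursion into nested dicts)
print(get_param_response("order_id", sample_response))               # 'A001' (first match inside the list)
print(get_param_response("amount", sample_response["data"], num=1))  # 25 (num picks the second list element)
print(get_param_response("missing", sample_response))                # None (falls back to default)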
def _strict_conv1d(x, h):
"""Return x * h for rank 1 tensors x and h."""
with ops.name_scope('strict_conv1d', values=[x, h]):
x = array_ops.reshape(x, (1, -1, 1, 1))
h = array_ops.reshape(h, (-1, 1, 1, 1))
result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
return array_ops.reshape(result, [-1])
| 5,320 |
def explorer():
"""Explorer"""
pass
| 5,321 |
def timed_zip_map_agent(func, in_streams, out_stream,
call_streams=None, name=None):
"""
Parameters
----------
in_streams: list of Stream
The list of input streams of the agent.
Each input stream is timed, i.e. the elements
are pairs (timestamp, value)
out_stream: Stream
The single output stream of the agent.
The output_stream is also timed.
call_streams: list of Stream
The list of call_streams. A new value in any stream in this
list causes a state transition of this agent.
name: Str
Name of the agent created by this function.
Returns
-------
Agent.
The agent created by this function.
Notes
-----
Each stream in in_streams must be a stream of tuples or lists
or NumPy arrays where element[0] is a time and where time is
a total order. Each stream in in_streams must be strictly
monotonically increasing in time.
out_stream merges the in_streams in order of time. An element
of out_stream is a list where element[0] is a time T and
element[1] is a list consisting of all elements of in in_streams
that have time T.
"""
# Check types of arguments
check_list_of_streams_type(list_of_streams=in_streams,
agent_name=name, parameter_name='in_streams')
check_stream_type(name, 'out_stream', out_stream)
check_list_of_streams_type(list_of_streams=call_streams,
agent_name=name, parameter_name='call_streams')
num_in_streams = len(in_streams)
indices = range(num_in_streams)
# The transition function for this agent.
def transition(in_lists, state):
# Check the types of in_lists
check_in_lists_type(name, in_lists, num_in_streams)
# input_lists is the list of lists that this agent can operate on
# in this transition.
input_lists = [in_list.list[in_list.start:in_list.stop]
for in_list in in_lists]
# pointers is a list where pointers[i] is a pointer into the i-th
# input lists
pointers = [0 for i in indices]
# stops is a list where pointers[i] must not exceed stops[i].
stops = [len(input_lists[i]) for i in indices]
# output_list is the single output list for this agent.
output_list = []
while all(pointers[i] < stops[i] for i in indices):
# slice is a list with one element per input stream.
# slice[i] is the value pointed to by pointers[i].
slice = [input_lists[i][pointers[i]] for i in indices]
# slice[i][0] is the time field for slice[i].
# earliest_time is the earliest time pointed to by pointers.
earliest_time = min(slice[i][0] for i in indices)
# slice[i][1:] is the list of fields other than the time
# field for slice[i].
# next_output_value is a list with one element for
# each input stream.
# next_output_value[i] is None if the time
# for slice[i] is later than the earliest time. If the time
# for slice[i] is the earliest time, then next_output_value[i]
# is the non-time field of slice[i].
next_output_value = [slice[i][1] if slice[i][0] == earliest_time
else None for i in indices]
# increment pointers for the indexes where the time was the
# earliest time.
pointers = [pointers[i]+1 if slice[i][0] == earliest_time
else pointers[i] for i in indices]
# Make next_output a list consisting of a time: the earliest time
# followed by a sequence of lists, one for each input stream.
# Each list in this sequence consists of the non-time fields.
next_output = [earliest_time]
next_output.append(next_output_value)
next_output = func(next_output)
# output_list has an element for each time in the input list.
output_list.append(next_output)
# Return: (1) output_lists, the list of outputs, one per
# output stream. This agent has a single output stream
# and so output_lists = [output_list]
# (2) the new state; the state is irrelevant for this
# agent because all it does is merge streams.
# (3) the new starting pointer into this stream for
# this agent. Since this agent has read
# pointers[i] number of elements in the i-th input
# stream, move the starting pointer for the i-th input
# stream forward by pointers[i].
return [output_list], state, [in_lists[i].start+pointers[i] for i in indices]
# Finished transition
# Create agent
state = None
# Create agent with the following parameters:
# 1. list of input streams.
# 2. list of output streams. This agent has a single output stream and so
# out_streams is [out_stream].
# 3. transition function
# 4. new state (irrelevant for this agent), so state is None
# 5. list of calling streams
# 6. Agent name
return Agent(in_streams, [out_stream], transition, state, call_streams, name)
| 5,322 |
def get_repository_ids_requiring_prior_install( trans, tsr_ids, repository_dependencies ):
"""
Inspect the received repository_dependencies and determine if the encoded id of each required repository is in the received tsr_ids. If so,
then determine whether that required repository should be installed prior to its dependent repository. Return a list of encoded repository
ids, each of which is contained in the received list of tsr_ids, and whose associated repositories must be installed prior to the dependent
repository associated with the received repository_dependencies.
"""
prior_install_ids = []
if repository_dependencies:
for key, rd_tups in repository_dependencies.items():
if key in [ 'description', 'root_key' ]:
continue
for rd_tup in rd_tups:
tool_shed, name, owner, changeset_revision, prior_installation_required = suc.parse_repository_dependency_tuple( rd_tup )
if asbool( prior_installation_required ):
repository = suc.get_repository_for_dependency_relationship( trans.app, tool_shed, name, owner, changeset_revision )
if repository:
encoded_repository_id = trans.security.encode_id( repository.id )
if encoded_repository_id in tsr_ids:
prior_install_ids.append( encoded_repository_id )
return prior_install_ids
| 5,323 |
def get_words_and_spaces(
words: Iterable[str], text: str
) -> Tuple[List[str], List[bool]]:
"""Given a list of words and a text, reconstruct the original tokens and
return a list of words and spaces that can be used to create a Doc. This
can help recover destructive tokenization that didn't preserve any
whitespace information.
words (Iterable[str]): The words.
text (str): The original text.
RETURNS (Tuple[List[str], List[bool]]): The words and spaces.
"""
if "".join("".join(words).split()) != "".join(text.split()):
raise ValueError(Errors.E194.format(text=text, words=words))
text_words = []
text_spaces = []
text_pos = 0
# normalize words to remove all whitespace tokens
norm_words = [word for word in words if not word.isspace()]
# align words with text
for word in norm_words:
try:
word_start = text[text_pos:].index(word)
except ValueError:
raise ValueError(Errors.E194.format(text=text, words=words)) from None
if word_start > 0:
text_words.append(text[text_pos : text_pos + word_start])
text_spaces.append(False)
text_pos += word_start
text_words.append(word)
text_spaces.append(False)
text_pos += len(word)
if text_pos < len(text) and text[text_pos] == " ":
text_spaces[-1] = True
text_pos += 1
if text_pos < len(text):
text_words.append(text[text_pos:])
text_spaces.append(False)
return (text_words, text_spaces)
| 5,324 |
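A quick illustration of get_words_and_spaces above (spaCy-style word/space alignment); the inputs are invented.
words = ["Hello", ",", "world", "!"]
text = "Hello, world!"

out_words, out_spaces = get_words_and_spaces(words, text)
print(out_words)   # ['Hello', ',', 'world', '!']
print(out_spaces)  # [False, True, False, False]

# The pair reconstructs the original text exactly.
assert "".join(w + (" " if s else "") for w, s in zip(out_words, out_spaces)) == text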
def parse_names(source: str) -> List['Name']:
"""Parse names from source."""
tree = ast.parse(source)
visitor = ImportTrackerVisitor()
visitor.visit(tree)
return sum([split_access(a) for a in visitor.accessed], [])
| 5,325 |
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
| 5,326 |
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate MSE loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
MSE of given predictions
"""
return (1 / y_true.shape[0]) * (np.sum((y_true - y_pred) ** 2))
| 5,327 |
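A worked example for mean_square_error above.
import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.0, 2.5, 2.0, 5.0])

# Squared errors are 0, 0.25, 1 and 1, so the MSE is 2.25 / 4 = 0.5625.
print(mean_square_error(y_true, y_pred))  # 0.5625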
def gmail_auth(logfile, mode):
"""Handles Gmail authorization via Gmail API."""
creds = None
# the file .token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time
pickled_token = "../conf/.token.pickle"
if mode == "dev":
pickled_token = "../conf/.dev.token.pickle"
elif mode != "norm":
sys.stderr.write("Error: Call to gmail_auth with unknown mode: '" +
mode + "'...")
exit(1)
if os.path.exists(pickled_token):
with open(pickled_token, "rb") as token:
creds = pickle.load(token)
# if there are no (valid) credentials available, let the user log in
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
req_ret = None
try:
req_ret = func_timeout(LIMIT, Request)
except FunctionTimedOut:
log(logfile, "Request was not completed within " + str(LIMIT) +
" seconds.")
exit(1)
except:
log(logfile, "Something unexpected happened when trying to " +
"refresh Google credentials.")
exit(1)
creds.refresh(req_ret)
else:
flow = InstalledAppFlow.from_client_secrets_file("../conf/" +
"credentials.json",
SCOPES)
creds = None
try:
creds = func_timeout(LIMIT, flow.run_local_server)
except FunctionTimedOut:
log(logfile, "Authorization was not completed within " +
str(LIMIT) + " seconds.")
exit(1)
except:
log(logfile, "Something unexpected happened when trying to " +
"load Google credentials.")
exit(1)
# save the credentials for the next run
with open(pickled_token, "wb") as token:
pickle.dump(creds, token)
return creds
| 5,328 |
def graduation_threshold(session):
"""get graduation threshold
url : "/user/graduation-threshold"
Args:
session ([requests.session]): must be logged in to webap!
Returns:
[requests.models.Response]: requests response
returns False on any error
"""
# post it, it will return Aength.kuas.edu.tw cookie
Aength_login = session.post('https://webap.nkust.edu.tw/nkust/fnc.jsp',
data={'fncid': 'AG635'})
# get post data
try:
root = etree.HTML(Aength_login.text)
term_form_xpath = root.xpath('//input[@type="hidden"]')
term_form = {i.values()[1]: i.values()[-1] for i in term_form_xpath}
except:
return False
# final post
query_url = 'http://Aength.kuas.edu.tw/AUPersonQ.aspx'
res = session.post(url=query_url, data=term_form)
return res
| 5,329 |
def q_conjugate(q):
"""
quaternion conjugate
"""
w, x, y, z = q
return (w, -x, -y, -z)
| 5,330 |
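A short example for q_conjugate above; for a unit quaternion the conjugate is also its inverse.
q = (1, 2, 3, 4)
print(q_conjugate(q))  # (1, -2, -3, -4)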
def finalize_schemas(fields_nested):
"""Clean up all schema level attributes"""
for schema_name in fields_nested:
schema = fields_nested[schema_name]
schema_cleanup_values(schema)
| 5,331 |
def put_data(
click_ctx,
mount_dir,
project,
source,
source_path_file,
break_on_fail,
overwrite,
num_threads,
silent,
):
"""Upload data to a project.
Limited to Unit Admins and Personnel.
To upload a file (with the same name) a second time, use the `--overwrite` flag.
Prior to the upload, the DDS checks if the files are compressed and if not compresses them,
followed by encryption. After this the files are uploaded to the cloud.
NB! The current setup requires compression and encryption to be performed locally. Make sure you
have enough space. This will be improved on in future releases.
The default number of files to compress, encrypt and upload at a time is four. This can be
changed by altering the `--num-threads` option, but whether or not it works depends on the
machine you are running the CLI on.
The token is valid for 7 days. Make sure your token is valid long enough for the
delivery to finish. To avoid that a delivery fails because of an expired token, we recommend
reauthenticating yourself before uploading data.
"""
try:
dds_cli.data_putter.put(
mount_dir=mount_dir,
project=project,
source=source,
source_path_file=source_path_file,
break_on_fail=break_on_fail,
overwrite=overwrite,
num_threads=num_threads,
silent=silent,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.UploadError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
| 5,332 |
def displayMetaDataSubWindow(weasel, tableTitle, dataset):
"""
Creates a subwindow that displays a DICOM image's metadata.
"""
try:
logger.info('ViewMetaData.displayMetaDataSubWindow called.')
title = "DICOM Image Metadata"
widget = QWidget()
widget.setLayout(QVBoxLayout())
metaDataSubWindow = QMdiSubWindow()
metaDataSubWindow.setAttribute(Qt.WA_DeleteOnClose)
metaDataSubWindow.setWidget(widget)
metaDataSubWindow.setObjectName("metaData_Window")
metaDataSubWindow.setWindowTitle(title)
height, width = weasel.getMDIAreaDimensions()
metaDataSubWindow.setGeometry(width * 0.4,0,width*0.6,height)
lblImageName = QLabel('<H4>' + tableTitle + '</H4>')
widget.layout().addWidget(lblImageName)
DICOM_Metadata_Table_View = buildTableView(dataset)
# Add Search Bar
searchField = QLineEdit()
searchField.textEdited.connect(lambda x=searchField.text(): searchTable(DICOM_Metadata_Table_View, x))
# Add export to Excel/CSV buttons
export_excel_button = QPushButton('&Export To Excel', clicked=lambda: exportToFile(weasel, DICOM_Metadata_Table_View, excel=True))
export_csv_button = QPushButton('&Export To CSV', clicked=lambda: exportToFile(weasel, DICOM_Metadata_Table_View, csv=True))
horizontalBox = QHBoxLayout()
horizontalBox.addWidget(searchField)
horizontalBox.addWidget(export_excel_button)
horizontalBox.addWidget(export_csv_button)
widget.layout().addLayout(horizontalBox)
widget.layout().addWidget(DICOM_Metadata_Table_View)
weasel.addSubWindow(metaDataSubWindow)
metaDataSubWindow.show()
except Exception as e:
print('Error in : ViewMetaData.displayMetaDataSubWindow' + str(e))
logger.error('Error in : ViewMetaData.displayMetaDataSubWindow' + str(e))
| 5,333 |
def row_r(row, boxsize):
"""Cell labels in 'row' of Sudoku puzzle of dimension 'boxsize'."""
nr = n_rows(boxsize)
return range(nr * (row - 1) + 1, nr * row + 1)
| 5,334 |
def send_warning_mail_patron_has_active_loans(patron_pid):
"""Send email to librarians user cannot be deleted because active loans.
:param patron_pid: the pid of the patron.
"""
Patron = current_app_ils.patron_cls
patron = Patron.get_patron(patron_pid)
loans = [
loan.to_dict()
for loan in get_active_loans_by_patron_pid(patron_pid).scan()
]
if len(loans) > 0: # Email is sent only if there are active loans
recipients = current_app.config[
"ILS_MAIL_NOTIFY_MANAGEMENT_RECIPIENTS"
]
msg = UserDeletionWarningActiveLoanMessage(
patron, loans, recipients=recipients
)
send_ils_email(msg)
| 5,335 |
def get_set_from_word(permutation: Sequence[int], digit: Digit) -> set[int]:
"""
Returns a digit set from a given digit word,
based on the current permutation.
i.e. if:
permutation = [6, 5, 4, 3, 2, 1, 0]
digit = 'abcd'
then output = {6, 5, 4, 3}
"""
return {permutation[ord(char) - ord("a")] for char in digit}
| 5,336 |
def get_avg_sentiment(sentiment):
"""
Computes and returns the average sentiment
of all titles and bodies of our query
"""
average = {}
for coin in sentiment:
# sum up all compound readings from each title & body associated with the
# coin we detected in keywords
average[coin] = sum([item['compound'] for item in sentiment[coin]])
# get the mean compound sentiment if it's not 0
if average[coin] != 0:
average[coin] = average[coin] / len(sentiment[coin])
return average
| 5,337 |
def is_valid(url):
"""
Checks whether `url` is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
| 5,338 |
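A few example calls for is_valid above; the urlparse import assumed by the snippet is shown explicitly.
from urllib.parse import urlparse  # import assumed by the snippet above

print(is_valid("https://example.com/page"))  # True
print(is_valid("example.com/page"))          # False (no scheme)
print(is_valid("not a url"))                 # False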
def data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put(uuid, tapi_common_capacity_value=None): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| 5,339 |
def statistics_power_law_alpha(A_in):
"""
Compute the power law coefficient of the degree distribution of the input graph
Parameters
----------
A_in: sparse matrix or np.array
The input adjacency matrix.
Returns
-------
Power law coefficient
"""
degrees = A_in.sum(axis=0)
return powerlaw.Fit(degrees, xmin=max(np.min(degrees), 1)).power_law.alpha
| 5,340 |
def does_file_exist(path):
""" Checks if the given file is in the local filesystem.
Args:
path: A str, the path to the file.
Returns:
True on success, False otherwise.
"""
return os.path.isfile(path)
| 5,341 |
def common_gnuplot_settings():
""" common gnuplot settings. """
g_plot = Gnuplot.Gnuplot(persist=1)
# The following line is for rigor only. It seems to be assumed for .csv files
g_plot('set datafile separator \",\"')
g_plot('set ytics nomirror')
g_plot('set xtics nomirror')
g_plot('set xtics font ", 10"')
g_plot('set ytics font ", 10"')
g_plot('set tics out scale 1.0')
g_plot('set grid')
g_plot('set key out horiz')
g_plot('set key bot center')
g_plot('set key samplen 2 spacing .8 font ", 9"')
g_plot('set term png size 1200, 600')
g_plot('set title font ", 11"')
g_plot('set ylabel font ", 10"')
g_plot('set xlabel font ", 10"')
g_plot('set xlabel offset 0, 0.5')
g_plot('set xlabel "Elapsed Time (Seconds)"')
return(g_plot)
| 5,342 |
def video_feed_cam1():
"""Video streaming route. Put this in the src attribute of an img tag."""
cam = Camera(0)
return Response(gen(cam), mimetype='multipart/x-mixed-replace; boundary=frame')
| 5,343 |
def listDatasets(objects = dir()):
"""
Utility function to identify currently loaded datasets.
Function should be called with default parameters,
ie as 'listDatasets()'
"""
datasetlist = []
for item in objects:
try:
if eval(item + '.' + 'has_key("DATA")') == True:
datasetlist.append(item)
except AttributeError:
pass
return datasetlist
| 5,344 |
def test_asynchronous_ops(sut: SystemUnderTest):
"""Perform asynchronous operation tests"""
# TODO(bdodd): At this time, there seems to be no async operation that
# would be implemented by many vendors and be non-invasive to test.
# Revisit in future.
pass
| 5,345 |
def _(node: FromReference, ctx: AnnotateContext) -> BoxType:
"""Check that the parent node had {node.name} as a valid reference. Raises
an error if not, else copy over the set of references.
"""
t = box_type(node.over)
ft = t.row.fields.get(node.name, None)
if not isinstance(ft, RowType):
raise ErrReference(
ErrType.INVALID_TABLE_REF, name=node.name, path=ctx.get_path(node.over)
)
return BoxType(node.name, ft)
| 5,346 |
def save_to_file(log_file):
"""
Change default output to be SaveFile()
"""
if len(log_file) != 0:
sys.stdout = SaveFile(log_file)
| 5,347 |
def invert(d: Mapping):
"""
invert a mapping's keys and values
:param d:
:return:
"""
r: Dict = {}
for k, v in d.items():
r[v] = of(r[v], k) if v in r else k
return r
| 5,348 |
def data_dir() -> str:
"""The directory where result data is written to"""
return '/tmp/bingads/'
| 5,349 |
def intensity_weighted_dispersion(data, x0=0.0, dx=1.0, rms=None,
threshold=None, mask_path=None, axis=0):
"""
Returns the intensity weighted velocity dispersion (second moment).
"""
# Calculate the intensity weighted velocity first.
m1 = intensity_weighted_velocity(data=data, x0=x0, dx=dx, rms=rms,
threshold=threshold, mask_path=mask_path,
axis=axis)[0]
# Rearrange the data to what we need.
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
weights = get_intensity_weights(data, mask)
npix_mask = np.where(npix > 1, 1, np.nan)
vpix = dx * np.arange(data.shape[0]) + x0
vpix = vpix[:, None, None] * np.ones(data.shape)
# Intensity weighted dispersion.
m1 = m1[None, :, :] * np.ones(data.shape)
m2 = np.sum(weights * (vpix - m1)**2, axis=0) / np.sum(weights, axis=0)
m2 = np.sqrt(m2)
if rms is None:
return m2 * npix_mask, None
# Calculate the uncertainties.
dm2 = ((vpix - m1)**2 - m2**2) * rms / np.sum(weights, axis=0)
dm2 = np.sqrt(np.sum(dm2**2, axis=0)) / 2. / m2
return m2 * npix_mask, dm2 * npix_mask
| 5,350 |
def vrtnws_api_request(service, path, params=None):
"""Sends a request to the VRTNWS API endpoint"""
url = BASE_URL_VRTNWS_API.format(service, path)
try:
res = requests.get(url, params)
try:
return res.json()
except ValueError:
return None
except requests.RequestException as ex:
print("VRTNWS API request '{}' failed:".format(url), ex)
return None
| 5,351 |
def test_coreapi_schema(sdk_client_fs: ADCMClient, tested_class: Type[BaseAPIObject]):
"""Test coreapi schema"""
def _get_params(link):
result = {}
for field in link.fields:
result[field.name] = True
return result
schema_obj = sdk_client_fs._api.schema
with allure.step(f'Get {tested_class.__name__} schema objects'):
for path in tested_class.PATH:
assert path in schema_obj.data
schema_obj = schema_obj[path]
params = _get_params(schema_obj.links['list'])
with allure.step(f'Check if filters are acceptable for coreapi {tested_class.__name__}'):
for _filter in tested_class.FILTERS:
expect(
_filter in params,
f"Filter {_filter} should be acceptable for coreapi in class {tested_class.__name__}",
)
assert_expectations()
| 5,352 |
def augment_tensor(matrix, ndim=None):
"""
Increase the dimensionality of a tensor,
splicing it into an identity matrix of a higher
dimension. Useful for generalizing
transformation matrices.
"""
s = matrix.shape
if ndim is None:
ndim = s[0]+1
arr = N.identity(ndim)
arr[:s[0],:s[1]] = matrix
return arr
| 5,353 |
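An illustrative use of augment_tensor above, assuming numpy is imported as N as in the snippet: a 2x2 rotation is embedded in a 3x3 homogeneous transformation.
import numpy as N  # matches the N alias used in the snippet above

theta = N.radians(30)
rot2d = N.array([[N.cos(theta), -N.sin(theta)],
                 [N.sin(theta),  N.cos(theta)]])

rot3d = augment_tensor(rot2d)
print(rot3d.shape)  # (3, 3)
print(rot3d[2])     # [0. 0. 1.], the added row of the identity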
def ping_redis() -> bool:
"""Call ping on Redis."""
try:
return REDIS.ping()
except (redis.exceptions.ConnectionError, redis.exceptions.ResponseError):
LOGGER.warning('Redis Ping unsuccessful')
return False
| 5,354 |
def test_scenario_tables_are_solved_against_outlines():
"""Outline substitution should apply to tables within a scenario"""
expected_hashes_per_step = [
# a = 1, b = 2
(
{"Parameter": "a", "Value": "1"},
{"Parameter": "b", "Value": "2"},
), # Given ...
(), # When I run the program
(), # Then I crash hard-core
# a = 2, b = 4
({"Parameter": "a", "Value": "2"}, {"Parameter": "b", "Value": "4"}),
(),
(),
]
scenario = parse_scenario(OUTLINED_SCENARIO_WITH_SUBSTITUTIONS_IN_TABLE)
solved = solved_steps(scenario)
for step, expected in zip(solved, expected_hashes_per_step):
assert type(step) == Step
assert step.hashes == expected
| 5,355 |
def createDataset(outputPath, path,images_train, labels_train, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
images_train : list of image paths, relative to path
labels_train : list of corresponding ground-truth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert (len(images_train) == len(labels_train))
nSamples = len(images_train)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in range(nSamples):
imagePath = images_train[i]
label = labels_train[i]
if not os.path.exists(path + imagePath):
print('%s does not exist' % imagePath)
continue
with open(path + imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
| 5,356 |
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
These halving events will not be hit at the exact times due to fluctuations in difficulty. They will likely
come early, if the network space and VDF rates increase continuously.
We start off at 2,199,023,255,552, which is 2^41 (about 2.2 heather), then halve in year 2, halve again in year 4,
then again in year 8, etc. After 5 halvings the reward drops to zero, but don't panic, that's year 64.
A right shift (>>) is used to halve the reward.
"""
if height == 0:
return uint64(int((7 / 8) * (_base_reward << 16)))
elif height < 1 * _blocks_per_year:
return uint64(int((7 / 8) * _base_reward))
elif height < 3 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 1)))
elif height < 7 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 2)))
elif height < 15 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 3)))
elif height < 31 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 4)))
elif height < 63 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 5)))
else:
return uint64(0)
| 5,357 |
def get_argument(index, default=''):
"""
Get a command-line argument, or use the default value.
"""
if len(sys.argv) <= index:
return default
return sys.argv[index]
| 5,358 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
username = request.form.get("username").strip()
password = request.form.get("password")
# Ensure username was submitted
if not username:
return apology("must provide username", 403)
# Ensure password was submitted
elif not password:
return apology("must provide password", 403)
username = request.form.get("username")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=username)
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
| 5,359 |
def test_list_g_month_length_4_nistxml_sv_iv_list_g_month_length_5_2(mode, save_output, output_format):
"""
Type list/gMonth is restricted by facet length with value 10.
"""
assert_bindings(
schema="nistData/list/gMonth/Schema+Instance/NISTSchema-SV-IV-list-gMonth-length-5.xsd",
instance="nistData/list/gMonth/Schema+Instance/NISTXML-SV-IV-list-gMonth-length-5-2.xml",
class_name="NistschemaSvIvListGMonthLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,360 |
def get_string_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
)
output = None
if CFContainer:
output = cf.CFStringGetCStringPtr(CFContainer, 0)
return output
| 5,361 |
def gamma_from_delta(
fn: Callable[..., Tensor], *, create_graph: bool = False, **params: Any
) -> Tensor:
"""Computes and returns gamma of a derivative from the formula of delta.
Note:
The keyword argument ``**params`` should contain at least one of
the following combinations:
- ``spot``
- ``moneyness`` and ``strike``
- ``log_moneyness`` and ``strike``
Args:
fn (callable): Function to calculate delta.
create_graph (bool, default=False): If ``True``,
graph of the derivative will be constructed,
allowing to compute higher order derivative products.
**params: Parameters passed to ``fn``.
Returns:
torch.Tensor
"""
return delta(pricer=fn, create_graph=create_graph, **params)
| 5,362 |
def clean_filename(string: str) -> str:
"""
Clean illegal characters from a filename so it can be safely saved to the filesystem.
:param string:
:return:
"""
string = string.replace(':', '_').replace('/', '_').replace('\x00', '_')
string = re.sub('[\n\\\*><?\"|\t]', '', string)
return string.strip()
| 5,363 |
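A quick example for clean_filename above (re is imported by the snippet); the filename is invented.
import re  # used by clean_filename above

title = 'Report: Q3/2021 "final"?\t.pdf '
print(clean_filename(title))  # Report_ Q3_2021 final.pdf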
def homework(request, id_class):
"""
View for listing the specified class' assignments
"""
cl = Classes.objects.get(pk=id_class)
assm = Assignments.objects.all().filter(a_class=cl)
return render_to_response("assignments.html", {"assignments": assm, "class": cl}, context_instance=RequestContext(request))
| 5,364 |
def build_model(master_config):
"""
Imports the proper model class and builds model
"""
available_models = os.listdir("lib/classes/model_classes")
available_models = [i.replace(".py", "") for i in available_models]
model_type = master_config.Core_Config.Model_Config.model_type
model_class = None
if model_type in available_models:
model_class_module = importlib.import_module("lib.classes.model_classes." + model_type)
model_class = getattr(model_class_module, model_type)
else:
print("Error: model type not available. Check lib/classes/model_classes/ for available models", flush=True)
return False
model = model_class(master_config)
if master_config.Core_Config.Reload_Config.reload:
reload_path = master_config.Core_Config.Reload_Config.reload_path
if master_config.Core_Config.Reload_Config.by_name:
model.load_weights(reload_path + "model_and_config/final_model_weights.h5", by_name=True)
else:
model.load_weights(reload_path + "model_and_config/final_model_weights.h5")
return model
| 5,365 |
def read_video_txt(path_in,frame_per_video):
"""
read txtfile
Parameters:
---------
path_in : str
path to the txt list file
frame_per_video:int
frame per video
Returns:
---------
[index,image_path,label] as iterator
"""
num_index=0
with open(path_in) as fin:
while True:
line = fin.readline()
#print line
if not line:
break
line = [i.strip() for i in line.strip().split(' ')]
line_len = len(line)
if line_len < 3:
print('lst should have at least three parts, but only has %s parts for %s' % (line_len, line))
continue
try:
label = float(line[-1])
length = int(line[1])
new_length = 1
average_duration = length/frame_per_video
for i in range(0,frame_per_video):
if average_duration >= new_length:
if (i+1)*average_duration <= length:
offset = int(random.uniform(i* average_duration,(i+1)*average_duration))
else:
offset = int(random.uniform(i* average_duration,length))
if offset <= 0:
offset = 1
elif offset >= length:
offset = length
image_path = line[0] + "/img_%05d.jpg"%(offset)
index = int(num_index + i)
item = [index] + [image_path] + [label]
yield item
num_index += frame_per_video
except Exception as e:
print('Parsing lst met error for %s, detail: %s' %(line, e))
continue
| 5,366 |
def test_extract_command_list_zsh():
"""Test the extract_command_list command with a zsh like input."""
content = """ 9597 5.1.2020 11:23 git status
9598 5.1.2020 11:23 cd .idea
9599 5.1.2020 11:23 ls
9600 5.1.2020 11:23 ..
9601 5.1.2020 11:23 rm -rf .idea
9602 5.1.2020 11:24 rm n26-csv-transactions.csv
9603 5.1.2020 11:24 ..
9604 5.1.2020 11:24 git status .
9605 5.1.2020 11:24 cd gunicorn/worker-performance
9606 5.1.2020 11:24 ls
9607 5.1.2020 11:24 git add app.py
9608 5.1.2020 11:24 isort app.py"""
df = extract_command_list(content, shell="zsh")
assert len(df) == 12
| 5,367 |
def get_digits_from_right_to_left(number):
"""Return digits of an integer excluding the sign."""
number = abs(number)
if number < 10:
return (number, )
lst = list()
while number:
number, digit = divmod(number, 10)
lst.insert(0, digit)
return tuple(lst)
| 5,368 |
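Example calls for get_digits_from_right_to_left above: digits are peeled off from the right, but the insert(0, ...) returns them most-significant first.
print(get_digits_from_right_to_left(-507))  # (5, 0, 7)
print(get_digits_from_right_to_left(8))     # (8,)
print(get_digits_from_right_to_left(0))     # (0,)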
def p_attr_def(p):
"""
attr_def : OBJECT_ID COLON type
| OBJECT_ID COLON type ASSIGN expr
"""
if len(p) == 4:
p[0] = ast.AttrDeclarationNode(p[1], p[3])
else:
p[0] = ast.AttrDeclarationNode(p[1], p[3], p[5])
| 5,369 |
def make_diamond(block):
"""
Return a block after rotating counterclockwise 45° to form a diamond
"""
result = []
upper = upper_triangle(block)
upper = [i.rjust(size-1) for i in upper]
upper_form = []
upper_length = len(upper)
for i in range(upper_length):
upper_form.append(diag_line(upper))
upper = upper_triangle(upper)
upper = [k.rjust(size-1-i-1) for k in upper]
upper_form = [' '.join(i) for i in upper_form]
upper_form = upper_form[::-1]
diag = diag_line(block)
diag = ' '.join(diag)
lower = lower_triangle(block)
lower = [i.ljust(size-1) for i in lower]
lower_form = []
lower_length = len(lower)
for i in range(lower_length):
lower_form.append(diag_line(lower))
lower = lower_triangle(lower)
lower = [k.ljust(size-1-i-1) for k in lower]
lower_form = [' '.join(i) for i in lower_form]
max_length = len(diag)
upper_form = [i.center(max_length) for i in upper_form]
lower_form = [i.center(max_length) for i in lower_form]
result += upper_form
result.append(diag)
result += lower_form
return result
| 5,370 |
def do_sitelist(parser, token):
"""
Allows a template-level call to get a list of all the active sites.
"""
return SitelistNode()
| 5,371 |
def default_model_uk_ifriar(
data,
ep,
intervention_prior=None,
basic_R_prior=None,
r_walk_noise_scale_prior=0.15,
r_walk_period=7,
n_days_seeding=7,
seeding_scale=3.0,
infection_noise_scale=5.0,
output_noise_scale_prior=5.0,
**kwargs,
):
"""
Identical to the base model, except that hard-coded IFR/IAR estimates are used.
:param data: PreprocessedData object
:param ep: EpidemiologicalParameters object
:param intervention_prior: intervention prior dict
:param basic_R_prior: basic r prior dict
:param r_walk_noise_scale_prior: scale of random walk noise scale prior
:param r_walk_period: period of random walk
:param n_days_seeding: number of days of seeding
:param seeding_scale: scale of seeded infection prior
:param infection_noise_scale: scale of infection noise
:param output_noise_scale_prior: output noise scale prior
:param kwargs: additional kwargs (not used, but maintain function signature)
"""
for k in kwargs.keys():
print(f"{k} is not being used")
# First, compute R.
# sample intervention effects from their priors.
# mean intervention effects
alpha_i = sample_intervention_effects(data.nCMs, intervention_prior)
# transmission reduction
cm_reduction = jnp.sum(data.active_cms * alpha_i.reshape((1, data.nCMs, 1)), axis=1)
basic_R = sample_basic_R(data.nRs, basic_R_prior)
# number of 'noise points'
# -2 since no change for the first 3 weeks.
nNP = int(data.nDs / r_walk_period) - 1
r_walk_noise_scale = numpyro.sample(
"r_walk_noise_scale", dist.HalfNormal(scale=r_walk_noise_scale_prior)
)
# rescaling variables by 10 for better NUTS adaptation
r_walk_noise = numpyro.sample(
"r_walk_noise",
dist.Normal(loc=jnp.zeros((data.nRs, nNP)), scale=1.0 / 10),
)
# only apply the noise every "r_walk_period" - to get full noise, repeat
expanded_r_walk_noise = jnp.repeat(
r_walk_noise_scale * 10.0 * jnp.cumsum(r_walk_noise, axis=-1),
r_walk_period,
axis=-1,
)[: data.nRs, : (data.nDs - 2 * r_walk_period)]
# except that we assume no noise for the first 3 weeks
full_log_Rt_noise = jnp.zeros((data.nRs, data.nDs))
full_log_Rt_noise = jax.ops.index_update(
full_log_Rt_noise, jax.ops.index[:, 2 * r_walk_period :], expanded_r_walk_noise
)
Rt = numpyro.deterministic(
"Rt",
jnp.exp(
jnp.log(basic_R.reshape((data.nRs, 1))) + full_log_Rt_noise - cm_reduction
),
)
# collect variables in the numpyro trace
numpyro.deterministic("Rt_walk", jnp.exp(full_log_Rt_noise))
numpyro.deterministic(
"Rt_cm", jnp.exp(jnp.log(basic_R.reshape((data.nRs, 1))) - cm_reduction)
)
# Infection Model
seeding_padding = n_days_seeding
total_padding = ep.GIv.size - 1
# note; seeding is also rescaled
init_infections, total_infections_placeholder = seed_infections(
seeding_scale, data.nRs, data.nDs, seeding_padding, total_padding
)
discrete_renewal_transition = get_discrete_renewal_transition(ep)
# we need to transpose R because jax.lax.scan scans over the first dimension.
# We want to scan over time instead of regions!
_, infections = jax.lax.scan(
discrete_renewal_transition,
init_infections,
Rt.T,
)
# corrupt infections with additive noise, adding robustness at small case and death
# counts
infection_noise = numpyro.sample(
"infection_noise",
dist.Normal(loc=0, scale=0.1 * jnp.ones((data.nRs, data.nDs))),
)
# enforce positivity!
infections = jax.nn.softplus(
infections + (infection_noise_scale * (10.0 * infection_noise.T))
)
total_infections = jax.ops.index_update(
total_infections_placeholder,
jax.ops.index[:, :seeding_padding],
init_infections[:, -seeding_padding:],
)
total_infections = numpyro.deterministic(
"total_infections",
jax.ops.index_update(
total_infections, jax.ops.index[:, seeding_padding:], infections.T
),
)
# Scale by fixed UK numbers
iar_t = jnp.array(
[
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.32831737,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.41706005,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.49174617,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.55604659,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.59710354,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61638832,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.61569382,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.60523783,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.58280771,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
0.57764043,
]
)
ifr_t = jnp.array(
[
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00858016,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00780975,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00707346,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00626562,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00602792,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00616321,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00670325,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00748796,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
0.00805689,
]
)
# use the `RC_mat` to pull the country level change in the rates for the relevant local area
future_cases_t = numpyro.deterministic(
"future_cases_t", jnp.multiply(total_infections, iar_t)
)
future_deaths_t = numpyro.deterministic(
"future_cases_t", jnp.multiply(total_infections, ifr_t)
)
# collect expected cases and deaths
expected_cases = numpyro.deterministic(
"expected_cases",
jax.scipy.signal.convolve2d(future_cases_t, ep.DPC, mode="full")[
:, seeding_padding : seeding_padding + data.nDs
],
)
expected_deaths = numpyro.deterministic(
"expected_deaths",
jax.scipy.signal.convolve2d(future_deaths_t, ep.DPD, mode="full")[
:, seeding_padding : seeding_padding + data.nDs
],
)
# country specific psi cases and deaths.
# We will use the 'RC' matrix to pull the correct local area value.
psi_cases = numpyro.sample(
"psi_cases",
dist.HalfNormal(scale=output_noise_scale_prior * jnp.ones(len(data.unique_Cs))),
)
psi_deaths = numpyro.sample(
"psi_deaths",
dist.HalfNormal(scale=output_noise_scale_prior * jnp.ones(len(data.unique_Cs))),
)
# use the per country psi_cases and psi_deaths and form a nRs x nDs array
# to use for the output distribution.
cases_conc = (
(data.RC_mat @ psi_cases).reshape((data.nRs, 1)).repeat(data.nDs, axis=-1)
)
deaths_conc = (
(data.RC_mat @ psi_deaths).reshape((data.nRs, 1)).repeat(data.nDs, axis=-1)
)
with numpyro.handlers.mask(mask=jnp.logical_not(data.new_cases.mask)):
numpyro.sample(
"observed_cases",
dist.GammaPoisson(
concentration=cases_conc,
rate=cases_conc / expected_cases,
),
obs=data.new_cases.data,
)
with numpyro.handlers.mask(mask=jnp.logical_not(data.new_deaths.mask)):
numpyro.sample(
"observed_deaths",
dist.GammaPoisson(
concentration=deaths_conc,
rate=deaths_conc / expected_deaths,
),
obs=data.new_deaths.data,
)
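# --- Editor's note (illustrative sketch, not part of the original model) ------
# The observation model above uses dist.GammaPoisson(concentration=psi,
# rate=psi / expected), whose mean is concentration / rate = expected, so the
# likelihood is centred on the expected counts while psi controls per-country
# overdispersion. A minimal check of that parameterisation with made-up numbers:
import numpyro.distributions as dist  # may duplicate the module's existing import
_psi, _mu = 20.0, 150.0  # hypothetical dispersion and expected count
_d = dist.GammaPoisson(concentration=_psi, rate=_psi / _mu)
assert abs(float(_d.mean) - _mu) < 1e-6  # mean equals the expected count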
| 5,372 |
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
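# --- Editor's note (illustrative sketch, not part of the original source) -----
# Typical use of events_from_file: collect scalar summaries from a single
# TensorBoard event file. The helper name is hypothetical; the proto fields
# (event.step, summary.value[].tag / simple_value) are standard tf.Event fields.
def scalars_from_file(filepath):
    """Return {tag: [(step, value), ...]} for scalar summaries in the event file."""
    scalars = {}
    for event in events_from_file(filepath):
        for value in event.summary.value:
            if value.HasField("simple_value"):
                scalars.setdefault(value.tag, []).append((event.step, value.simple_value))
    return scalars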
| 5,373 |
def build_null_stop_time_series(feed, date_label='20010101', freq='5Min',
*, split_directions=False):
"""
Return a stop time series with the same index and hierarchical columns
as output by :func:`compute_stop_time_series_base`,
    but filled entirely with null values.
"""
start = date_label
end = pd.to_datetime(date_label + ' 23:59:00')
rng = pd.date_range(start, end, freq=freq)
inds = [
'num_trips',
]
sids = feed.stops.stop_id
if split_directions:
product = [inds, sids, [0, 1]]
names = ['indicator', 'stop_id', 'direction_id']
else:
product = [inds, sids]
names = ['indicator', 'stop_id']
cols = pd.MultiIndex.from_product(product, names=names)
return pd.DataFrame([], index=rng, columns=cols).sort_index(
axis=1, sort_remaining=True)
| 5,374 |
def yulewalk(order, F, M):
"""Recursive filter design using a least-squares method.
[B,A] = YULEWALK(N,F,M) finds the N-th order recursive filter
coefficients B and A such that the filter:
B(z) b(1) + b(2)z^-1 + .... + b(n)z^-(n-1)
---- = -------------------------------------
A(z) 1 + a(1)z^-1 + .... + a(n)z^-(n-1)
matches the magnitude frequency response given by vectors F and M.
The YULEWALK function performs a least squares fit in the time domain. The
denominator coefficients {a(1),...,a(NA)} are computed by the so called
"modified Yule Walker" equations, using NR correlation coefficients
computed by inverse Fourier transformation of the specified frequency
response H.
The numerator is computed by a four step procedure. First, a numerator
polynomial corresponding to an additive decomposition of the power
frequency response is computed. Next, the complete frequency response
corresponding to the numerator and denominator polynomials is evaluated.
Then a spectral factorization technique is used to obtain the impulse
response of the filter. Finally, the numerator polynomial is obtained by a
least squares fit to this impulse response. For a more detailed explanation
of the algorithm see [1]_.
Parameters
----------
order : int
Filter order.
F : array
Normalised frequency breakpoints for the filter. The frequencies in F
must be between 0.0 and 1.0, with 1.0 corresponding to half the sample
rate. They must be in increasing order and start with 0.0 and end with
1.0.
M : array
Magnitude breakpoints for the filter such that PLOT(F,M) would show a
plot of the desired frequency response.
References
----------
.. [1] B. Friedlander and B. Porat, "The Modified Yule-Walker Method of
ARMA Spectral Estimation," IEEE Transactions on Aerospace Electronic
Systems, Vol. AES-20, No. 2, pp. 158-173, March 1984.
Examples
--------
Design an 8th-order lowpass filter and overplot the desired
frequency response with the actual frequency response:
>>> f = [0, .6, .6, 1] # Frequency breakpoints
>>> m = [1, 1, 0, 0] # Magnitude breakpoints
>>> [b, a] = yulewalk(8, f, m) # Filter design using a least-squares method
"""
F = np.asarray(F)
M = np.asarray(M)
npt = 512
lap = np.fix(npt / 25).astype(int)
mf = F.size
npt = npt + 1 # For [dc 1 2 ... nyquist].
Ht = np.array(np.zeros((1, npt)))
nint = mf - 1
df = np.diff(F)
nb = 0
Ht[0][0] = M[0]
for i in range(nint):
if df[i] == 0:
nb = nb - int(lap / 2)
ne = nb + lap
else:
ne = int(np.fix(F[i + 1] * npt)) - 1
j = np.arange(nb, ne + 1)
if ne == nb:
inc = 0
else:
inc = (j - nb) / (ne - nb)
Ht[0][nb:ne + 1] = np.array(inc * M[i + 1] + (1 - inc) * M[i])
nb = ne + 1
Ht = np.concatenate((Ht, Ht[0][-2:0:-1]), axis=None)
n = Ht.size
n2 = np.fix((n + 1) / 2)
nb = order
nr = 4 * order
nt = np.arange(0, nr)
# compute correlation function of magnitude squared response
R = np.real(np.fft.ifft(Ht * Ht))
R = R[0:nr] * (0.54 + 0.46 * np.cos(np.pi * nt / (nr - 1))) # pick NR correlations # noqa
# Form window to be used in extracting the right "wing" of two-sided
# covariance sequence
Rwindow = np.concatenate(
(1 / 2, np.ones((1, int(n2 - 1))), np.zeros((1, int(n - n2)))),
axis=None)
A = polystab(denf(R, order)) # compute denominator
# compute additive decomposition
Qh = numf(np.concatenate((R[0] / 2, R[1:nr]), axis=None), A, order)
# compute impulse response
    _, h = signal.freqz(Qh, A, worN=n, whole=True)
    Ss = 2 * np.real(h)
    hh = np.fft.ifft(
        np.exp(np.fft.fft(Rwindow * np.fft.ifft(np.log(Ss, dtype=complex))))
    )
B = np.real(numf(hh[0:nr], A, nb))
return B, A
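# --- Editor's note (illustrative sketch, not part of the original source) -----
# Sanity check of the docstring example: design the 8th-order lowpass filter and
# confirm the achieved magnitude response is near 1 in the passband and near 0 in
# the stopband. `np` and `signal` are the same modules yulewalk already uses.
if __name__ == "__main__":
    f = [0, .6, .6, 1]  # frequency breakpoints from the docstring example
    m = [1, 1, 0, 0]    # magnitude breakpoints
    b, a = yulewalk(8, f, m)
    w, h = signal.freqz(b, a, worN=512)
    passband = np.abs(h)[w / np.pi < 0.5]
    stopband = np.abs(h)[w / np.pi > 0.7]
    print("passband mean magnitude:", passband.mean())  # expected close to 1
    print("stopband mean magnitude:", stopband.mean())  # expected close to 0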
| 5,375 |
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
user_obj = await tbot.get_entity(previous_message.sender_id)
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
await event.reply("Pass the user's username, id or reply!")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await tbot.get_entity(user_id)
return user_obj
try:
user_obj = await tbot.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj
| 5,376 |
def test_linear_chain_crf(document):
"""Linear chain CRF, with only emission and transition scores.
"""
crf_builder = SeqTagCRFBuilder(skip_chain_enabled=False)
graph = crf_builder(document)
assert len(graph) == 2
check_unary_factors(graph["unary"])
check_transition_factors(graph["transition"])
| 5,377 |
def parse_contest_list(json_file):
"""Parse a list of Contests from a JSON file.
Note:
        A template for the Contest JSON format is given in contest_template.json.
"""
with open(json_file, 'r') as json_data:
data = json.load(json_data)
contests = []
for contest in data:
contest_ballots = data[contest]['contest_ballots']
tally = data[contest]['tally']
num_winners = data[contest]['num_winners']
reported_winners = data[contest]['reported_winners']
contest_type = ContestType[data[contest]['contest_type']]
contests.append(Contest(contest_ballots, tally, num_winners, reported_winners, contest_type))
return contests
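# --- Editor's note (illustrative sketch, not part of the original source) -----
# A minimal JSON document that parse_contest_list would accept, inferred from the
# keys read above. The contest name, tally layout and the "PLURALITY" ContestType
# member are illustrative assumptions, not taken from contest_template.json.
EXAMPLE_CONTEST_JSON = '''
{
    "Mayor": {
        "contest_ballots": 1000,
        "tally": {"Alice": 600, "Bob": 400},
        "num_winners": 1,
        "reported_winners": ["Alice"],
        "contest_type": "PLURALITY"
    }
}
'''
# contests = parse_contest_list("contests.json")  # a file containing the JSON above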
| 5,378 |
def get_image_blob(roidb, mode):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
if mode == 'train' or mode == 'val':
with open(roidb['image'], 'rb') as f:
data = f.read()
data = np.frombuffer(data, dtype='uint8')
img = cv2.imdecode(data, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gt_boxes = roidb['boxes']
gt_label = roidb['gt_classes']
# resize
if mode == 'train':
img, im_scale = _resize(img, target_size=800, max_size=1333)
need_gt_boxes = gt_boxes.copy()
need_gt_boxes[:, :4] *= im_scale
img, need_gt_boxes, need_gt_label = _rotation(
img, need_gt_boxes, gt_label, prob=1.0, gt_margin=1.4)
else:
img, im_scale = _resize(img, target_size=1000, max_size=1778)
need_gt_boxes = gt_boxes
need_gt_label = gt_label
img = img.astype(np.float32, copy=False)
img = img / 255.0
mean = np.array(cfg.pixel_means)[np.newaxis, np.newaxis, :]
std = np.array(cfg.pixel_std)[np.newaxis, np.newaxis, :]
img -= mean
img /= std
img = img.transpose((2, 0, 1))
return img, im_scale, need_gt_boxes, need_gt_label
| 5,379 |
def test_db_reuse(django_testdir):
"""
Test the re-use db functionality. This test requires a PostgreSQL server
to be available and the environment variables PG_HOST, PG_DB, PG_USER to
be defined.
"""
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
py.test.skip('Do not test db reuse since database does not support it')
create_test_module(django_testdir, '''
import pytest
from app.models import Item
@pytest.mark.django_db
def test_db_can_be_accessed():
        assert Item.objects.count() == 0
''')
# Use --create-db on the first run to make sure we are not just re-using a
# database from another test run
drop_database()
assert not db_exists()
# Do not pass in --create-db to make sure it is created when it
# does not exist
result_first = django_testdir.runpytest('-v', '--reuse-db')
result_first.stdout.fnmatch_lines([
"*test_db_can_be_accessed PASSED*",
])
assert not mark_exists()
mark_database()
assert mark_exists()
result_second = django_testdir.runpytest('-v', '--reuse-db')
result_second.stdout.fnmatch_lines([
"*test_db_can_be_accessed PASSED*",
])
# Make sure the database has not been re-created
assert mark_exists()
result_third = django_testdir.runpytest('-v', '--reuse-db', '--create-db')
result_third.stdout.fnmatch_lines([
"*test_db_can_be_accessed PASSED*",
])
# Make sure the database has been re-created and the mark is gone
assert not mark_exists()
| 5,380 |
def setTimeCallback(timefunc=None):
"""Sets a function that will return the window's global time. This
will be used by the animation timing, visualization plots, and movie-saving
functions.
Args:
timefunc (callable): returns a monotonically non-decreasing float.
If None, reverts back to using time.time().
"""
_init()
scene().setTimeCallback(timefunc)
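# --- Editor's note (illustrative sketch, not part of the original source) -----
# Example: drive the window clock from a custom monotonically non-decreasing
# function instead of wall-clock time (here, a clock running at half speed).
import time
_T0 = time.time()
def half_speed_clock():
    """A monotonically non-decreasing clock advancing at half real-time speed."""
    return 0.5 * (time.time() - _T0)
# setTimeCallback(half_speed_clock)  # animations, plots and movies follow this clock
# setTimeCallback(None)              # revert to time.time()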
| 5,381 |
def rx_weight_fn(edge):
"""A function for returning the weight from the common vertex."""
return float(edge["weight"])
| 5,382 |
def image_inputs(images_and_videos, data_dir, text_tmp_images):
"""Generates a list of input arguments for ffmpeg with the given images."""
include_cmd = []
# adds images as video starting on overlay time and finishing on overlay end
img_formats = ['gif', 'jpg', 'jpeg', 'png']
for ovl in images_and_videos:
filename = ovl['image']
# checks if overlay is image or video
is_img = False
for img_fmt in img_formats:
is_img = filename.lower().endswith(img_fmt)
if is_img:
break
# treats image overlay
if is_img:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
is_gif = filename.lower().endswith('.gif')
has_fade = (float(ovl.get('fade_in_duration', 0)) +
float(ovl.get('fade_out_duration', 0))) > 0
# A GIF with no fade is treated as an animated GIF should.
# It works even if it is not animated.
# An animated GIF cannot have fade in or out effects.
if is_gif and not has_fade:
include_args = ['-ignore_loop', '0']
else:
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(ovl['start_time']), '-t', duration]
# GIFs should have a special input decoder for FFMPEG.
if is_gif:
include_args += ['-c:v', 'gif']
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# treats video overlays
else:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
include_args = ['-itsoffset', str(ovl['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# adds texts as video starting and finishing on their overlay timing
for img2 in text_tmp_images:
duration = str(float(img2['end_time']) - float(img2['start_time']))
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(img2['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + [str(img2['path'])]
return include_cmd
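# --- Editor's note (illustrative sketch, not part of the original source) -----
# Example of the argument list produced for a single image overlay; the asset
# name, times and data_dir below are made up.
if __name__ == "__main__":
    overlays = [{"image": "logo.png", "start_time": 2.0, "end_time": 7.0}]
    print(image_inputs(overlays, "/tmp/project", text_tmp_images=[]))
    # -> ['-f', 'image2', '-loop', '1', '-itsoffset', '2.0', '-t', '5.0', '-i',
    #     '/tmp/project/assets/logo.png']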
| 5,383 |
def generate_graph(data_sets: pd.DataFrame, data_source: str, data_state: str, toggle_new_case: bool, year: int) -> tuple[px.line, px.bar]:
"""Takes in the inputs and returns a graph object. The inputs are the source, data, location and year.
The graph is a prediction of the sentiment from the comments as a function of time. Another trace of cases can be displayed as well.
We can also have graphs directly comparing # of cases with sentiment by having cases on the x and its sentiment on that day on the y.
Depending on the input, a graph that takes into account source, state(how much the model is trained), show cases(toggle on/off), location and year.
The user can choose which type of graph to generate.
Returns a line graph and a bar chart.
"""
main_graph = px.line(
data_sets[data_source],
x="Date",
y="New Cases",
)
if toggle_new_case:
main_graph.add_trace(
            go.Scatter(
                mode="lines",
                x=data_sets[data_source].loc[:, 'Date'],
                y=data_sets[data_source].loc[:, 'New Cases']
            )
)
stat_data_sets = pd.DataFrame(
index=["Max", "Min", "Mean"],
data={
"Cases": [
data_sets[data_source].loc[:, "New Cases"].max(),
data_sets[data_source].loc[:, "New Cases"].min(),
data_sets[data_source].loc[:, "New Cases"].mean(),
]
},
)
stats_graph = px.bar(
stat_data_sets,
x=["Max", "Min", "Mean"],
y="Cases",
)
return main_graph, stats_graph
| 5,384 |
def f_not_null(seq):
"""过滤非空值"""
seq = filter(lambda x: x not in (None, '', {}, [], ()), seq)
return seq
| 5,385 |
def alignObjects(sources, target, position=True, rotation=True, rotateOrder=False, viaRotatePivot=False):
"""
Aligns list of sources to match target
If target has a different rotation order,
sources rotation order will be set to that of the target
"""
rotateOrderXYZ = pmc.getAttr(target + '.rotateOrder')
if viaRotatePivot:
targetPos = pmc.xform(target, q=True, worldSpace=True, rotatePivot=True)
else:
targetPos = pmc.xform(target, q=True, worldSpace=True, translation=True)
if rotation and isinstance(target, pmc.nodetypes.Joint):
# Use temporary locator in case we're aligning to joints
# xform gives inconsistent results for them
tmpLoc = pmc.spaceLocator()
pmc.setAttr(tmpLoc + '.rotateOrder', rotateOrderXYZ)
tmpConstraint = pmc.orientConstraint(target, tmpLoc, maintainOffset=False)
targetRot = pmc.xform(tmpLoc, q=True, worldSpace=True, rotation=True)
pmc.delete(tmpConstraint, tmpLoc)
else:
targetRot = pmc.xform(target, q=True, worldSpace=True, rotation=True)
if isinstance(sources, (str, pmc.PyNode)):
sources = [sources]
for src in sources:
if rotateOrder:
pmc.setAttr(src + '.rotateOrder', rotateOrderXYZ)
if position:
pmc.xform(src, worldSpace=True, translation=targetPos)
if rotation:
pmc.xform(src, worldSpace=True, rotation=targetRot)
| 5,386 |
def _update_settings(option):
"""Update global settings when qwebsettings changed."""
_global_settings.update_setting(option)
default_profile.setter.update_setting(option)
if private_profile:
private_profile.setter.update_setting(option)
| 5,387 |
def flickr(name, args, options, content, lineno,
contentOffset, blockText, state, stateMachine):
""" Restructured text extension for inserting flickr embedded slideshows """
if len(content) == 0:
return
string_vars = {
'flickid': content[0],
'width': 400,
'height': 300,
'extra': ''
}
extra_args = content[1:] # Because content[0] is ID
extra_args = [ea.strip().split("=") for ea in extra_args] # key=value
extra_args = [ea for ea in extra_args if len(ea) == 2] # drop bad lines
extra_args = dict(extra_args)
if 'width' in extra_args:
string_vars['width'] = extra_args.pop('width')
if 'height' in extra_args:
string_vars['height'] = extra_args.pop('height')
if extra_args:
params = [PARAM % (key, extra_args[key]) for key in extra_args]
string_vars['extra'] = "".join(params)
return [nodes.raw('', CODE % (string_vars), format='html')]
| 5,388 |
def serialize(key):
"""
Return serialized version of key name
"""
s = current_app.config['SERIALIZER']
return s.dumps(key)
| 5,389 |
def init_db():
"""
init_db()
Initializes the database.
If tables "books" and "authors" are already in the database, do nothing.
Return value: None or raises ValueError
The error value is the QtSql error instance.
"""
def check(func, *args):
if not func(*args):
raise ValueError(func.__self__.lastError())
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName(":memory:")
check(db.open)
q = QSqlQuery()
check(q.exec, BOOKS_SQL)
check(q.exec, AUTHORS_SQL)
check(q.exec, GENRES_SQL)
check(q.prepare, INSERT_AUTHOR_SQL)
asimovId = add_author(q, "Isaac Asimov", date(1920, 2, 1))
greeneId = add_author(q, "Graham Greene", date(1904, 10, 2))
pratchettId = add_author(q, "Terry Pratchett", date(1948, 4, 28))
check(q.prepare, INSERT_GENRE_SQL)
sfiction = add_genre(q, "Science Fiction")
fiction = add_genre(q, "Fiction")
fantasy = add_genre(q, "Fantasy")
check(q.prepare, INSERT_BOOK_SQL)
add_book(q, "Foundation", 1951, asimovId, sfiction, 3)
add_book(q, "Foundation and Empire", 1952, asimovId, sfiction, 4)
add_book(q, "Second Foundation", 1953, asimovId, sfiction, 3)
add_book(q, "Foundation's Edge", 1982, asimovId, sfiction, 3)
add_book(q, "Foundation and Earth", 1986, asimovId, sfiction, 4)
add_book(q, "Prelude to Foundation", 1988, asimovId, sfiction, 3)
add_book(q, "Forward the Foundation", 1993, asimovId, sfiction, 3)
add_book(q, "The Power and the Glory", 1940, greeneId, fiction, 4)
add_book(q, "The Third Man", 1950, greeneId, fiction, 5)
add_book(q, "Our Man in Havana", 1958, greeneId, fiction, 4)
add_book(q, "Guards! Guards!", 1989, pratchettId, fantasy, 3)
add_book(q, "Night Watch", 2002, pratchettId, fantasy, 3)
add_book(q, "Going Postal", 2004, pratchettId, fantasy, 3)
| 5,390 |
def draw_cross(bgr_img, xy, color=(255, 255, 255), width=2, thickness=1):
    """ Draws an "x"-shaped cross at xy = (x, y)
    """
    x, y = xy  # accept the centre as a tuple (Python 3 has no tuple parameters)
    x, y, w = int(x), int(y), int(width / 2) # ensure points are ints for cv2 methods
cv2.line(bgr_img, (x - w , y - w), (x + w , y + w), color, thickness)
cv2.line(bgr_img, (x - w , y + w), (x + w, y - w), color, thickness)
| 5,391 |
def calc_disordered_regions(limits, seq):
"""
Returns the sequence of disordered regions given a string of
starts and ends of the regions and the sequence.
Example
-------
limits = 1_5;8_10
seq = AHSEDQNAAANTH...
This will return `AHSED_AAA`
"""
seq = seq.replace(' ', '')
regions = [tuple(region.split('_')) for region
in limits.split(';')]
return '_'.join([seq[int(i)-1:int(j)] for i,j in regions])
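# --- Editor's note (illustrative check, not part of the original source) ------
# The docstring example: residues 1-5 and 8-10 of the sequence.
assert calc_disordered_regions('1_5;8_10', 'AHSEDQNAAANTH') == 'AHSED_AAA'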
| 5,392 |
def live_chart_edicts(edicts):
"""
    Plot all orders in edicts, a list of dicts;
    the "op" key of each dict is "buy" or "sell".
"""
now = int(time.time())
for edict in edicts:
print("edict", edict)
if edict["op"] == "buy":
plt.plot(now, edict["price"], color="lime", marker="^", markersize=10)
if edict["op"] == "sell":
plt.plot(now, edict["price"], color="coral", marker="v", markersize=10)
| 5,393 |
def find_repos():
"""
    Finds all repos matching the search criteria for a number of different package managers.
Note that this queues up 1000s of search queries which can take days to work through. This query should only be run periodically to capture newly
created projects.
:return:
"""
logger.info('beginning to queue new search terms')
count = 0
spider = GithubSpider()
for search_query in spider:
logger.info('queuing search query: {}'.format(search_query))
queue.queue_item(search_query)
count += 1
logger.info('added {} search queries'.format(count))
| 5,394 |
def person_attribute_string_factory(sqla):
"""Create a fake person attribute that is enumerated."""
create_multiple_attributes(sqla, 5, 1)
people = sqla.query(Person).all()
if not people:
create_multiple_people(sqla, random.randint(3, 6))
people = sqla.query(Person).all()
current_person = random.choice(people)
nonenumerated_values = sqla.query(Attribute).all()
if not nonenumerated_values:
create_multiple_nonenumerated_values(sqla, random.randint(3, 6))
nonenumerated_values = sqla.query(Attribute).all()
current_nonenumerated_value = random.choice(nonenumerated_values)
person_attribute = {
'personId': current_person.id,
'attributeId': current_nonenumerated_value.id,
'stringValue': rl_fake().first_name()
}
return person_attribute
| 5,395 |
def forward_pass(log_a, log_b, logprob_s0):
"""Computing the forward pass of Baum-Welch Algorithm.
By employing log-exp-sum trick, values are computed in log space, including
the output. Notation is adopted from https://arxiv.org/abs/1910.09588.
`log_a` is the likelihood of discrete states, `log p(s[t] | s[t-1], x[t-1])`,
`log_b` is the likelihood of observations, `log p(x[t], z[t] | s[t])`,
and `logprob_s0` is the likelihood of initial discrete states, `log p(s[0])`.
Forward pass calculates the filtering likelihood of `log p(s_t | x_1:t)`.
Args:
log_a: a float `Tensor` of size [batch, num_steps, num_categ, num_categ]
stores time dependent transition matrices, `log p(s[t] | s[t-1], x[t-1])`.
`A[i, j]` is the transition probability from `s[t-1]=j` to `s[t]=i`.
log_b: a float `Tensor` of size [batch, num_steps, num_categ] stores time
dependent emission matrices, 'log p(x[t](, z[t])| s[t])`.
logprob_s0: a float `Tensor` of size [num_categ], initial discrete states
probability, `log p(s[0])`.
Returns:
forward_pass: a float 3D `Tensor` of size [batch, num_steps, num_categ]
stores the forward pass probability of `log p(s_t | x_1:t)`, which is
normalized.
normalizer: a float 2D `Tensor` of size [batch, num_steps] stores the
normalizing probability, `log p(x_t | x_1:t-1)`.
"""
num_steps = log_a.get_shape().with_rank_at_least(3).dims[1].value
tas = [tf.TensorArray(tf.float32, num_steps, name=n)
for n in ["forward_prob", "normalizer"]]
# The function will return normalized forward probability and
# normalizing constant as a list, [forward_logprob, normalizer].
init_updates = utils.normalize_logprob(
logprob_s0[tf.newaxis, :] + log_b[:, 0, :], axis=-1)
tas = utils.write_updates_to_tas(tas, 0, init_updates)
prev_prob = init_updates[0]
init_state = (1,
prev_prob,
tas)
def _cond(t, *unused_args):
return t < num_steps
def _steps(t, prev_prob, fwd_tas):
"""One step forward in iterations."""
bi_t = log_b[:, t, :] # log p(x[t+1] | s[t+1])
aij_t = log_a[:, t, :, :] # log p(s[t+1] | s[t], x[t])
current_updates = tf.math.reduce_logsumexp(
bi_t[:, :, tf.newaxis] + aij_t + prev_prob[:, tf.newaxis, :],
axis=-1)
current_updates = utils.normalize_logprob(current_updates, axis=-1)
prev_prob = current_updates[0]
fwd_tas = utils.write_updates_to_tas(fwd_tas, t, current_updates)
return (t+1, prev_prob, fwd_tas)
_, _, tas_final = tf.while_loop(
_cond,
_steps,
init_state
)
# transpose to [batch, step, state]
forward_prob = tf.transpose(tas_final[0].stack(), [1, 0, 2])
normalizer = tf.transpose(tf.squeeze(tas_final[1].stack(), axis=[-1]), [1, 0])
return forward_prob, normalizer
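# --- Editor's note (illustrative sketch, not part of the original source) -----
# A plain-NumPy reference of the same recursion for a single (unbatched)
# sequence, useful for checking shapes and normalisation. It mirrors the update
#   alpha[t, i] proportional to b[t, i] * sum_j A[t, i, j] * alpha[t-1, j]
# in log space via logsumexp; log_a is [T, K, K], log_b is [T, K], logprob_s0 is [K].
import numpy as np
from scipy.special import logsumexp
def forward_pass_np(log_a, log_b, logprob_s0):
    """Return (log_alpha [T, K], log_normalizer [T]) for a single sequence."""
    num_steps, num_categ = log_b.shape
    log_alpha = np.zeros((num_steps, num_categ))
    log_normalizer = np.zeros(num_steps)
    prev = logprob_s0 + log_b[0]
    log_normalizer[0] = logsumexp(prev)
    log_alpha[0] = prev - log_normalizer[0]
    for t in range(1, num_steps):
        joint = log_b[t][:, None] + log_a[t] + log_alpha[t - 1][None, :]  # [K, K]
        prev = logsumexp(joint, axis=-1)        # marginalise over s[t-1]
        log_normalizer[t] = logsumexp(prev)     # log p(x[t] | x[1:t-1])
        log_alpha[t] = prev - log_normalizer[t]
    return log_alpha, log_normalizer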
| 5,396 |
def TonnetzToString(Tonnetz):
"""TonnetzToString: List -> String."""
TonnetzString = getKeyByValue(dictOfTonnetze, Tonnetz)
return TonnetzString
| 5,397 |
def get_mc_calibration_coeffs(tel_id):
"""
    Get the calibration coefficients from the MC data file to the
    data. This is a hack (until we have a real data structure for the
    calibrated data); it should move into `ctapipe.io.hessio_event_source`.
returns
-------
(peds,gains) : arrays of the pedestal and pe/dc ratios.
"""
peds = pyhessio.get_pedestal(tel_id)[0]
gains = pyhessio.get_calibration(tel_id)[0]
return peds, gains
| 5,398 |
def bryc(K):
"""
基于2002年Bryc提出的一致逼近函数近似累积正态分布函数
绝对误差小于1.9e-5
:param X: 负无穷到正无穷取值
:return: 累积正态分布积分值的近似
"""
X = abs(K)
from math import exp, pi, sqrt
cnd = 1.-(X*X + 5.575192*X + 12.77436324) * exp(-X*X/2.)/(sqrt(2.*pi)*pow(X, 3) + 14.38718147*pow(X, 2) + 31.53531977*X + 2*12.77436324)
if K < 0:
cnd = 1. - cnd
return cnd
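# --- Editor's note (illustrative check, not part of the original source) ------
# Compare against scipy's normal CDF to check the stated < 1.9e-5 error bound.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import norm
    xs = np.linspace(-8.0, 8.0, 2001)
    max_err = max(abs(bryc(float(x)) - norm.cdf(float(x))) for x in xs)
    print("max |bryc - norm.cdf| =", max_err)  # expected below about 1.9e-5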
| 5,399 |