Dataset schema (each record below gives these metadata columns in order on a single line, followed by the file content and a trailing line with avg_line_length, max_line_length and alphanum_fraction; ⌀ marks nullable columns):

column | dtype | observed range
---|---|---
hexsha | stringlengths | 40–40
size | int64 | 1–1.03M
ext | stringclasses | 10 values
lang | stringclasses | 1 value
max_stars_repo_path | stringlengths | 3–239
max_stars_repo_name | stringlengths | 5–130
max_stars_repo_head_hexsha | stringlengths | 40–78
max_stars_repo_licenses | listlengths | 1–10
max_stars_count | int64 | 1–191k ⌀
max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀
max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀
max_issues_repo_path | stringlengths | 3–239
max_issues_repo_name | stringlengths | 5–130
max_issues_repo_head_hexsha | stringlengths | 40–78
max_issues_repo_licenses | listlengths | 1–10
max_issues_count | int64 | 1–67k ⌀
max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀
max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀
max_forks_repo_path | stringlengths | 3–239
max_forks_repo_name | stringlengths | 5–130
max_forks_repo_head_hexsha | stringlengths | 40–78
max_forks_repo_licenses | listlengths | 1–10
max_forks_count | int64 | 1–105k ⌀
max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀
max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀
content | stringlengths | 1–1.03M
avg_line_length | float64 | 1–958k
max_line_length | int64 | 1–1.03M
alphanum_fraction | float64 | 0–1
4a19dc7f71dea3bb0a1c5aed0c7447e12cb612b4 | 1,026 | py | Python | game_scripts/furniture.py | AlexandreUser/Yuusha-no-isekai | 7990ab1f5765f7dae2766fe89dd4a5acfec86f4f | ["MIT"] | null | null | null | game_scripts/furniture.py | AlexandreUser/Yuusha-no-isekai | 7990ab1f5765f7dae2766fe89dd4a5acfec86f4f | ["MIT"] | null | null | null | game_scripts/furniture.py | AlexandreUser/Yuusha-no-isekai | 7990ab1f5765f7dae2766fe89dd4a5acfec86f4f | ["MIT"] | null | null | null |
def return_2x(url,times,pygame):
img = pygame.image.load(url)
size = img.get_size()
bigger = pygame.transform.scale(img,(int(size[0]*times),int(size[1]*times)))
return bigger
class furniture:
def __init__(self,x,reduce_x,y,reduce_y,width,height,name,pygame):
self.x = x
self.y = y
self.reduce_x = reduce_x
self.reduce_y = reduce_y
self.width = width
self.height = height
self.name = name+".png"
self.size = 3
self.image = return_2x("./furniture/"+self.name,self.size,pygame)
def render(self,camera,win,pygame):
if self.name == "mesa_1.png":
self.img_size = self.image.get_size()
self.hitbox = (self.x+35*2 , self.y+35*2, self.img_size[0]-70*2, self.img_size[1]-35*2)
pygame.draw.rect(win, (48, 29, 19, 50), self.hitbox)
self.hitbox = (self.x+40 , self.y+35, self.img_size[0]-100, self.img_size[1]-35)
elif self.name == "balcao_1.png":
self.img_size = self.image.get_size()
self.hitbox = (self.x , self.y, self.img_size[0], self.img_size[1])
win.blit(self.image,(self.x,self.y))
| 39.461538 | 90 | 0.682261 |
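
For context, a minimal way to exercise the `furniture` class above is a small pygame loop. The sketch below is not part of the dataset record; it assumes pygame is installed and that a `./furniture/mesa_1.png` sprite exists relative to the working directory.

import pygame  # assumed dependency; the class receives the module as a parameter

pygame.init()
win = pygame.display.set_mode((640, 480))
table = furniture(x=100, reduce_x=0, y=100, reduce_y=0,
                  width=64, height=64, name="mesa_1", pygame=pygame)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    win.fill((0, 0, 0))
    table.render(camera=None, win=win, pygame=pygame)  # camera is accepted but unused
    pygame.display.flip()
pygame.quit()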
4a19dce4731b61ee5a54dd6db9d35de17a530613 | 3,292 | py | Python | tests/unit/trace/propagation/test_text_format.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 650 | 2017-07-09T02:08:10.000Z | 2022-03-22T20:39:54.000Z | tests/unit/trace/propagation/test_text_format.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 735 | 2017-07-26T01:15:16.000Z | 2022-03-29T20:17:20.000Z | tests/unit/trace/propagation/test_text_format.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 256 | 2017-07-24T18:29:15.000Z | 2022-03-15T15:33:03.000Z |
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.trace.propagation import text_format
class Test_from_carrier(unittest.TestCase):
def test_from_carrier_keys_exist(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_span_id = '00f067aa0ba902b7'
test_options = 1
carrier = {
text_format._TRACE_ID_KEY: test_trace_id,
text_format._SPAN_ID_KEY: test_span_id,
text_format._TRACE_OPTIONS_KEY: test_options,
}
propagator = text_format.TextFormatPropagator()
span_context = propagator.from_carrier(carrier)
self.assertEqual(span_context.trace_id, test_trace_id)
self.assertEqual(span_context.span_id, test_span_id)
self.assertEqual(span_context.trace_options.enabled,
bool(test_options))
def test_from_carrier_keys_not_exist(self):
carrier = {}
propagator = text_format.TextFormatPropagator()
span_context = propagator.from_carrier(carrier)
self.assertIsNotNone(span_context.trace_id)
# Span_id should be None here which indicates no parent span_id for
# the child spans
self.assertIsNone(span_context.span_id)
self.assertTrue(span_context.trace_options.enabled)
def test_to_carrier_has_span_id(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_span_id = '00f067aa0ba902b7'
test_options = '2'
span_context = mock.Mock()
span_context.trace_id = test_trace_id
span_context.span_id = test_span_id
span_context.trace_options.trace_options_byte = test_options
carrier = {}
propagator = text_format.TextFormatPropagator()
carrier = propagator.to_carrier(span_context, carrier)
self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
self.assertEqual(carrier[text_format._SPAN_ID_KEY], str(test_span_id))
self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)
def test_to_carrier_no_span_id(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_options = '1'
span_context = mock.Mock()
span_context.trace_id = test_trace_id
span_context.span_id = None
span_context.trace_options.trace_options_byte = test_options
carrier = {}
propagator = text_format.TextFormatPropagator()
carrier = propagator.to_carrier(span_context, carrier)
self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
self.assertIsNone(carrier.get(text_format._SPAN_ID_KEY))
self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)
| 36.988764 | 79 | 0.720838 |
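
As a rough companion to the tests above, the propagator can also be exercised directly. The sketch assumes the opencensus package is importable and that `SpanContext` lives at `opencensus.trace.span_context` in the version these tests target; treat the import paths as assumptions to verify.

from opencensus.trace.propagation import text_format
from opencensus.trace.span_context import SpanContext

propagator = text_format.TextFormatPropagator()

# Serialize a freshly generated context into a plain dict carrier...
carrier = propagator.to_carrier(SpanContext(), {})
# ...and rebuild a context from it on the "receiving" side.
restored = propagator.from_carrier(carrier)
print(restored.trace_id, restored.trace_options.enabled)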
4a19dd02c54b95d109eb5e38dd52e241e83c03ff | 2,207 | py | Python | examples/spacer.py | LukeMS/pyglet-gui | 20ec4b335c9af3698dfa8328894544d4d0417973 | ["BSD-3-Clause"] | 52 | 2015-04-18T20:45:52.000Z | 2021-11-21T14:50:10.000Z | examples/spacer.py | LukeMS/pyglet-gui | 20ec4b335c9af3698dfa8328894544d4d0417973 | ["BSD-3-Clause"] | 8 | 2015-06-14T19:35:55.000Z | 2018-06-29T13:52:28.000Z | examples/spacer.py | jorgecarleitao/pyglet-gui | 20ec4b335c9af3698dfa8328894544d4d0417973 | ["BSD-3-Clause"] | 21 | 2015-07-22T16:21:11.000Z | 2021-09-23T09:37:43.000Z |
from setup import *
from pyglet_gui.manager import Manager
from pyglet_gui.buttons import Button
from pyglet_gui.containers import HorizontalContainer, VerticalContainer, Spacer
from pyglet_gui.theme import Theme
theme = Theme({"font": "Lucida Grande",
"font_size": 12,
"text_color": [255, 255, 255, 255],
"gui_color": [255, 0, 0, 255],
"button": {
"down": {
"image": {
"source": "button-down.png",
"frame": [8, 6, 2, 2],
"padding": [18, 18, 8, 6]
},
"text_color": [0, 0, 0, 255]
},
"up": {
"image": {
"source": "button.png",
"frame": [6, 5, 6, 3],
"padding": [18, 18, 8, 6]
}
}
},
"checkbox": {
"checked": {
"image": {
"source": "checkbox-checked.png"
}
},
"unchecked": {
"image": {
"source": "checkbox.png"
}
}
}
}, resources_path='../theme/')
# First line has two big buttons
# second line has three spacers, separated by two small buttons.
# size of the three spacers is the same.
Manager(VerticalContainer([HorizontalContainer([Button(label="Big fat button"),
Button(label="Big fat button")], padding=0),
HorizontalContainer([Spacer(),
Button(label="Small"),
Spacer(),
Button(label="Small"),
Spacer()], padding=0)],
padding=0),
window=window,
batch=batch,
theme=theme)
pyglet.app.run()
| 36.783333 | 92 | 0.363389 |
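
The example relies on a wildcard `from setup import *` for `window`, `batch` and `pyglet`. A minimal stand-in with those assumed names could look like the following; the project's real examples `setup` module may differ.

import pyglet

window = pyglet.window.Window(640, 480, resizable=True, vsync=True)
batch = pyglet.graphics.Batch()

@window.event
def on_draw():
    window.clear()
    batch.draw()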
4a19ddaa984bcfaa49ff390217dd76eb9c2f3985 | 27,183 | py | Python | skrules/skope_rules.py | TomLaMantia/skope-rules | d9a777f84836905f726cb6221fe335cc1b935ae5 | ["MIT"] | 1 | 2021-12-26T06:04:25.000Z | 2021-12-26T06:04:25.000Z | skrules/skope_rules.py | TomLaMantia/skope-rules | d9a777f84836905f726cb6221fe335cc1b935ae5 | ["MIT"] | null | null | null | skrules/skope_rules.py | TomLaMantia/skope-rules | d9a777f84836905f726cb6221fe335cc1b935ae5 | ["MIT"] | 1 | 2019-07-25T18:38:51.000Z | 2019-07-25T18:38:51.000Z |
import numpy as np
from collections import Counter, Iterable
import pandas
import numbers
from warnings import warn
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.externals import six
from sklearn.tree import _tree
from .rule import Rule, replace_feature_name
INTEGER_TYPES = (numbers.Integral, np.integer)
BASE_FEATURE_NAME = "__C__"
class SkopeRules(BaseEstimator):
"""An easy-interpretable classifier optimizing simple logical rules.
Parameters
----------
feature_names : list of str, optional
The names of each feature to be used for returning rules in string
format.
precision_min : float, optional (default=0.5)
The minimal precision of a rule to be selected.
recall_min : float, optional (default=0.01)
The minimal recall of a rule to be selected.
n_estimators : int, optional (default=10)
The number of base estimators (rules) to use for prediction. More are
built before selection. All are available in the estimators_ attribute.
max_samples : int or float, optional (default=.8)
The number of samples to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
max_samples_features : int or float, optional (default=1.0)
The number of features to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
max_depth : integer or List or None, optional (default=3)
The maximum depth of the decision trees. If None, then nodes are
expanded until all leaves are pure or until all leaves contain less
than min_samples_split samples.
If an iterable is passed, you will train n_estimators
for each tree depth. It allows you to create and compare
rules of different length.
max_depth_duplication : integer, optional (default=None)
The maximum depth of the decision tree for rule deduplication,
if None then no deduplication occurs.
max_features : int, float, string or None, optional (default="auto")
The number of features considered (by each decision tree) when looking
for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node for
each decision tree.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional
- If int, random_state is the seed used by the random number generator.
- If RandomState instance, random_state is the random number generator.
- If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
rules_ : dict of tuples (rule, precision, recall, nb).
The collection of `n_estimators` rules used in the ``predict`` method.
The rules are generated by fitted sub-estimators (decision trees). Each
rule satisfies recall_min and precision_min conditions. The selection
is done according to OOB precisions.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators used to generate candidate
rules.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
max_samples_ : integer
The actual number of samples
n_features_ : integer
The number of features when ``fit`` is performed.
classes_ : array, shape (n_classes,)
The classes labels.
"""
def __init__(self,
feature_names=None,
precision_min=0.5,
recall_min=0.01,
n_estimators=10,
max_samples=.8,
max_samples_features=1.,
bootstrap=False,
bootstrap_features=False,
max_depth=3,
max_depth_duplication=None,
max_features=1.,
min_samples_split=2,
n_jobs=1,
random_state=None,
verbose=0):
self.precision_min = precision_min
self.recall_min = recall_min
self.feature_names = feature_names
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_samples_features = max_samples_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.max_depth = max_depth
self.max_depth_duplication = max_depth_duplication
self.max_features = max_features
self.min_samples_split = min_samples_split
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X. Has to follow the convention 0 for
normal data, 1 for anomalies.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples, typically
the amount in case of transactions data. Used to grow regression
trees producing further rules to be tested.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.n_features_ = X.shape[1]
self.classes_ = np.unique(y)
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError("This method needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if not isinstance(self.max_depth_duplication, int) \
and self.max_depth_duplication is not None:
raise ValueError("max_depth_duplication should be an integer"
)
if not set(self.classes_) == set([0, 1]):
warn("Found labels %s. This method assumes target class to be"
" labeled as 1 and normal data to be labeled as 0. Any label"
" different from 0 will be considered as being from the"
" target class."
% set(self.classes_))
y = (y > 0)
# ensure that max_samples is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
raise ValueError('max_samples (%s) is not supported.'
'Valid choices are: "auto", int or'
'float' % self.max_samples)
elif isinstance(self.max_samples, INTEGER_TYPES):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
self.rules_ = {}
self.estimators_ = []
self.estimators_samples_ = []
self.estimators_features_ = []
# default columns names :
feature_names_ = [BASE_FEATURE_NAME + x for x in
np.arange(X.shape[1]).astype(str)]
if self.feature_names is not None:
self.feature_dict_ = {BASE_FEATURE_NAME + str(i): feat
for i, feat in enumerate(self.feature_names)}
else:
self.feature_dict_ = {BASE_FEATURE_NAME + str(i): feat
for i, feat in enumerate(feature_names_)}
self.feature_names_ = feature_names_
clfs = []
regs = []
self._max_depths = self.max_depth \
if isinstance(self.max_depth, Iterable) else [self.max_depth]
for max_depth in self._max_depths:
bagging_clf = BaggingClassifier(
base_estimator=DecisionTreeClassifier(
max_depth=max_depth,
max_features=self.max_features,
min_samples_split=self.min_samples_split),
n_estimators=self.n_estimators,
max_samples=self.max_samples_,
max_features=self.max_samples_features,
bootstrap=self.bootstrap,
bootstrap_features=self.bootstrap_features,
# oob_score=... XXX may be added
# if selection on tree perf needed.
# warm_start=... XXX may be added to increase computation perf.
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
bagging_reg = BaggingRegressor(
base_estimator=DecisionTreeRegressor(
max_depth=max_depth,
max_features=self.max_features,
min_samples_split=self.min_samples_split),
n_estimators=self.n_estimators,
max_samples=self.max_samples_,
max_features=self.max_samples_features,
bootstrap=self.bootstrap,
bootstrap_features=self.bootstrap_features,
# oob_score=... XXX may be added
# if selection on tree perf needed.
# warm_start=... XXX may be added to increase computation perf.
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
clfs.append(bagging_clf)
regs.append(bagging_reg)
# define regression target:
if sample_weight is not None:
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
weights = sample_weight - sample_weight.min()
contamination = float(sum(y)) / len(y)
y_reg = (
pow(weights, 0.5) * 0.5 / contamination * (y > 0) -
pow((weights).mean(), 0.5) * (y == 0))
y_reg = 1. / (1 + np.exp(-y_reg)) # sigmoid
else:
y_reg = y # same as an other classification bagging
for clf in clfs:
clf.fit(X, y)
self.estimators_ += clf.estimators_
self.estimators_samples_ += clf.estimators_samples_
self.estimators_features_ += clf.estimators_features_
for reg in regs:
reg.fit(X, y_reg)
self.estimators_ += reg.estimators_
self.estimators_samples_ += reg.estimators_samples_
self.estimators_features_ += reg.estimators_features_
rules_ = []
for estimator, samples, features in zip(self.estimators_,
self.estimators_samples_,
self.estimators_features_):
# Create mask for OOB samples
mask = ~samples
if sum(mask) == 0:
warn("OOB evaluation not possible: doing it in-bag."
" Performance evaluation is likely to be wrong"
" (overfitting) and selected rules are likely to"
" not perform well! Please use max_samples < 1.")
mask = samples
rules_from_tree = self._tree_to_rules(
estimator, np.array(self.feature_names_)[features])
# XXX todo: idem without dataframe
X_oob = pandas.DataFrame((X[mask, :])[:, features],
columns=np.array(
self.feature_names_)[features])
if X_oob.shape[1] > 1: # otherwise pandas bug (cf. issue #16363)
y_oob = y[mask]
y_oob = np.array((y_oob != 0))
# Add OOB performances to rules:
rules_from_tree = [(r, self._eval_rule_perf(r, X_oob, y_oob))
for r in set(rules_from_tree)]
rules_ += rules_from_tree
# Factorize rules before semantic tree filtering
rules_ = [
tuple(rule)
for rule in
[Rule(r, args=args) for r, args in rules_]]
# keep only rules verifying precision_min and recall_min:
for rule, score in rules_:
if score[0] >= self.precision_min and score[1] >= self.recall_min:
if rule in self.rules_:
# update the score to the new mean
c = self.rules_[rule][2] + 1
b = self.rules_[rule][1] + 1. / c * (
score[1] - self.rules_[rule][1])
a = self.rules_[rule][0] + 1. / c * (
score[0] - self.rules_[rule][0])
self.rules_[rule] = (a, b, c)
else:
self.rules_[rule] = (score[0], score[1], 1)
self.rules_ = sorted(self.rules_.items(),
key=lambda x: (x[1][0], x[1][1]), reverse=True)
# Deduplicate the rule using semantic tree
if self.max_depth_duplication is not None:
self.rules_ = self.deduplicate(self.rules_)
self.rules_ = sorted(self.rules_, key=lambda x: - self.f1_score(x))
self.rules_without_feature_names_ = self.rules_
# Replace generic feature names by real feature names
self.rules_ = [(replace_feature_name(rule, self.feature_dict_), perf)
for rule, perf in self.rules_]
return self
def predict(self, X):
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
Returns
-------
is_outlier : array, shape (n_samples,)
For each observations, tells whether or not (1 or 0) it should
be considered as an outlier according to the selected rules.
"""
return np.array((self.decision_function(X) > 0), dtype=int)
def decision_function(self, X):
"""Average anomaly score of X of the base classifiers (rules).
The anomaly score of an input sample is computed as
the weighted sum of the binary rules outputs, the weight being
the respective precision of each rule.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The higher, the more abnormal. Positive scores represent outliers,
null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_', 'estimators_samples_',
'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_names_)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (r, w) in selected_rules:
scores[list(df.query(r).index)] += w[0]
return scores
def rules_vote(self, X):
"""Score representing a vote of the base classifiers (rules).
The score of an input sample is computed as the sum of the binary
rules outputs: a score of k means than k rules have voted positively.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
The higher, the more abnormal. Positive scores represent outliers,
null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_', 'estimators_samples_',
'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_names_)
selected_rules = self.rules_
scores = np.zeros(X.shape[0])
for (r, _) in selected_rules:
scores[list(df.query(r).index)] += 1
return scores
def score_top_rules(self, X):
"""Score representing an ordering between the base classifiers (rules).
The score is high when the instance is detected by a performing rule.
If there are n rules, ordered by increasing OOB precision, a score of k
means than the kth rule has voted positively, but not the (k-1) first
rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
Positive scores represent outliers, null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_', 'estimators_samples_',
'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_names_)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (k, r) in enumerate(list((selected_rules))):
scores[list(df.query(r[0]).index)] = np.maximum(
len(selected_rules) - k,
scores[list(df.query(r[0]).index)])
return scores
def predict_top_rules(self, X, n_rules):
"""Predict if a particular sample is an outlier or not,
using the n_rules most performing rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
n_rules : int
The number of rules used for the prediction. If one of the
n_rules most performing rules is activated, the prediction
is equal to 1.
Returns
-------
is_outlier : array, shape (n_samples,)
For each observations, tells whether or not (1 or 0) it should
be considered as an outlier according to the selected rules.
"""
return np.array((self.score_top_rules(X) > len(self.rules_) - n_rules),
dtype=int)
def _tree_to_rules(self, tree, feature_names):
"""
Return a list of rules from a tree
Parameters
----------
tree : Decision Tree Classifier/Regressor
feature_names: list of variable names
Returns
-------
rules : list of rules.
"""
# XXX todo: check the case where tree is build on subset of features,
# ie max_features != None
tree_ = tree.tree_
feature_name = [
feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
for i in tree_.feature
]
rules = []
def recurse(node, base_name):
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_name[node]
symbol = '<='
symbol2 = '>'
threshold = tree_.threshold[node]
text = base_name + ["{} {} {}".format(name, symbol, threshold)]
recurse(tree_.children_left[node], text)
text = base_name + ["{} {} {}".format(name, symbol2,
threshold)]
recurse(tree_.children_right[node], text)
else:
rule = str.join(' and ', base_name)
rule = (rule if rule != ''
else ' == '.join([feature_names[0]] * 2))
# a rule selecting all is set to "c0==c0"
rules.append(rule)
recurse(0, [])
return rules if len(rules) > 0 else 'True'
def _eval_rule_perf(self, rule, X, y):
detected_index = list(X.query(rule).index)
if len(detected_index) <= 1:
return (0, 0)
y_detected = y[detected_index]
true_pos = y_detected[y_detected > 0].sum()
if true_pos == 0:
return (0, 0)
pos = y[y > 0].sum()
return y_detected.mean(), float(true_pos) / pos
def deduplicate(self, rules):
return [max(rules_set, key=self.f1_score)
for rules_set in self._find_similar_rulesets(rules)]
def _find_similar_rulesets(self, rules):
"""Create clusters of rules using a decision tree based
on the terms of the rules
Parameters
----------
rules : List, List of rules
The rules that should be splitted in subsets of similar rules
Returns
-------
rules : List of list of rules
The different set of rules. Each set should be homogeneous
"""
def split_with_best_feature(rules, depth, exceptions=[]):
"""
Method to find a split of rules given most represented feature
"""
if depth == 0:
return rules
rulelist = [rule.split(' and ') for rule, score in rules]
terms = [t.split(' ')[0] for term in rulelist for t in term]
counter = Counter(terms)
# Drop exception list
for exception in exceptions:
del counter[exception]
if len(counter) == 0:
return rules
most_represented_term = counter.most_common()[0][0]
# Proceed to split
rules_splitted = [[], [], []]
for rule in rules:
if (most_represented_term + ' <=') in rule[0]:
rules_splitted[0].append(rule)
elif (most_represented_term + ' >') in rule[0]:
rules_splitted[1].append(rule)
else:
rules_splitted[2].append(rule)
new_exceptions = exceptions+[most_represented_term]
# Choose best term
return [split_with_best_feature(ruleset,
depth-1,
exceptions=new_exceptions)
for ruleset in rules_splitted]
def breadth_first_search(rules, leaves=None):
if len(rules) == 0 or not isinstance(rules[0], list):
if len(rules) > 0:
return leaves.append(rules)
else:
for rules_child in rules:
breadth_first_search(rules_child, leaves=leaves)
return leaves
leaves = []
res = split_with_best_feature(rules, self.max_depth_duplication)
breadth_first_search(res, leaves=leaves)
return leaves
def f1_score(self, x):
return 2 * x[1][0] * x[1][1] / \
(x[1][0] + x[1][1]) if (x[1][0] + x[1][1]) > 0 else 0
| 39.168588 | 79 | 0.572968 |
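
A short fit/predict sketch for the class above. It assumes the skope-rules package is installed together with an older scikit-learn that still ships `sklearn.externals.six`, since the module imports it; the toy data and feature names are made up for illustration.

import numpy as np
from skrules import SkopeRules

rng = np.random.RandomState(42)
X = rng.randn(1000, 4)
y = (X[:, 0] > 1).astype(int)  # simple single-feature target

clf = SkopeRules(feature_names=["f0", "f1", "f2", "f3"],
                 n_estimators=10, precision_min=0.5, recall_min=0.01,
                 max_depth=3, random_state=42)
clf.fit(X, y)

print(clf.rules_[:3])      # top rules as (rule_string, (precision, recall, n))
print(clf.predict(X[:5]))  # binary predictions from the selected rules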
4a19df6d14677bda79a4696a54800494f1c97393 | 4,122 | py | Python | packages/python/plotly/plotly/graph_objs/surface/_stream.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/graph_objs/surface/_stream.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/graph_objs/surface/_stream.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "surface"
_path_str = "surface.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.surface.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 29.028169 | 82 | 0.540757 |
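
A usage sketch for the generated class above. The stream token is a placeholder rather than a real Chart Studio token, and the snippet assumes a plotly version whose generated `graph_objects` API matches this file.

import plotly.graph_objects as go

fig = go.Figure(
    go.Surface(
        z=[[1, 2], [3, 4]],
        stream=go.surface.Stream(maxpoints=50, token="placeholder-token"),
    )
)
print(fig.data[0].stream.maxpoints)  # 50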
4a19e204efd52a6054c7c35b965acece36cc51b2 | 1,210 | py | Python | model-optimizer/mo/front/kaldi/extractors/memoryoffset_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | ["Apache-2.0"] | 3 | 2020-02-09T23:25:37.000Z | 2021-01-19T09:44:12.000Z | model-optimizer/mo/front/kaldi/extractors/memoryoffset_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | ["Apache-2.0"] | null | null | null | model-optimizer/mo/front/kaldi/extractors/memoryoffset_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | ["Apache-2.0"] | 2 | 2020-04-18T16:24:39.000Z | 2021-01-19T09:42:19.000Z |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.ops.memoryoffset import MemoryOffset
class MemoryOffsetFrontExtractor(FrontExtractorOp):
op = 'MemoryOffset'
enabled = True
@staticmethod
def extract(node):
pb = node.parameters
mapping_rule = {
'pair_name': pb['pair_name'],
't': pb['t'],
'has_default': pb['has_default'],
'splitted': False,
}
if 'element_size' in pb:
mapping_rule['element_size'] = pb['element_size']
MemoryOffset.update_node_stat(node, mapping_rule)
return __class__.enabled
| 31.025641 | 73 | 0.690909 |
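
The mapping built by `extract()` can be followed standalone. The sample `pb` dict below mimics the Kaldi parameters the extractor reads and is invented for illustration; no Model Optimizer imports are needed to trace the logic.

# Hypothetical parameter dict, shaped like node.parameters in the code above.
pb = {'pair_name': 'memoryoffset_1', 't': -3, 'has_default': False, 'element_size': 40}

mapping_rule = {
    'pair_name': pb['pair_name'],
    't': pb['t'],
    'has_default': pb['has_default'],
    'splitted': False,
}
if 'element_size' in pb:
    mapping_rule['element_size'] = pb['element_size']

print(mapping_rule)
# {'pair_name': 'memoryoffset_1', 't': -3, 'has_default': False,
#  'splitted': False, 'element_size': 40}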
4a19e320c2a4c0b1b0b434d4a749779357179876 | 617 | py | Python | modules/1/scicalc.py | mdazharuddin1011999/IoT_Assignment_3 | 1a627e0c5785b683c5f61fb7b2f7dea1e395e1cc | ["MIT"] | null | null | null | modules/1/scicalc.py | mdazharuddin1011999/IoT_Assignment_3 | 1a627e0c5785b683c5f61fb7b2f7dea1e395e1cc | ["MIT"] | null | null | null | modules/1/scicalc.py | mdazharuddin1011999/IoT_Assignment_3 | 1a627e0c5785b683c5f61fb7b2f7dea1e395e1cc | ["MIT"] | null | null | null |
from math import sqrt, sin, cos, tan, log, exp
def addition(a, b):
return a+b
def subtraction(a, b):
return a-b
def division(a, b):
return a/b
def multiplication(a, b):
return a*b
def modulus(a, b):
return a%b
def power(a, b):
return a**b
def square_root(n):
return sqrt(n)
def cubic_root(n):
return n**(1/3)
def sinx(x):
return sin(x)
def cosx(x):
return cos(x)
def tanx(x):
return tan(x)
def logx(x):
return log(x)
def expx(x):
return exp(x)
def absolute(x):
return abs(x)
if __name__ == "__main__":
print("You are trying to run a module!")
| 13.413043 | 46 | 0.602917 |
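
A quick import-and-call sketch for the module above, assuming the file is importable as `scicalc` on the Python path.

import scicalc

print(scicalc.addition(2, 3))         # 5
print(scicalc.power(2, 10))           # 1024
print(scicalc.square_root(16))        # 4.0
print(scicalc.logx(scicalc.expx(1)))  # 1.0 (natural log of e)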
4a19e336f3fbc9b5f1be961969a38ea6b55c3df4 | 3,238 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_transfer/models/payment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_transfer/models/payment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_transfer/models/payment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.tools.float_utils import float_compare
import logging
import pprint
_logger = logging.getLogger(__name__)
class TransferPaymentAcquirer(models.Model):
_inherit = 'payment.acquirer'
provider = fields.Selection(selection_add=[('transfer', 'Wire Transfer')], default='transfer')
def transfer_get_form_action_url(self):
return '/payment/transfer/feedback'
def _format_transfer_data(self):
company_id = self.env.user.company_id.id
# filter only bank accounts marked as visible
journals = self.env['account.journal'].search([('type', '=', 'bank'), ('display_on_footer', '=', True), ('company_id', '=', company_id)])
accounts = journals.mapped('bank_account_id').name_get()
bank_title = _('Bank Accounts') if len(accounts) > 1 else _('Bank Account')
bank_accounts = ''.join(['<ul>'] + ['<li>%s</li>' % name for id, name in accounts] + ['</ul>'])
post_msg = _('''<div>
<h3>Please use the following transfer details</h3>
<h4>%(bank_title)s</h4>
%(bank_accounts)s
<h4>Communication</h4>
<p>Please use the order name as communication reference.</p>
</div>''') % {
'bank_title': bank_title,
'bank_accounts': bank_accounts,
}
return post_msg
@api.model
def create(self, values):
""" Hook in create to create a default post_msg. This is done in create
to have access to the name and other creation values. If no post_msg
or a void post_msg is given at creation, generate a default one. """
if values.get('provider') == 'transfer' and not values.get('post_msg'):
values['post_msg'] = self._format_transfer_data()
return super(TransferPaymentAcquirer, self).create(values)
class TransferPaymentTransaction(models.Model):
_inherit = 'payment.transaction'
@api.model
def _transfer_form_get_tx_from_data(self, data):
reference, amount, currency_name = data.get('reference'), data.get('amount'), data.get('currency_name')
tx = self.search([('reference', '=', reference)])
if not tx or len(tx) > 1:
error_msg = _('received data for reference %s') % (pprint.pformat(reference))
if not tx:
error_msg += _('; no order found')
else:
error_msg += _('; multiple order found')
_logger.info(error_msg)
raise ValidationError(error_msg)
return tx
def _transfer_form_get_invalid_parameters(self, data):
invalid_parameters = []
if float_compare(float(data.get('amount', '0.0')), self.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % self.amount))
if data.get('currency') != self.currency_id.name:
invalid_parameters.append(('currency', data.get('currency'), self.currency_id.name))
return invalid_parameters
def _transfer_form_validate(self, data):
_logger.info('Validated transfer payment for tx %s: set as pending' % (self.reference))
return self.write({'state': 'pending'})
| 39.487805 | 145 | 0.64824 |
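
The amount/currency check in `_transfer_form_get_invalid_parameters` can be followed outside Odoo. The sketch below re-implements `float_compare` with simplified rounding semantics purely so the snippet runs standalone; treat it as an approximation of `odoo.tools.float_utils.float_compare`, not the real helper, and the sample data as invented.

def float_compare(value1, value2, precision_digits):
    # Simplified stand-in: round both sides, then return -1/0/1 like the Odoo helper.
    a, b = round(value1, precision_digits), round(value2, precision_digits)
    return 0 if a == b else (-1 if a < b else 1)

data = {'amount': '99.999', 'currency': 'EUR'}   # feedback data posted by the form
tx_amount, tx_currency = 100.00, 'EUR'           # values stored on the transaction

invalid_parameters = []
if float_compare(float(data.get('amount', '0.0')), tx_amount, 2) != 0:
    invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx_amount))
if data.get('currency') != tx_currency:
    invalid_parameters.append(('currency', data.get('currency'), tx_currency))

print(invalid_parameters)  # [] because 99.999 rounds to 100.00 at 2 digits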
4a19e39a02849231f8e5f69892a2d24692f2d297 | 29,571 | py | Python | src/pystdlib/values/float_value.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | ["Apache-2.0"] | null | null | null | src/pystdlib/values/float_value.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | ["Apache-2.0"] | null | null | null | src/pystdlib/values/float_value.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | ["Apache-2.0"] | null | null | null |
# PyLinuxToolkit
# Copyright (C) 2022 JWCompDev
#
# float_value.py
# Copyright (C) 2022 JWCompDev <jwcompdev@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation; either version 2.0 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with this program. If not, see <https://www.apache.org/licenses/>.
"""
Contains the FloatValue class, which provides
mutable access to a float value.
"""
from __future__ import annotations
from typing import SupportsFloat, SupportsIndex, SupportsInt, Optional, TYPE_CHECKING
from pystdlib.protocols import SupportsFloatFull
from pystdlib.str_utils import build_repr
from pystdlib.values.number_value import NumberValue
if TYPE_CHECKING:
from pystdlib.values import IntegerValue, BooleanValue, StringValue
class FloatValue(NumberValue):
"""Provides mutable access to a float"""
def __init__(self, number: SupportsFloatFull | StringValue):
self._value: float = self._verify_float(number)
@staticmethod
def _verify_float(number: SupportsFloatFull | StringValue) -> float:
from pystdlib.values.string_value import StringValue
if number is None:
raise TypeError(
"FloatValue() argument must be a string, "
"a bytes-like object or a number, not 'NoneType'"
)
if isinstance(number, (str, bytes, bytearray)):
try:
value: float = float(number)
except ValueError as ex:
if "invalid literal for float() with base 10:" in str(ex):
raise TypeError(str(ex).replace("float()", "FloatValue()"))
raise
elif isinstance(number, StringValue):
try:
value: float = float(number.get())
except ValueError as ex:
if "invalid literal for float() with base 10:" in str(ex):
raise TypeError(str(ex).replace("float()", "FloatValue()"))
raise
elif isinstance(number, (int, float)):
value: float = float(number)
elif isinstance(number, SupportsFloat):
return FloatValue._verify_float(number.__float__())
elif isinstance(number, SupportsIndex):
return FloatValue._verify_float(number.__index__())
elif isinstance(number, SupportsInt):
return FloatValue._verify_float(number.__int__())
else:
raise TypeError(
"FloatValue() argument must be a string, "
"a bytes-like object or a number,"
f" not '{type(number).__name__}'"
)
return value
########################################
# Dunder Methods #
########################################
# Must return str
def __str__(self) -> str:
return str(self._value)
# Must return str
def __repr__(self) -> str:
return build_repr(self, self._value)
# Must return str
def __format__(self, format_spec) -> str:
return self._value.__format__(format_spec)
# Must return bool
def __bool__(self) -> bool:
return self._value != 0
def __getnewargs__(self) -> tuple[float]:
return self._value.__getnewargs__()
def __eq__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_equal_to(other)
def __ne__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_not_equal_to(other)
__hash__ = None
# Must return int
def __int__(self) -> int:
return int(self._value)
# Must return float
def __float__(self) -> float:
return self._value
# Must return complex
def __complex__(self) -> complex:
return complex(self._value)
def __pos__(self) -> FloatValue:
return FloatValue(self._value.__pos__())
def __neg__(self) -> FloatValue:
return FloatValue(self._value.__neg__())
def __abs__(self) -> FloatValue:
return FloatValue(abs(self._value))
# noinspection SpellCheckingInspection
# Has to return int to satisfy SupportsRound
def __round__(self, ndigits: SupportsIndex = None) -> int:
return self._value.__round__()
# Has to return int to satisfy SupportsTrunc
def __trunc__(self) -> int:
return self._value.__trunc__()
def __floor__(self) -> IntegerValue:
from pystdlib.values.integer_value import IntegerValue
return IntegerValue(self._value.__floor__())
def __ceil__(self) -> IntegerValue:
from pystdlib.values.integer_value import IntegerValue
return IntegerValue(self._value.__ceil__())
def __iadd__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
self._value += other
return self
if isinstance(other, (IntegerValue, FloatValue)):
self._value += other.get()
return self
return NotImplemented
def __add__(
self, other: int | float | IntegerValue | FloatValue
) -> IntegerValue | FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value + other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value + other.get())
return NotImplemented
def __radd__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(other + self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() + self._value)
return NotImplemented
def __isub__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
self._value -= other
return self
if isinstance(other, (IntegerValue, FloatValue)):
self._value -= other.get()
return self
return NotImplemented
def __sub__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value - other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value - other.get())
return NotImplemented
def __rsub__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(other - self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() - self._value)
return NotImplemented
def __imul__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
self._value *= other
return self
if isinstance(other, (IntegerValue, FloatValue)):
self._value *= other.get()
return self
return NotImplemented
def __mul__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value * other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value * other.get())
return NotImplemented
def __rmul__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(other * self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() * self._value)
return NotImplemented
# noinspection SpellCheckingInspection
def __itruediv__(
self, other: int | float | IntegerValue | FloatValue
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
self._value /= other
return self
if isinstance(other, (IntegerValue, FloatValue)):
self._value /= other.get()
return self
return NotImplemented
def __truediv__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value / other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value / other.get())
return NotImplemented
def __rtruediv__(
self, other: int | float | IntegerValue | FloatValue
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(other / self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() / self._value)
return NotImplemented
# noinspection SpellCheckingInspection
def __ifloordiv__(
self, other: int | float | IntegerValue | FloatValue
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value // other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value // other.get())
return NotImplemented
def __floordiv__(
self, other: int | float | IntegerValue | FloatValue
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value // other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value // other.get())
return NotImplemented
def __rfloordiv__(
self, other: int | float | IntegerValue | FloatValue
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value // other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value // other.get())
return NotImplemented
# noinspection SpellCheckingInspection
def __ipow__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value**other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value ** other.get())
return NotImplemented
def __pow__(
self,
other: int | float | IntegerValue | FloatValue,
modulo: Optional[float | FloatValue] = None,
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if modulo is None:
if isinstance(other, (int, float)):
return FloatValue(self._value**other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value ** other.get())
return NotImplemented
is_other_int = other is int or isinstance(other, IntegerValue)
is_other_float = other is float or isinstance(other, FloatValue)
is_mod_int = modulo is int or isinstance(modulo, IntegerValue)
is_mod_float = modulo is float or isinstance(modulo, FloatValue)
# TODO: replace with less conditions
if (
(is_mod_int and is_other_int)
or (is_mod_int and is_other_float)
or (is_mod_float and is_other_int)
):
raise TypeError(
"TypeError: pow() 3rd argument not allowed "
"unless all arguments are integers"
)
return NotImplemented
def __rpow__(
self,
other: int | float | IntegerValue | FloatValue,
modulo: Optional[float | FloatValue] = None,
) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if modulo is None:
if isinstance(other, (int, float)):
return FloatValue(other**self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() ** self._value)
return NotImplemented
is_other_int = other is int or isinstance(other, IntegerValue)
is_other_float = other is float or isinstance(other, FloatValue)
is_mod_int = modulo is int or isinstance(modulo, IntegerValue)
is_mod_float = modulo is float or isinstance(modulo, FloatValue)
# TODO: replace with less conditions
if (
(is_mod_int and is_other_int)
or (is_mod_int and is_other_float)
or (is_mod_float and is_other_int)
):
raise TypeError(
"TypeError: pow() 3rd argument not allowed "
"unless all arguments are integers"
)
return NotImplemented
def __imod__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
self._value %= other
return self
if isinstance(other, (IntegerValue, FloatValue)):
self._value %= other.get()
return self
return NotImplemented
def __mod__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(self._value % other)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(self._value % other.get())
return NotImplemented
def __rmod__(self, other: int | float | IntegerValue | FloatValue) -> FloatValue:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
return FloatValue(other % self._value)
if isinstance(other, (IntegerValue, FloatValue)):
return FloatValue(other.get() % self._value)
return NotImplemented
# noinspection SpellCheckingInspection
def __divmod__(
self, other: int | float | IntegerValue | FloatValue
) -> tuple[FloatValue, FloatValue]:
from pystdlib.values.integer_value import IntegerValue
if isinstance(other, (int, float)):
var1, var2 = self._value.__divmod__(other)
return FloatValue(var1), FloatValue(var2)
if isinstance(other, (IntegerValue, FloatValue)):
var1, var2 = self._value.__divmod__(other.get())
return FloatValue(var1), FloatValue(var2)
return NotImplemented
def __rdivmod__(self, other: float | FloatValue) -> tuple[FloatValue, FloatValue]:
if isinstance(other, float):
var1, var2 = other.__divmod__(self._value)
return FloatValue(var1), FloatValue(var2)
if isinstance(other, FloatValue):
var1, var2 = other.get().__divmod__(self._value)
return FloatValue(var1), FloatValue(var2)
return NotImplemented
def __lt__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_less_than(other)
def __le__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_less_than_or_equal_to(other)
def __gt__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_greater_than(other)
def __ge__(self, other: int | float | IntegerValue | FloatValue) -> BooleanValue:
return self.is_greater_than_or_equal_to(other)
########################################
# Instance Methods #
########################################
# Must return float
@property
def value(self) -> float:
"""
Returns the value.
:return: the value
"""
return self._value
# Must return float
def get(self) -> float:
"""
Returns the value.
:return the value
"""
return self._value
def set(self, value: SupportsFloatFull | StringValue) -> FloatValue:
"""
Sets the value.
:param value: the value to set
:return this instance for use in method chaining
"""
self._value = FloatValue._verify_float(value)
return self
# Must return int
def to_int(self) -> int:
"""
Converts the value to an int and returns it.
:return the value converted to an int
"""
return int(self._value)
# Must return float
def to_float(self) -> float:
"""
Converts the value to a float and returns it.
:return the value converted to a float
"""
return float(self._value)
def increment(self) -> FloatValue:
"""
Increments the value.
:return: this instance for use in method chaining
"""
self._value += 1
return self
def increment_and_get(self) -> FloatValue:
"""
Increments this instance's value by 1 then
returns the value associated with the instance
:return: the value associated with the instance
after it was incremented
"""
self._value += 1
return FloatValue(self._value)
def get_and_increment(self) -> FloatValue:
"""
Increments this instance's value by 1 then
returns the value associated with the instance
:return: the value associated with the instance
before it was incremented
"""
before = self._value
self._value += 1
return FloatValue(before)
def decrement(self) -> FloatValue:
"""
Decrements the value.
:return: this instance for use in method chaining
"""
self._value -= 1
return self
def decrement_and_get(self) -> FloatValue:
"""
Decrements this instance's value by 1 then
returns the value associated with the instance
:return: the value associated with the instance
after it was decremented
"""
self._value -= 1
return FloatValue(self._value)
def get_and_decrement(self) -> FloatValue:
"""
Decrements this instance's value by 1 then
returns the value associated with the instance
:return: the value associated with the instance
before it was decremented
"""
before = self._value
self._value -= 1
return FloatValue(before)
def add(self, other: int | float) -> FloatValue:
"""
Adds a value to the value of this instance.
:param other: the value to add
:return: this instance for use in method chaining
"""
self._value += other
return self
def add_and_get(self, other: int | float) -> FloatValue:
"""
Increments this instance's value by 'other', then
returns the value associated with the instance immediately
after the addition operation.
:param other: the quantity to add
:return: the value associated with this instance
after adding the other
"""
self._value += other
return FloatValue(self._value)
def get_and_add(self, other: int | float) -> FloatValue:
"""
Increments this instance's value by 'other', then
returns the value associated with the instance immediately
before to the addition operation.
:param other: the quantity to add
:return: the value associated with this instance
before adding the other
"""
before = self._value
self._value += other
return FloatValue(before)
def subtract(self, other: int | float) -> FloatValue:
"""
        Subtracts a value from the value of this instance.
:param other: the value to subtract
:return: this instance for use in method chaining
"""
self._value -= other
return self
def subtract_and_get(self, other: int | float) -> FloatValue:
"""
Decrements this instance's value by 'other', then
returns the value associated with the instance immediately
after the subtraction operation.
:param other: the quantity to subtract
:return: the value associated with this instance
after subtracting the other
"""
self._value -= other
return FloatValue(self._value)
def get_and_subtract(self, other: int | float) -> FloatValue:
"""
Decrements this instance's value by 'other', then
returns the value associated with the instance immediately
        before the subtraction operation.
:param other: the quantity to subtract
:return: the value associated with this instance
before subtracting the other
"""
before = self._value
self._value -= other
return FloatValue(before)
def is_positive(self) -> BooleanValue:
"""
Returns True if the value is positive, False otherwise.
:return: True if the value is positive, False otherwise
"""
from pystdlib.values.boolean_value import BooleanValue
return BooleanValue(self._value > 0.0)
def is_negative(self) -> BooleanValue:
"""
Returns True if the value is negative, False otherwise.
:return: True if the value is negative, False otherwise
"""
from pystdlib.values.boolean_value import BooleanValue
return BooleanValue(self._value < 0.0)
def is_zero(self) -> BooleanValue:
"""
Returns True if the value is zero, False otherwise.
:return: True if the value is zero, False otherwise
"""
from pystdlib.values.boolean_value import BooleanValue
return BooleanValue(self._value == 0.0)
def is_not_zero(self) -> BooleanValue:
"""
Returns True if the value is not zero, False otherwise.
        :return: True if the value is not zero, False otherwise
"""
from pystdlib.values.boolean_value import BooleanValue
return BooleanValue(self._value != 0.0)
def is_equal_to(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is equal to the specified number,
False otherwise.
:param number: the number to check
:return: True if the value is equal to the specified number,
False otherwise.
"""
from pystdlib.values.boolean_value import BooleanValue
from pystdlib.values.integer_value import IntegerValue
if isinstance(number, (IntegerValue, FloatValue)):
return BooleanValue(self._value == number.get())
return BooleanValue(self._value == number)
def is_not_equal_to(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is not equal to the specified
number, False otherwise.
:param number: the number to check
:return: True if the value is not equal to the specified number,
False otherwise.
"""
return self.is_equal_to(number).negate()
def is_less_than_or_equal_to(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is less than or equal to the
specified number, False otherwise.
:param number: the number to check
:return: True if the value is less than or equal to the
specified number, False otherwise.
"""
from pystdlib.values.boolean_value import BooleanValue
from pystdlib.values.integer_value import IntegerValue
if isinstance(number, (IntegerValue, FloatValue)):
return BooleanValue(self._value <= number.get())
return BooleanValue(self._value <= number)
def is_greater_than_or_equal_to(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is greater than or equal to the
specified number, False otherwise.
:param number: the number to check
:return: True if the value is greater than or equal to the
specified number, False otherwise.
"""
from pystdlib.values.boolean_value import BooleanValue
from pystdlib.values.integer_value import IntegerValue
if isinstance(number, (IntegerValue, FloatValue)):
return BooleanValue(self._value >= number.get())
return BooleanValue(self._value >= number)
def is_less_than(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is less than the
specified number, False otherwise.
:param number: the number to check
:return: True if the value is less than the
specified number, False otherwise.
"""
from pystdlib.values.boolean_value import BooleanValue
from pystdlib.values.integer_value import IntegerValue
if isinstance(number, (IntegerValue, FloatValue)):
return BooleanValue(self._value < number.get())
return BooleanValue(self._value < number)
def is_greater_than(
self, number: int | float | IntegerValue | FloatValue
) -> BooleanValue:
"""
Returns True if the value is greater than the
specified number, False otherwise.
:param number: the number to check
:return: True if the value is greater than the
specified number, False otherwise.
"""
from pystdlib.values.boolean_value import BooleanValue
from pystdlib.values.integer_value import IntegerValue
if isinstance(number, (IntegerValue, FloatValue)):
return BooleanValue(self._value > number.get())
return BooleanValue(self._value > number)
########################################
# Float Only Instance Methods #
########################################
# The following regular methods are int only methods and don't apply to 'floats'
# Must return tuple[int, int]
def as_integer_ratio(self) -> tuple[int, int]:
# noinspection PyRedundantParentheses
"""
Returns integer ratio.
Return a pair of integers, whose ratio is exactly equal to the original float
and with a positive denominator.
Raise OverflowError on infinities and a ValueError on NaNs.
>>> (10.0).as_integer_ratio()
(10, 1)
>>> (0.0).as_integer_ratio()
(0, 1)
>>> (-.25).as_integer_ratio()
(-1, 4)
:return: a pair of integers, whose ratio is exactly equal to the original float
and with a positive denominator
:raises OverflowError: on infinities and a ValueError on NaNs
"""
return self._value.as_integer_ratio()
def is_integer(self) -> BooleanValue:
"""
Returns True if the float is an integer.
:return: True if the float is an integer
"""
from pystdlib.values.boolean_value import BooleanValue
return BooleanValue(self._value.is_integer())
def hex(self) -> StringValue:
"""
Returns a hexadecimal representation of the value.
>>> (-0.1).hex()
'-0x1.999999999999ap-4'
>>> 3.14159.hex()
'0x1.921f9f01b866ep+1'
:return: a hexadecimal representation of the value
"""
from pystdlib.values.string_value import StringValue
return StringValue(self._value.hex())
# noinspection SpellCheckingInspection
@staticmethod
def fromhex(value: str) -> FloatValue:
"""
Create a floating-point number from the specified
hexadecimal string.
>>> float.fromhex('0x1.ffffp10')
2047.984375
>>> float.fromhex('-0x1p-1074')
-5e-324
:param value: the string to convert
:return: a floating-point number from the specified
hexadecimal string
"""
return FloatValue(float.fromhex(value))
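# ----------------------------------------------------------------------
# Editor's usage sketch (hedged): a minimal demonstration of the fluent
# FloatValue API defined above. It only calls methods shown in this module
# and assumes the constructor accepts a plain float, as the methods above
# already do via FloatValue(self._value). Run the module directly to see it.
if __name__ == "__main__":
    demo = FloatValue(2.5)
    demo.add(1.5).subtract(0.5)                   # fluent chaining: 2.5 + 1.5 - 0.5 -> 3.5
    print(demo.get())                             # 3.5
    print(demo.get_and_increment().get())         # 3.5 (snapshot taken before the increment)
    print(demo.get())                             # 4.5 (value after the increment)
    print(demo.to_int())                          # 4
    print(demo.is_positive())                     # BooleanValue wrapping True
    print(FloatValue(-0.25).as_integer_ratio())   # (-1, 4)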
| 32.711283 | 88 | 0.623787 |
4a19e3c517e26691e7e7d6ab97b55e53664627bf
| 1,195 |
py
|
Python
|
core/packages/router/route_manager.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
core/packages/router/route_manager.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
core/packages/router/route_manager.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
import sys, os, pickle, core.packages.router.route as RouteImport, core.helpers as Helpers
class RouteManger:
cliRouteImportStrings = [{'path': 'core.routes.cli_routing' , 'initFunction' : 'defineRoutes'}, {'path': 'app.routes.cli_routing' , 'initFunction' : 'defineRoutes'}]
def __init__(self):
self.routeInstance = RouteImport.Route()
args = sys.argv[1:]
if len(args) == 0:
Helpers.printError('Missing arguments')
sys.exit()
self.executeRoutingFiles()
        # our self.routeInstance should now have a very large `imports` and `calls` attribute which we need to look up with our arguments
print(pickle.dumps(self.routeInstance.imports))
sys.exit()
def executeRoutingFiles(self):
Route = self.routeInstance
for importString in self.cliRouteImportStrings:
try:
importedRouting = __import__(importString['path'], globals(), locals(), [importString['initFunction']])
getattr(importedRouting, importString['initFunction'])(Route)
except ImportError as e:
                print('Route not found at ' + importString['path'])
raise e
| 45.961538 | 169 | 0.643515 |
4a19e4b235de1814c948fe27d644fdde5a43ca27
| 3,190 |
py
|
Python
|
piccolo/apps/schema/commands/graph.py
|
0scarB/piccolo
|
27539219431874bae99b7206df48133fbe1a27eb
|
[
"MIT"
] | 750 |
2019-01-03T16:02:48.000Z
|
2022-03-30T19:53:03.000Z
|
piccolo/apps/schema/commands/graph.py
|
0scarB/piccolo
|
27539219431874bae99b7206df48133fbe1a27eb
|
[
"MIT"
] | 311 |
2019-01-14T13:07:13.000Z
|
2022-03-31T07:43:08.000Z
|
piccolo/apps/schema/commands/graph.py
|
0scarB/piccolo
|
27539219431874bae99b7206df48133fbe1a27eb
|
[
"MIT"
] | 48 |
2020-12-18T08:13:50.000Z
|
2022-03-24T03:18:06.000Z
|
"""
Credit to the Django Extensions team for inspiring this tool.
"""
import dataclasses
import os
import sys
import typing as t
import jinja2
from piccolo.conf.apps import Finder
TEMPLATE_DIRECTORY = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates"
)
JINJA_ENV = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=TEMPLATE_DIRECTORY),
)
@dataclasses.dataclass
class GraphColumn:
name: str
type: str
@dataclasses.dataclass
class GraphTable:
name: str
columns: t.List[GraphColumn]
@dataclasses.dataclass
class GraphRelation:
table_a: str
table_b: str
label: str
def render_template(**kwargs):
template = JINJA_ENV.get_template("graphviz.dot.jinja")
return template.render(**kwargs)
def graph(
apps: str = "all", direction: str = "LR", output: t.Optional[str] = None
):
"""
Prints out a graphviz .dot file for your schema.
:param apps:
The name of the apps to include. If 'all' is given then every app is
included. To specify multiple app names, separate them with commas.
For example --apps="app1,app2".
:param direction:
How the tables should be orientated - by default it's "LR" which is
left to right, so the graph will be landscape. The alternative is
"TB", which is top to bottom, so the graph will be portrait.
:param output:
If specified, rather than printing out the file contents, they'll be
written to this file. For example --output=graph.dot
"""
finder = Finder()
app_names = finder.get_sorted_app_names()
if apps != "all":
given_app_names = [i.strip() for i in apps.split(",")]
delta = set(given_app_names) - set(app_names)
if delta:
sys.exit(f"These apps aren't recognised: {', '.join(delta)}.")
app_names = given_app_names
tables: t.List[GraphTable] = []
relations: t.List[GraphRelation] = []
for app_name in app_names:
app_config = finder.get_app_config(app_name=app_name)
for table_class in app_config.table_classes:
tables.append(
GraphTable(
name=table_class.__name__,
columns=[
GraphColumn(
name=i._meta.name, type=i.__class__.__name__
)
for i in table_class._meta.columns
],
)
)
for fk_column in table_class._meta.foreign_key_columns:
reference_table_class = (
fk_column._foreign_key_meta.resolved_references
)
relations.append(
GraphRelation(
table_a=table_class.__name__,
table_b=reference_table_class.__name__,
label=fk_column._meta.name,
)
)
contents = render_template(
tables=tables, relations=relations, direction=direction
)
if output is None:
print(contents)
else:
with open(output, "w") as f:
f.write(contents)
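# ----------------------------------------------------------------------
# Editor's usage sketch (hedged): how the command above is typically run.
# The app names below are placeholders -- substitute apps registered in your
# own piccolo_conf / AppRegistry -- and executing it requires a project where
# piccolo_conf is importable, since Finder() resolves apps from there.
#
#   Programmatic call:  graph(apps="blog,shop", direction="TB", output="schema.dot")
#   Via the piccolo CLI (assuming the command is registered for the schema app):
#       piccolo schema graph --output=graph.dot
#   Render with Graphviz:
#       dot -Tpng graph.dot -o graph.png
if __name__ == "__main__":
    # Print the DOT description of every registered app's tables to stdout.
    graph(apps="all", direction="LR")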
| 27.982456 | 76 | 0.599687 |
4a19e71af2fb31efbb233e3d1c621faeb35ab03f
| 20,855 |
py
|
Python
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/aio/operations/_routes_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207 |
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/aio/operations/_routes_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061 |
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/aio/operations/_routes_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802 |
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations:
"""RoutesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> "_models.Route":
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs
) -> "_models.Route":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs
) -> AsyncLROPoller["_models.Route"]:
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2018_08_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncIterable["_models.RouteListResult"]:
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
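# ----------------------------------------------------------------------
# Editor's usage sketch (hedged): this operations class is not constructed
# directly -- the generated management client builds it and exposes it as an
# attribute (conventionally `client.routes`). With such a client and valid
# credentials, a create-or-update call would look roughly like:
#
#     poller = await client.routes.begin_create_or_update(
#         resource_group_name="rg-demo",            # placeholder values
#         route_table_name="rt-demo",
#         route_name="default-route",
#         route_parameters=models.Route(
#             address_prefix="0.0.0.0/0",
#             next_hop_type="VirtualAppliance",
#             next_hop_ip_address="10.0.0.4",
#         ),
#     )
#     route = await poller.result()
#
# The exact client class and credential setup depend on how this vendored SDK
# is wired into the calling extension, so treat the names above as assumptions.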
| 48.613054 | 210 | 0.664253 |
4a19e86cce7a6e620fb7775ca36a6d0b8afe95bb
| 428 |
py
|
Python
|
examples/usage_example.py
|
haikusw/jaqalpaq
|
d507e894cb897756a1e51c99582b736254995b4e
|
[
"Apache-2.0"
] | 8 |
2021-02-19T23:25:28.000Z
|
2021-09-24T20:11:13.000Z
|
examples/usage_example.py
|
haikusw/jaqalpaq
|
d507e894cb897756a1e51c99582b736254995b4e
|
[
"Apache-2.0"
] | null | null | null |
examples/usage_example.py
|
haikusw/jaqalpaq
|
d507e894cb897756a1e51c99582b736254995b4e
|
[
"Apache-2.0"
] | null | null | null |
import jaqalpaq
from jaqalpaq.parser import parse_jaqal_file
from jaqalpaq.emulator import run_jaqal_circuit
from jaqalpaq.generator import generate_jaqal_program
JaqalCircuitObject = parse_jaqal_file("jaqal/Sxx_circuit.jaqal")
JaqalCircuitResults = run_jaqal_circuit(JaqalCircuitObject)
print(f"Probabilities: {JaqalCircuitResults.subcircuits[0].probability_by_str}")
JaqalProgram = generate_jaqal_program(JaqalCircuitObject)
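# Editor's note (hedged): generate_jaqal_program returns the round-tripped Jaqal
# source as a string, so printing it lets this example show both the emulated
# probabilities above and the regenerated program text.
print(JaqalProgram)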
| 42.8 | 80 | 0.876168 |
4a19e88308cd188464c5b852b15bb530253b1f1c
| 241 |
py
|
Python
|
output/models/nist_data/list_pkg/g_day/schema_instance/nistschema_sv_iv_list_g_day_max_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1 |
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/g_day/schema_instance/nistschema_sv_iv_list_g_day_max_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4 |
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/g_day/schema_instance/nistschema_sv_iv_list_g_day_max_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.list_pkg.g_day.schema_instance.nistschema_sv_iv_list_g_day_max_length_3_xsd.nistschema_sv_iv_list_g_day_max_length_3 import NistschemaSvIvListGDayMaxLength3
__all__ = [
"NistschemaSvIvListGDayMaxLength3",
]
| 40.166667 | 185 | 0.887967 |
4a19ea516c1e8a68ed3f135088e2760f06cda272
| 34,091 |
py
|
Python
|
src/sentry/conf/server.py
|
ccdlvc/report.iupdate.io
|
0ab451850c34af40b86eb517e39124e5b894b44b
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/conf/server.py
|
ccdlvc/report.iupdate.io
|
0ab451850c34af40b86eb517e39124e5b894b44b
|
[
"BSD-3-Clause"
] | 5 |
2019-12-28T18:13:59.000Z
|
2022-03-02T04:32:45.000Z
|
src/sentry/conf/server.py
|
ccdlvc/report.iupdate.io
|
0ab451850c34af40b86eb517e39124e5b894b44b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.conf.server
~~~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf.global_settings import * # NOQA
import os
import os.path
import socket
import sys
import tempfile
import sentry
from datetime import timedelta
from six.moves.urllib.parse import urlparse
gettext_noop = lambda s: s
socket.setdefaulttimeout(5)
DEBUG = False
TEMPLATE_DEBUG = True
MAINTENANCE = False
ADMINS = ()
INTERNAL_IPS = ()
MANAGERS = ADMINS
APPEND_SLASH = True
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
# XXX(dcramer): handle case when we've installed from source vs just running
# this straight out of the repository
if 'site-packages' in __file__:
NODE_MODULES_ROOT = os.path.join(PROJECT_ROOT, 'node_modules')
else:
NODE_MODULES_ROOT = os.path.join(PROJECT_ROOT, os.pardir, os.pardir, 'node_modules')
NODE_MODULES_ROOT = os.path.normpath(NODE_MODULES_ROOT)
sys.path.insert(0, os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir)))
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': 'sentry',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'AUTOCOMMIT': True,
'ATOMIC_REQUESTS': False,
}
}
if 'DATABASE_URL' in os.environ:
url = urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'postgres':
DATABASES['default']['ENGINE'] = 'sentry.db.postgres'
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
# This should always be UTC.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sv-se', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
from .locale import CATALOGS
LANGUAGES = tuple((code, name) for code, name in LANGUAGES
if code in CATALOGS)
SUPPORTED_LANGUAGES = frozenset(CATALOGS)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'sentry.middleware.proxy.ContentLengthHeaderMiddleware',
'sentry.middleware.security.SecurityHeadersMiddleware',
'sentry.middleware.maintenance.ServicesUnavailableMiddleware',
'sentry.middleware.env.SentryEnvMiddleware',
'sentry.middleware.proxy.SetRemoteAddrFromForwardedFor',
'sentry.middleware.debug.NoIfModifiedSinceMiddleware',
'sentry.middleware.stats.RequestTimingMiddleware',
'sentry.middleware.stats.ResponseCodeMiddleware',
'sentry.middleware.health.HealthCheck', # Must exist before CommonMiddleware
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'sentry.middleware.auth.AuthenticationMiddleware',
'sentry.middleware.sudo.SudoMiddleware',
'sentry.middleware.superuser.SuperuserMiddleware',
'sentry.middleware.locale.SentryLocaleMiddleware',
# TODO(dcramer): kill this once we verify its safe
# 'sentry.middleware.social_auth.SentrySocialAuthExceptionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'sentry.debug.middleware.DebugMiddleware',
)
ROOT_URLCONF = 'sentry.conf.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'django.core.context_processors.request',
'social_auth.context_processors.social_auth_by_name_backends',
'social_auth.context_processors.social_auth_backends',
'social_auth.context_processors.social_auth_by_type_backends',
'social_auth.context_processors.social_auth_login_redirect'
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crispy_forms',
'debug_toolbar',
'raven.contrib.django.raven_compat',
'rest_framework',
'sentry',
'sentry.nodestore',
'sentry.search',
'sentry.lang.javascript',
'sentry.lang.native',
'sentry.plugins.sentry_interface_types',
'sentry.plugins.sentry_mail',
'sentry.plugins.sentry_urls',
'sentry.plugins.sentry_useragents',
'sentry.plugins.sentry_webhooks',
'social_auth',
'south',
'sudo',
)
STATIC_ROOT = os.path.realpath(os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL = '/_static/{version}/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
ASSET_VERSION = 0
# setup a default media root to somewhere useless
MEDIA_ROOT = '/tmp/sentry-media'
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
CSRF_FAILURE_VIEW = 'sentry.web.frontend.csrf_failure.view'
CSRF_COOKIE_NAME = 'sc'
# Auth configuration
try:
from django.core.urlresolvers import reverse_lazy
except ImportError:
LOGIN_REDIRECT_URL = '/login-redirect/'
LOGIN_URL = '/auth/login/'
else:
LOGIN_REDIRECT_URL = reverse_lazy('sentry-login-redirect')
LOGIN_URL = reverse_lazy('sentry-login')
AUTHENTICATION_BACKENDS = (
'sentry.utils.auth.EmailAuthBackend',
# TODO(dcramer): we can't remove these until we rewrite more of social auth
'social_auth.backends.github.GithubBackend',
'social_auth.backends.bitbucket.BitbucketBackend',
'social_auth.backends.trello.TrelloBackend',
'social_auth.backends.asana.AsanaBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'sentry.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
},
},
]
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL = 'sentry.User'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social_auth.backends.github.GithubBackend',
'social_auth.backends.bitbucket.BitbucketBackend',
'social_auth.backends.trello.TrelloBackend',
'social_auth.backends.asana.AsanaBackend',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
SESSION_COOKIE_NAME = "sentrysid"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.misc.save_status_to_session',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'social_auth.backends.pipeline.misc.save_status_to_session',
)
SOCIAL_AUTH_REVOKE_TOKENS_ON_DISCONNECT = True
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/account/settings/identities/'
SOCIAL_AUTH_ASSOCIATE_ERROR_URL = SOCIAL_AUTH_LOGIN_REDIRECT_URL
INITIAL_CUSTOM_USER_MIGRATION = '0108_fix_user'
# Auth engines and the settings required for them to be listed
AUTH_PROVIDERS = {
'github': ('GITHUB_APP_ID', 'GITHUB_API_SECRET'),
'trello': ('TRELLO_API_KEY', 'TRELLO_API_SECRET'),
'bitbucket': ('BITBUCKET_CONSUMER_KEY', 'BITBUCKET_CONSUMER_SECRET'),
'asana': ('ASANA_CLIENT_ID', 'ASANA_CLIENT_SECRET'),
}
AUTH_PROVIDER_LABELS = {
'github': 'GitHub',
'trello': 'Trello',
'bitbucket': 'Bitbucket',
'asana': 'Asana'
}
import random
SOCIAL_AUTH_DEFAULT_USERNAME = lambda: random.choice(['Darth Vader', 'Obi-Wan Kenobi', 'R2-D2', 'C-3PO', 'Yoda'])
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_FORCE_POST_DISCONNECT = True
# Queue configuration
from kombu import Exchange, Queue
BROKER_URL = "redis://localhost:6379"
BROKER_TRANSPORT_OPTIONS = {}
# Ensure workers run async by default
# in Development you might want them to run in-process
# though it would cause timeouts/recursions in some cases
CELERY_ALWAYS_EAGER = False
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = True
CELERY_SEND_EVENTS = False
CELERY_RESULT_BACKEND = None
CELERY_TASK_RESULT_EXPIRES = 1
CELERY_DISABLE_RATE_LIMITS = True
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_REDIRECT_STDOUTS = False
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_IMPORTS = (
'sentry.tasks.auth',
'sentry.tasks.auto_resolve_issues',
'sentry.tasks.beacon',
'sentry.tasks.check_auth',
'sentry.tasks.clear_expired_snoozes',
'sentry.tasks.collect_project_platforms',
'sentry.tasks.deletion',
'sentry.tasks.digests',
'sentry.tasks.dsymcache',
'sentry.tasks.email',
'sentry.tasks.merge',
'sentry.tasks.options',
'sentry.tasks.ping',
'sentry.tasks.post_process',
'sentry.tasks.process_buffer',
'sentry.tasks.reports',
'sentry.tasks.store',
)
CELERY_QUEUES = [
Queue('alerts', routing_key='alerts'),
Queue('auth', routing_key='auth'),
Queue('cleanup', routing_key='cleanup'),
Queue('default', routing_key='default'),
Queue('digests.delivery', routing_key='digests.delivery'),
Queue('digests.scheduling', routing_key='digests.scheduling'),
Queue('email', routing_key='email'),
Queue('events.preprocess_event', routing_key='events.preprocess_event'),
Queue('events.process_event', routing_key='events.process_event'),
Queue('events.save_event', routing_key='events.save_event'),
Queue('merge', routing_key='merge'),
Queue('options', routing_key='options'),
Queue('reports.deliver', routing_key='reports.deliver'),
Queue('reports.prepare', routing_key='reports.prepare'),
Queue('search', routing_key='search'),
Queue('stats', routing_key='stats'),
Queue('update', routing_key='update'),
]
for queue in CELERY_QUEUES:
queue.durable = False
CELERY_ROUTES = ('sentry.queue.routers.SplitQueueRouter',)
def create_partitioned_queues(name):
exchange = Exchange(name, type='direct')
for num in range(1):
CELERY_QUEUES.append(Queue(
'{0}-{1}'.format(name, num),
exchange=exchange,
))
create_partitioned_queues('counters')
create_partitioned_queues('triggers')
from celery.schedules import crontab
CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), 'sentry-celerybeat')
CELERYBEAT_SCHEDULE = {
'check-auth': {
'task': 'sentry.tasks.check_auth',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
'queue': 'auth',
}
},
'send-beacon': {
'task': 'sentry.tasks.send_beacon',
'schedule': timedelta(hours=1),
'options': {
'expires': 3600,
},
},
'send-ping': {
'task': 'sentry.tasks.send_ping',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
},
},
'flush-buffers': {
'task': 'sentry.tasks.process_buffer.process_pending',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'counters-0',
}
},
'sync-options': {
'task': 'sentry.tasks.options.sync_options',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'options',
}
},
'schedule-digests': {
'task': 'sentry.tasks.digests.schedule_digests',
'schedule': timedelta(seconds=30),
'options': {
'expires': 30,
},
},
'clear-expired-snoozes': {
'task': 'sentry.tasks.clear_expired_snoozes',
'schedule': timedelta(minutes=5),
'options': {
'expires': 300,
},
},
# Disabled for the time being:
# 'clear-old-cached-dsyms': {
# 'task': 'sentry.tasks.clear_old_cached_dsyms',
# 'schedule': timedelta(minutes=60),
# 'options': {
# 'expires': 3600,
# },
# },
'collect-project-platforms': {
'task': 'sentry.tasks.collect_project_platforms',
'schedule': timedelta(days=1),
'options': {
'expires': 3600 * 24,
},
},
'schedule-auto-resolution': {
'task': 'sentry.tasks.schedule_auto_resolution',
'schedule': timedelta(minutes=15),
'options': {
'expires': 60 * 25,
},
},
'schedule-weekly-organization-reports': {
'task': 'sentry.tasks.reports.prepare_reports',
'schedule': crontab(
minute=0,
hour=12, # 05:00 PDT, 09:00 EDT, 12:00 UTC
day_of_week='monday',
),
'options': {
'expires': 60 * 60 * 3,
},
},
}
# Sentry logs to two major places: stdout, and its internal project.
# To disable logging to the internal project, add a logger whose only
# handler is 'console' and disable propagating upwards.
# Additionally, Sentry has the ability to override logger levels by
# providing the cli with -l/--loglevel or the SENTRY_LOG_LEVEL env var.
# The loggers that it overrides are root and any in LOGGING.overridable.
# Be very careful with this in a production system, because the celery
# logger can be extremely verbose when given INFO or DEBUG.
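# For example (an editor's hedged illustration, not an upstream default), a
# sentry.conf.py could silence a noisy logger and keep it out of the internal
# project like this:
#
#     LOGGING['loggers']['celery.beat'] = {
#         'handlers': ['console'],
#         'propagate': False,
#     }
#
# or raise the overridable loggers at the CLI with SENTRY_LOG_LEVEL=WARNING.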
LOGGING = {
'default_level': 'INFO',
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'class': 'django.utils.log.NullHandler',
},
'console': {
'class': 'sentry.logging.handlers.StructLogHandler',
},
'internal': {
'level': 'ERROR',
'filters': ['sentry:internal'],
'class': 'raven.contrib.django.handlers.SentryHandler',
},
},
'filters': {
'sentry:internal': {
'()': 'sentry.utils.raven.SentryInternalFilter',
},
},
'root': {
'level': 'NOTSET',
'handlers': ['console', 'internal'],
},
# LOGGING.overridable is a list of loggers including root that will change
# based on the overridden level defined above.
'overridable': ['celery', 'sentry'],
'loggers': {
'celery': {
'level': 'WARN',
},
'sentry': {
'level': 'INFO',
},
'sentry.errors': {
'handlers': ['console'],
'propagate': False,
},
'sentry.rules': {
'handlers': ['console'],
'propagate': False,
},
'multiprocessing': {
'handlers': ['console'],
# https://github.com/celery/celery/commit/597a6b1f3359065ff6dbabce7237f86b866313df
# This commit has not been rolled into any release and leads to a
# large amount of errors when working with postgres.
'level': 'CRITICAL',
'propagate': False,
},
'celery.worker.job': {
'handlers': ['console'],
'propagate': False,
},
'static_compiler': {
'level': 'INFO',
},
'django.request': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'toronado': {
'level': 'ERROR',
'handlers': ['null'],
'propagate': False,
},
'urllib3.connectionpool': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'boto3': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
'botocore': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
}
}
# django-rest-framework
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_PERMISSION_CLASSES': (
'sentry.api.permissions.NoPermission',
),
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Percy config for visual regression testing.
PERCY_DEFAULT_TESTING_WIDTHS = (1280, 375)
# Debugger
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.timer.TimerPanel',
'sentry.debug.panels.route.RoutePanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.sql.SQLPanel',
# TODO(dcramer): https://github.com/getsentry/sentry/issues/1722
# 'sentry.debug.panels.redis.RedisPanel',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Sentry and Raven configuration
SENTRY_CLIENT = 'sentry.utils.raven.SentryInternalClient'
SENTRY_FEATURES = {
'auth:register': True,
'organizations:api-keys': True,
'organizations:create': True,
'organizations:repos': False,
'organizations:sso': True,
'organizations:callsigns': False,
'projects:global-events': False,
'projects:plugins': True,
'projects:dsym': False,
'projects:sample-events': True,
'workflow:release-emails': False,
}
# Default time zone for localization in the UI.
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
SENTRY_DEFAULT_TIME_ZONE = 'UTC'
# Enable the Sentry Debugger (Beta)
SENTRY_DEBUGGER = False
SENTRY_IGNORE_EXCEPTIONS = (
'OperationalError',
)
# Should we send the beacon to the upstream server?
SENTRY_BEACON = True
# Allow access to Sentry without authentication.
SENTRY_PUBLIC = False
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = False
# Login url (defaults to LOGIN_URL)
SENTRY_LOGIN_URL = None
# Default project ID (for internal errors)
SENTRY_PROJECT = 1
# Project ID for recording frontend (javascript) exceptions
SENTRY_FRONTEND_PROJECT = None
# Only store a portion of all messages per unique group.
SENTRY_SAMPLE_DATA = True
# The following values control the sampling rates
SENTRY_SAMPLE_RATES = (
# up until N events, store 1 in M
(50, 1),
(1000, 2),
(10000, 10),
(100000, 50),
(1000000, 300),
(10000000, 2000),
)
SENTRY_MAX_SAMPLE_RATE = 10000
SENTRY_SAMPLE_TIMES = (
(3600, 1),
(360, 10),
(60, 60),
)
SENTRY_MAX_SAMPLE_TIME = 10000
# Web Service
SENTRY_WEB_HOST = 'localhost'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {}
# SMTP Service
SENTRY_SMTP_HOST = 'localhost'
SENTRY_SMTP_PORT = 1025
SENTRY_INTERFACES = {
'csp': 'sentry.interfaces.csp.Csp',
'device': 'sentry.interfaces.device.Device',
'exception': 'sentry.interfaces.exception.Exception',
'logentry': 'sentry.interfaces.message.Message',
'query': 'sentry.interfaces.query.Query',
'repos': 'sentry.interfaces.repos.Repos',
'request': 'sentry.interfaces.http.Http',
'sdk': 'sentry.interfaces.sdk.Sdk',
'stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'template': 'sentry.interfaces.template.Template',
'user': 'sentry.interfaces.user.User',
'applecrashreport': 'sentry.interfaces.applecrash.AppleCrashReport',
'breadcrumbs': 'sentry.interfaces.breadcrumbs.Breadcrumbs',
'contexts': 'sentry.interfaces.contexts.Contexts',
'threads': 'sentry.interfaces.threads.Threads',
'debug_meta': 'sentry.interfaces.debug_meta.DebugMeta',
'sentry.interfaces.Exception': 'sentry.interfaces.exception.Exception',
'sentry.interfaces.Message': 'sentry.interfaces.message.Message',
'sentry.interfaces.Stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'sentry.interfaces.Template': 'sentry.interfaces.template.Template',
'sentry.interfaces.Query': 'sentry.interfaces.query.Query',
'sentry.interfaces.Http': 'sentry.interfaces.http.Http',
'sentry.interfaces.User': 'sentry.interfaces.user.User',
'sentry.interfaces.Csp': 'sentry.interfaces.csp.Csp',
'sentry.interfaces.AppleCrashReport': 'sentry.interfaces.applecrash.AppleCrashReport',
'sentry.interfaces.Breadcrumbs': 'sentry.interfaces.breadcrumbs.Breadcrumbs',
'sentry.interfaces.Contexts': 'sentry.interfaces.contexts.Contexts',
'sentry.interfaces.Threads': 'sentry.interfaces.threads.Threads',
'sentry.interfaces.DebugMeta': 'sentry.interfaces.debug_meta.DebugMeta',
}
SENTRY_EMAIL_BACKEND_ALIASES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'dummy': 'django.core.mail.backends.dummy.EmailBackend',
'console': 'django.core.mail.backends.console.EmailBackend',
}
SENTRY_FILESTORE_ALIASES = {
'filesystem': 'django.core.files.storage.FileSystemStorage',
's3': 'sentry.filestore.s3.S3Boto3Storage',
}
# set of backends that do not need SMTP mail.* settings
# This list is a bit fragile and hardcoded, but it's unlikely that
# a user will be using a different backend that also mandates SMTP
# credentials.
SENTRY_SMTP_DISABLED_BACKENDS = frozenset((
'django.core.mail.backends.dummy.EmailBackend',
'django.core.mail.backends.console.EmailBackend',
'django.core.mail.backends.locmem.EmailBackend',
'django.core.mail.backends.filebased.EmailBackend',
'sentry.utils.email.PreviewBackend',
))
# Should users without superuser permissions be allowed to
# make projects public
SENTRY_ALLOW_PUBLIC_PROJECTS = True
# Can users be invited to organizations?
SENTRY_ENABLE_INVITES = True
# Default to not sending the Access-Control-Allow-Origin header on api/store
SENTRY_ALLOW_ORIGIN = None
# Enable scraping of javascript context for source code
SENTRY_SCRAPE_JAVASCRIPT_CONTEXT = True
# Buffer backend
SENTRY_BUFFER = 'sentry.buffer.Buffer'
SENTRY_BUFFER_OPTIONS = {}
# Cache backend
# XXX: We explicitly require the cache to be configured as it's not optional
# and causes serious confusion with the default django cache
SENTRY_CACHE = None
SENTRY_CACHE_OPTIONS = {}
# The internal Django cache is still used in many places
# TODO(dcramer): convert uses over to Sentry's backend
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# The cache version affects both Django's internal cache (at runtime) and
# Sentry's cache. This automatically overrides VERSION on the default
# CACHES backend.
CACHE_VERSION = 1
# Digests backend
SENTRY_DIGESTS = 'sentry.digests.backends.dummy.DummyBackend'
SENTRY_DIGESTS_OPTIONS = {}
# Quota backend
SENTRY_QUOTAS = 'sentry.quotas.Quota'
SENTRY_QUOTA_OPTIONS = {}
# Rate limiting backend
SENTRY_RATELIMITER = 'sentry.ratelimits.base.RateLimiter'
SENTRY_RATELIMITER_OPTIONS = {}
# The default value for project-level quotas
SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE = '90%'
# Node storage backend
SENTRY_NODESTORE = 'sentry.nodestore.django.DjangoNodeStorage'
SENTRY_NODESTORE_OPTIONS = {}
# Search backend
SENTRY_SEARCH = 'sentry.search.django.DjangoSearchBackend'
SENTRY_SEARCH_OPTIONS = {}
# SENTRY_SEARCH_OPTIONS = {
# 'urls': ['http://localhost:9200/'],
# 'timeout': 5,
# }
# Time-series storage backend
SENTRY_TSDB = 'sentry.tsdb.dummy.DummyTSDB'
SENTRY_TSDB_OPTIONS = {}
# rollups must be ordered from highest granularity to lowest
SENTRY_TSDB_ROLLUPS = (
# (time in seconds, samples to keep)
(10, 360), # 60 minutes at 10 seconds
(3600, 24 * 7), # 7 days at 1 hour
(3600 * 24, 90), # 90 days at 1 day
)
# Internal metrics
SENTRY_METRICS_BACKEND = 'sentry.metrics.dummy.DummyMetricsBackend'
SENTRY_METRICS_OPTIONS = {}
SENTRY_METRICS_SAMPLE_RATE = 1.0
SENTRY_METRICS_PREFIX = 'sentry.'
# URI Prefixes for generating DSN URLs
# (Defaults to URL_PREFIX by default)
SENTRY_ENDPOINT = None
SENTRY_PUBLIC_ENDPOINT = None
# Prevent variables (e.g. context locals, http data, etc) from exceeding this
# size in characters
SENTRY_MAX_VARIABLE_SIZE = 512
# Prevent variables within extra context from exceeding this size in
# characters
SENTRY_MAX_EXTRA_VARIABLE_SIZE = 4096 * 4 # 16kb
# For changing the amount of data seen in Http Response Body part.
SENTRY_MAX_HTTP_BODY_SIZE = 4096 * 4 # 16kb
# For various attributes we don't limit the entire attribute on size, but the
# individual item. In those cases we also want to limit the maximum number of
# keys
SENTRY_MAX_DICTIONARY_ITEMS = 50
SENTRY_MAX_MESSAGE_LENGTH = 1024 * 8
SENTRY_MAX_STACKTRACE_FRAMES = 50
SENTRY_MAX_EXCEPTIONS = 25
# Gravatar service base url
SENTRY_GRAVATAR_BASE_URL = 'https://secure.gravatar.com'
# Timeout (in seconds) for fetching remote source files (e.g. JS)
SENTRY_SOURCE_FETCH_TIMEOUT = 5
# Timeout (in seconds) for socket operations when fetching remote source files
SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT = 2
# Maximum content length for source files before we abort fetching
SENTRY_SOURCE_FETCH_MAX_SIZE = 40 * 1024 * 1024
# List of IP subnets which should not be accessible
SENTRY_DISALLOWED_IPS = ()
# Fields which managed users cannot change via Sentry UI. Username and password
# cannot be changed by managed users. Optionally include 'email' and
# 'name' in SENTRY_MANAGED_USER_FIELDS.
SENTRY_MANAGED_USER_FIELDS = ()
SENTRY_SCOPES = set([
'org:read',
'org:write',
'org:delete',
'member:read',
'member:write',
'member:delete',
'team:read',
'team:write',
'team:delete',
'project:read',
'project:write',
'project:delete',
'project:releases',
'event:read',
'event:write',
'event:delete',
])
SENTRY_DEFAULT_ROLE = 'member'
# Roles are ordered, which represents a sort-of hierarchy, as well as how
# they're presented in the UI. This is primarily important in that a member
# that is earlier in the chain cannot manage the settings of a member later
# in the chain (they still require the appropriate scope).
SENTRY_ROLES = (
{
'id': 'member',
'name': 'Member',
'desc': 'Members can view and act on events, as well as view most other data within the organization.',
'scopes': set([
'event:read', 'event:write', 'event:delete', 'project:releases',
'project:read', 'org:read', 'member:read', 'team:read',
]),
},
{
'id': 'admin',
'name': 'Admin',
'desc': 'Admin privileges on any teams of which they\'re a member. They can create new teams and projects, as well as remove teams and projects which they already hold membership on.',
'scopes': set([
'event:read', 'event:write', 'event:delete',
'org:read', 'member:read',
'project:read', 'project:write', 'project:delete', 'project:releases',
'team:read', 'team:write', 'team:delete',
]),
},
{
'id': 'manager',
'name': 'Manager',
'desc': 'Gains admin access on all teams as well as the ability to add and remove members.',
'is_global': True,
'scopes': set([
'event:read', 'event:write', 'event:delete',
'member:read', 'member:write', 'member:delete',
'project:read', 'project:write', 'project:delete', 'project:releases',
'team:read', 'team:write', 'team:delete',
'org:read', 'org:write',
]),
},
{
'id': 'owner',
'name': 'Owner',
'desc': 'Gains full permission across the organization. Can manage members as well as perform catastrophic operations such as removing the organization.',
'is_global': True,
'scopes': set([
'org:read', 'org:write', 'org:delete',
'member:read', 'member:write', 'member:delete',
'team:read', 'team:write', 'team:delete',
'project:read', 'project:write', 'project:delete', 'project:releases',
'event:read', 'event:write', 'event:delete',
]),
},
)
# See sentry/options/__init__.py for more information
SENTRY_OPTIONS = {}
SENTRY_DEFAULT_OPTIONS = {}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = False
# Delay (in ms) to induce on API responses
SENTRY_API_RESPONSE_DELAY = 0
# Watchers for various application purposes (such as compiling static media)
# XXX(dcramer): this doesn't work outside of a source distribution as the
# webpack.config.js is not part of Sentry's datafiles
SENTRY_WATCHERS = (
('webpack', [os.path.join(NODE_MODULES_ROOT, '.bin', 'webpack'), '--output-pathinfo', '--watch',
"--config={}".format(os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir, os.pardir, "webpack.config.js")))]),
)
# Max file size for avatar photo uploads
SENTRY_MAX_AVATAR_SIZE = 5000000
# statuspage.io support
STATUS_PAGE_ID = None
STATUS_PAGE_API_HOST = 'statuspage.io'
SENTRY_ONPREMISE = True
# Whether we should look at X-Forwarded-For header or not
# when checking REMOTE_ADDR ip addresses
SENTRY_USE_X_FORWARDED_FOR = True
def get_raven_config():
return {
'release': sentry.__build__,
'register_signals': True,
'include_paths': [
'sentry',
],
}
RAVEN_CONFIG = get_raven_config()
# Config options that are explicitly disabled from Django
DEAD = object()
# This will eventually get set from values in SENTRY_OPTIONS during
# sentry.runner.initializer:bootstrap_options
SECRET_KEY = DEAD
EMAIL_BACKEND = DEAD
EMAIL_HOST = DEAD
EMAIL_PORT = DEAD
EMAIL_HOST_USER = DEAD
EMAIL_HOST_PASSWORD = DEAD
EMAIL_USE_TLS = DEAD
SERVER_EMAIL = DEAD
EMAIL_SUBJECT_PREFIX = DEAD
SUDO_URL = 'sentry-sudo'
# TODO(dcramer): move this to sentry.io so it can be automated
SDK_VERSIONS = {
'raven-js': '3.9.1',
'raven-python': '5.32.0',
'sentry-laravel': '0.5.0',
'sentry-php': '1.6.0',
}
SDK_URLS = {
'raven-js': 'https://docs.sentry.io/clients/javascript/',
'raven-python': 'https://docs.sentry.io/clients/python/',
'raven-swift': 'https://docs.sentry.io/clients/cocoa/',
'sentry-php': 'https://docs.sentry.io/clients/php/',
'sentry-laravel': 'https://docs.sentry.io/clients/php/integrations/laravel/',
}
DEPRECATED_SDKS = {
# sdk name => new sdk name
'raven-objc': 'sentry-swift',
}
| 31.362466 | 192 | 0.674489 |
4a19ea547a219cdbf3ffa147ad934318a8992aa2
| 19,487 |
py
|
Python
|
seisflows3/optimize/base.py
|
bch0w/seisflows3
|
5158020d4efc06dae65dc472b59f1f958609348a
|
[
"BSD-2-Clause"
] | null | null | null |
seisflows3/optimize/base.py
|
bch0w/seisflows3
|
5158020d4efc06dae65dc472b59f1f958609348a
|
[
"BSD-2-Clause"
] | null | null | null |
seisflows3/optimize/base.py
|
bch0w/seisflows3
|
5158020d4efc06dae65dc472b59f1f958609348a
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
The Optimization library contains classes and methods used to solve nonlinear
optimization problems, i.e., misfit minimization. Various subclasses implement
different optimization algorithms.
.. note::
By default the base class implements a steepest descent optimization
"""
import os
import sys
import logging
import numpy as np
from seisflows3.tools import msg, unix
from seisflows3.tools.math import angle, dot
from seisflows3.plugins import line_search, preconds
from seisflows3.tools.specfem import check_poissons_ratio
from seisflows3.config import SeisFlowsPathsParameters, CFGPATHS
PAR = sys.modules["seisflows_parameters"]
PATH = sys.modules["seisflows_paths"]
solver = sys.modules["seisflows_solver"]
class Base:
"""
Nonlinear optimization abstract base class.
This base class provides a steepest descent optimization algorithm.
Nonlinear conjugate, quasi-Newton and Newton methods can be implemented on
top of this base class.
.. note::
To reduce memory overhead, vectors are read from disk rather than passed
from calling routines. For example, at the beginning of
compute_direction the current gradient is read from 'g_new' and the
resulting search direction is written to 'p_new'. As the inversion
progresses, other information is stored as well.
.. note::
The default numerical parameters defined below should work well for a
range of applications without manual tuning. If the nonlinear
optimization procedure stagnates, it may be due to issues involving
data quality or the choice of data misfit, data processing, or
regularization parameters. Problems in any of these areas usually
manifest themselves through stagnation of the nonlinear optimization
algorithm.
"""
# Class-specific logger accessed using self.logger
logger = logging.getLogger(__name__).getChild(__qualname__)
def __init__(self):
"""
These parameters should not be set by __init__!
Attributes are just initialized as NoneTypes for clarity and docstrings
:type iter: int
:param iter: the current iteration of the workflow
:type line_search: Class
:param line_search: a class controlling the line search functionality
for determining step length
:type precond: Class
:param precond: a class controlling the preconditioner functionality
for preconditiong gradient information
:type restarted: bool
:param restarted: a flag signalling if the optimization algorithm has
been restarted recently
:param m_new: current model
:param m_old: previous model
:param m_try: line search model
:param f_new: current objective function value
:param f_old: previous objective function value
:param f_try: line search function value
:param g_new: current gradient direction
:param g_old: previous gradient direction
:param p_new: current search direction
:param p_old: previous search direction
"""
self.iter = 1
self.line_search = None
self.precond = None
self.restarted = False
# Define the names of output stats logs to keep all paths in one place
# Line search log is named differently so that optimize doesn't
        # overwrite this log file when initiating the stats directory
self.line_search_log = "line_search"
self.log_factor = "factor"
self.log_gradient_norm_L1 = "gradient_norm_L1"
self.log_gradient_norm_L2 = "gradient_norm_L2"
self.log_misfit = "misfit"
self.log_restarted = "restarted"
self.log_slope = "slope"
self.log_step_count = "step_count"
self.log_step_length = "step_length"
self.log_theta = "theta"
# Define the names of variables used to keep track of models etc. so
# that we don't have multiple strings floating around defining the same
# thing
self.m_new = "m_new.npy"
self.m_old = "m_old.npy"
self.m_try = "m_try.npy"
self.f_new = "f_new.txt"
self.f_old = "f_old.txt"
self.f_try = "f_try.txt"
self.g_new = "g_new.npy"
self.g_old = "g_old.npy"
self.p_new = "p_new.npy"
self.p_old = "p_old.npy"
self.alpha = "alpha.npy"
@property
def required(self):
"""
A hard definition of paths and parameters required by this class,
alongside their necessity for the class and their string explanations.
"""
sf = SeisFlowsPathsParameters()
# Define the Parameters required by this module
sf.par("LINESEARCH", required=False, default="Bracket", par_type=str,
docstr="Algorithm to use for line search, see "
"seisflows3.plugins.line_search for available choices")
sf.par("PRECOND", required=False, par_type=str,
docstr="Algorithm to use for preconditioning gradients, see "
"seisflows3.plugins.preconds for available choices")
sf.par("STEPCOUNTMAX", required=False, default=10, par_type=int,
docstr="Max number of trial steps in line search before a "
"change in line search behavior")
sf.par("STEPLENINIT", required=False, default=0.05, par_type=float,
docstr="Initial line search step length, as a fraction "
"of current model parameters")
sf.par("STEPLENMAX", required=False, default=0.5, par_type=float,
docstr="Max allowable step length, as a fraction of "
"current model parameters")
# Define the Paths required by this module
sf.path("OPTIMIZE", required=False,
default=os.path.join(PATH.SCRATCH, "optimize"),
docstr="scratch path for nonlinear optimization data")
return sf
def check(self, validate=True):
"""
Checks parameters, paths, and dependencies
"""
if validate:
self.required.validate()
if PAR.LINESEARCH:
assert PAR.LINESEARCH in dir(line_search), \
f"LINESEARCH parameter must be in {dir(line_search)}"
if PAR.PRECOND:
assert PAR.PRECOND in dir(preconds), \
f"PRECOND must be in {dir(preconds)}"
        assert 0. < PAR.STEPLENINIT, f"STEPLENINIT must be > 0."
        assert 0. < PAR.STEPLENMAX, f"STEPLENMAX must be > 0."
assert PAR.STEPLENINIT < PAR.STEPLENMAX, \
f"STEPLENINIT must be < STEPLENMAX"
def setup(self):
"""
Sets up nonlinear optimization machinery
"""
        # All optimization statistics text files will be written to path_stats
path_stats = os.path.join(PATH.WORKDIR, CFGPATHS.STATSDIR)
unix.mkdir(path_stats)
# Line search machinery is defined externally as a plugin class
self.line_search = getattr(line_search, PAR.LINESEARCH)(
step_count_max=PAR.STEPCOUNTMAX, step_len_max=PAR.STEPLENMAX,
log_file=os.path.join(path_stats, f"{self.line_search_log}.txt"),
)
if PAR.PRECOND:
self.precond = getattr(preconds, PAR.PRECOND)()
else:
self.precond = None
# Instantiate all log files in stats/ directory as empty text files
# OVERWRITES any existing stats/ log files that may already be there
for key, val in vars(self).items():
if "log_" in key:
self.write_stats(val)
        # Ensure that line search step count starts at 0 (workflow.initialize)
self.write_stats(self.log_step_count, 0)
unix.mkdir(PATH.OPTIMIZE)
if "MODEL_INIT" in PATH:
m_new = solver.merge(solver.load(PATH.MODEL_INIT))
self.save(self.m_new, m_new)
self.check_model(m_new, self.m_new)
@property
def eval_str(self):
"""
Print out the evaluation string, which states what iteration and line
search step count we are at. Useful for log statements
For example, an inversion at iteration 1 and step count 2 will return
'i01s02'
"""
iter_ = self.iter
step = self.line_search.step_count
return f"i{iter_:0>2}s{step:0>2}"
def compute_direction(self):
"""
Computes a steepest descent search direction (inverse gradient)
with an optional user-defined preconditioner.
.. note::
Other optimization algorithms must overload this method
"""
self.logger.info(f"computing search direction with {PAR.OPTIMIZE}")
g_new = self.load(self.g_new)
if self.precond is not None:
p_new = -1 * self.precond(g_new)
else:
p_new = -1 * g_new
self.save(self.p_new, p_new)
def initialize_search(self):
"""
Initialize the plugin line search machinery. Should only be run at
the beginning of line search, by the main workflow module.
"""
m = self.load(self.m_new)
g = self.load(self.g_new)
p = self.load(self.p_new)
f = self.loadtxt(self.f_new)
norm_m = max(abs(m))
norm_p = max(abs(p))
gtg = dot(g, g)
gtp = dot(g, p)
# Restart plugin line search if the optimization library restarts
if self.restarted:
self.line_search.clear_history()
# Optional safeguard to prevent step length from getting too large
if PAR.STEPLENMAX:
self.line_search.step_len_max = PAR.STEPLENMAX * norm_m / norm_p
self.logger.debug(f"max step length safeguard is: "
f"{self.line_search.step_len_max:.2E}")
# Alpha defines the trial step length
alpha, _ = self.line_search.initialize(iter=self.iter, step_len=0.,
func_val=f, gtg=gtg, gtp=gtp
)
# Optional initial step length override
if PAR.STEPLENINIT and len(self.line_search.step_lens) <= 1:
alpha = PAR.STEPLENINIT * norm_m / norm_p
self.logger.debug(f"manually set initial step length: {alpha:.2E}")
        # The trial model is the current model perturbed along the search
        # direction by the trial step length
m_try = m + alpha * p
self.save(self.m_try, m_try)
self.savetxt(self.alpha, alpha)
self.check_model(m_try, self.m_try)
def update_search(self):
"""
        Updates the line search status and step length, and checks whether the
        line search has been completed.
Available status codes from line_search.update():
status == 1 : finished
status == 0 : not finished
status == -1 : failed
"""
alpha, status = self.line_search.update(
iter=self.iter, step_len=self.loadtxt(self.alpha),
func_val=self.loadtxt(self.f_try)
)
        # If the line search is continuing or has just finished, write the updated trial model to disk
if status in [0, 1]:
m = self.load(self.m_new)
p = self.load(self.p_new)
self.savetxt(self.alpha, alpha)
m_try = m + alpha * p
self.save(self.m_try, m_try)
self.check_model(m_try, self.m_try)
return status
def finalize_search(self):
"""
Prepares algorithm machinery and scratch directory for next model update
Removes old model/search parameters, moves current parameters to old,
sets up new current parameters and writes statistic outputs
"""
self.logger.info(msg.sub("FINALIZING LINE SEARCH"))
g = self.load(self.g_new)
p = self.load(self.p_new)
x = self.line_search.search_history()[0]
f = self.line_search.search_history()[1]
# Clean scratch directory
unix.cd(PATH.OPTIMIZE)
# Remove the old model parameters
if self.iter > 1:
self.logger.info("removing previously accepted model files (old)")
for fid in [self.m_old, self.f_old, self.g_old, self.p_old]:
unix.rm(fid)
self.logger.info("shifting current model (new) to previous model (old)")
unix.mv(self.m_new, self.m_old)
unix.mv(self.f_new, self.f_old)
unix.mv(self.g_new, self.g_old)
unix.mv(self.p_new, self.p_old)
self.logger.info("setting accepted line search model as current model")
unix.mv(self.m_try, self.m_new)
self.savetxt(self.f_new, f.min())
self.logger.info(f"current misfit is {self.f_new}={f.min():.3E}")
# !!! TODO Describe what stats are being written here
self.logger.info(f"writing optimization stats to: {CFGPATHS.STATSDIR}")
self.write_stats(self.log_factor, value=
-dot(g, g) ** -0.5 * (f[1] - f[0]) / (x[1] - x[0])
)
self.write_stats(self.log_gradient_norm_L1, value=np.linalg.norm(g, 1))
self.write_stats(self.log_gradient_norm_L2, value=np.linalg.norm(g, 2))
self.write_stats(self.log_misfit, value=f[0])
self.write_stats(self.log_restarted, value=self.restarted)
self.write_stats(self.log_slope, value=(f[1] - f[0]) / (x[1] - x[0]))
self.write_stats(self.log_step_count, value=self.line_search.step_count)
self.write_stats(self.log_step_length, value=x[f.argmin()])
self.write_stats(self.log_theta,
value=180. * np.pi ** -1 * angle(p, -g))
self.logger.info("resetting line search step count to 0")
self.line_search.step_count = 0
def retry_status(self):
"""
After a failed line search, this determines if restart is worthwhile
by checking, in effect, if the search direction was the same as gradient
direction
"""
g = self.load(self.g_new)
p = self.load(self.p_new)
theta = angle(p, -g)
self.logger.debug(f"theta: {theta:6.3f}")
thresh = 1.e-3
if abs(theta) < thresh:
return 0
else:
return 1
def restart(self):
"""
Restarts nonlinear optimization algorithm for any schema that is NOT
steepest descent (default base class).
Keeps current position in model space, but discards history of
nonlinear optimization algorithm in an attempt to recover from
numerical stagnation.
"""
# Steepest descent (base) does not need to be restarted
if PAR.OPTIMIZE != "base":
g = self.load(self.g_new)
self.save(self.p_new, -g)
self.line_search.clear_history()
self.restarted = 1
def write_stats(self, log, value=None, format="18.6E"):
"""
Simplified write function to append values to text files in the
        STATSDIR. Used because line search stats would otherwise be overwritten
        by subsequent iterations, so values that should be retained are
        appended to the text files instead.
Log files will look something like:
ITER FACTOR
==== ======
1 0.0
:type log: str
:param log: name of the file to write to. Will append .txt to it
:type value: float
:param value: value to write to file
:type format: str
:param format: string formatter for value
"""
fid = os.path.join(PATH.WORKDIR, CFGPATHS.STATSDIR, f"{log}.txt")
# If no value is given, assuming we are being run from setup() and
# writing to new files. Will OVERWRITE any existing files
if value is None:
with open(fid, "w") as f:
f.write(f"{'ITER':>4} {log.upper():>18}\n")
f.write(f"{'='*4} {'='*18}\n")
else:
with open(fid, "a") as f:
f.write(f"{self.iter:>4} {value:{format}}\n")
def check_model(self, m, tag):
"""
Check to ensure that the model parameters fall within the guidelines
of the solver. Print off min/max model parameters for the User.
:type m: np.array
:param m: model to check parameters of
:type tag: str
:param tag: tag of the model to be used for more specific error msgs
"""
# Dynamic way to split up the model based on number of params
pars = {}
for i, par in enumerate(solver.parameters):
pars[par] = np.split(m, len(solver.parameters))[i]
        # Check Poisson's ratio, which will cause SPECFEM to error out if outside limits
if (pars["vp"] is not None) and (pars["vs"] is not None):
self.logger.debug(f"checking poissons ratio for: '{tag}'")
pars["pr"] = check_poissons_ratio(vp=pars["vp"], vs=pars["vs"])
if pars["pr"].min() < 0:
self.logger.warning("minimum poisson's ratio is negative")
# Tell the User min and max values of the updated model
self.logger.info(f"model parameters ({tag} {self.eval_str}):")
parts = "{minval:.2f} <= {key} <= {maxval:.2f}"
for key, vals in pars.items():
self.logger.info(parts.format(minval=vals.min(), key=key,
maxval=vals.max())
)
@staticmethod
def load(filename):
"""
        Convenience function to read vectors from disk as Numpy files,
reads directly from PATH.OPTIMIZE. Works around Numpy's behavior of
appending '.npy' to files that it saves.
:type filename: str
:param filename: filename to read from
:rtype: np.array
:return: vector read from disk
"""
fid = os.path.join(PATH.OPTIMIZE, filename)
if not os.path.exists(fid):
fid += ".npy"
return np.load(fid)
@staticmethod
def save(filename, array):
"""
Convenience function to write vectors to disk as numpy files.
        Writes directly to PATH.OPTIMIZE
        :type filename: str
        :param filename: filename to write to
:type array: np.array
:param array: array to be saved
"""
np.save(os.path.join(PATH.OPTIMIZE, filename), array)
@staticmethod
def loadtxt(filename):
"""
Reads scalars from optimize directory on disk,
accounts for savetxt() appending file extension
:type filename: str
:param filename: filename to read from
:rtype: float
:return: scalar read from disk
"""
if not os.path.splitext(filename)[1]:
filename += ".txt"
return float(np.loadtxt(os.path.join(PATH.OPTIMIZE, filename)))
@staticmethod
def savetxt(filename, scalar):
"""
Writes scalars to disk with a specific format, appends .txt to the
filename to make it clear that these are text files.
:type filename: str
        :param filename: filename to write to
:type scalar: float
:param scalar: value to write to disk
"""
if not os.path.splitext(filename)[1]:
filename += ".txt"
np.savetxt(os.path.join(PATH.OPTIMIZE, filename), [scalar], "%11.6e")
| 37.838835 | 80 | 0.613486 |
4a19ec8b846b6602bd7ebfef1903e91c45820b59
| 684 |
py
|
Python
|
toys/gaussians/roofit_example.py
|
zfit/benchmarks
|
9d76ecce9514438113ebb7a8e487f225e905c0a8
|
[
"BSD-3-Clause"
] | null | null | null |
toys/gaussians/roofit_example.py
|
zfit/benchmarks
|
9d76ecce9514438113ebb7a8e487f225e905c0a8
|
[
"BSD-3-Clause"
] | null | null | null |
toys/gaussians/roofit_example.py
|
zfit/benchmarks
|
9d76ecce9514438113ebb7a8e487f225e905c0a8
|
[
"BSD-3-Clause"
] | null | null | null |
from ROOT import RooRealVar, RooGaussian, RooChebychev, RooAddPdf, RooArgList, RooArgSet, RooFit
x = RooRealVar("x","x",-1,1)
# Use RooGaussian in the generation
mean = RooRealVar("mean","mean of gaussian",0,-1,1)
sigma = RooRealVar("sigma","sigma of gaussian",0.1,-1,1)
sig = RooGaussian("gauss","gaussian PDF",x,mean,sigma)
# Background
a0 = RooRealVar("a0","a0",0.5,0.,1.)
a1 = RooRealVar("a1","a1",-0.2,0.,1.)
bkg = RooChebychev("bkg","Background",x,RooArgList(a0,a1))
bkgfrac = RooRealVar("bkgfrac","fraction of background",0.5,0.,1.)
model = RooAddPdf("model","g+a",RooArgList(bkg,sig), RooArgList(bkgfrac) )
data = model.generate(RooArgSet(x), 10000)
model.fitTo(data)
| 32.571429 | 96 | 0.701754 |
4a19ec9614eb58502c07ed1039d07389b2feccaa
| 5,434 |
py
|
Python
|
api/views.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 2 |
2016-12-05T03:31:47.000Z
|
2017-02-13T20:10:39.000Z
|
api/views.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 1 |
2016-12-14T10:53:15.000Z
|
2016-12-17T18:52:25.000Z
|
api/views.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 1 |
2017-01-14T10:56:28.000Z
|
2017-01-14T10:56:28.000Z
|
from distutils.util import strtobool
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import RedirectView
from jsonapi.views import (
JSONAPICreateView,
JSONAPIDeleteView,
JSONAPIDetailView,
JSONAPIListView,
)
from ledger.models import Account, Barcode, Group, Product, Transaction
from terms.views import TermsMixin
User = get_user_model()
class ListRestrictMixin:
def get_queryset(self):
queryset = super().get_queryset()
if not strtobool(self.request.GET.get("list_restrict", "1")):
if not self.request.user.is_superuser:
raise PermissionDenied()
return queryset
return eval(f"queryset.filter({self.list_restrict})")
class LoginRequiredMixin(LoginRequiredMixin):
raise_exception = True
class ObjectPermissionMixin:
def get_object(self):
# https://docs.djangoproject.com/en/3.2/topics/class-based-views/generic-display/#performing-extra-work
obj = super().get_object()
if (
eval(self.object_permission) != self.request.user
and not self.request.user.is_superuser
):
raise PermissionDenied()
return obj
class JSONAPICreateView(JSONAPICreateView):
template_name = "api/form.html"
class JSONAPIDeleteView(JSONAPIDeleteView):
template_name = "api/confirm_delete.html"
class JSONAPIDetailView(JSONAPIDetailView):
template_name = "api/detail.html"
class JSONAPIListView(JSONAPIListView):
template_name = "api/list.html"
paginate_by = 10
paginate_orphans = 5
class AccountDetailView(
LoginRequiredMixin, TermsMixin, ObjectPermissionMixin, JSONAPIDetailView
):
model = Account
http_method_names = ["get"]
object_permission = "obj.user"
class AccountListView(
LoginRequiredMixin, TermsMixin, ListRestrictMixin, JSONAPIListView
):
model = Account
http_method_names = ["get"]
list_restrict = "pk=self.request.user.account.pk"
class BarcodeCreateView(LoginRequiredMixin, TermsMixin, JSONAPICreateView):
model = Barcode
http_method_names = ["get", "post"]
fields = ["id", "product"]
def get_success_url(self):
return reverse("api:barcode-detail", kwargs={"pk": self.object.pk})
class BarcodeListView(LoginRequiredMixin, TermsMixin, JSONAPIListView):
model = Barcode
http_method_names = ["get", "post"]
class BarcodeDeleteView(LoginRequiredMixin, TermsMixin, JSONAPIDeleteView):
model = Barcode
http_method_names = ["get", "post", "delete"]
def get_success_url(self):
return reverse("api:barcode-list")
class BarcodeDetailView(LoginRequiredMixin, TermsMixin, JSONAPIDetailView):
model = Barcode
http_method_names = ["get", "delete"]
delete_view = BarcodeDeleteView
class GroupDetailView(LoginRequiredMixin, TermsMixin, JSONAPIDetailView):
model = Group
http_method_names = ["get"]
class GroupListView(LoginRequiredMixin, TermsMixin, JSONAPIListView):
model = Group
http_method_names = ["get"]
class ProductDetailView(LoginRequiredMixin, TermsMixin, JSONAPIDetailView):
model = Product
http_method_names = ["get"]
class ProductListView(LoginRequiredMixin, TermsMixin, JSONAPIListView):
model = Product
http_method_names = ["get"]
class TransactionCreateView(LoginRequiredMixin, TermsMixin, JSONAPICreateView):
model = Transaction
http_method_names = ["get", "post"]
fields = ["product", "quantity"]
def form_valid(self, form):
form.instance.account = self.request.user.account
return super().form_valid(form)
def get_success_url(self):
return reverse("api:transaction-detail", kwargs={"pk": self.object.pk})
class TransactionDeleteView(
LoginRequiredMixin, TermsMixin, ObjectPermissionMixin, JSONAPIDeleteView
):
model = Transaction
http_method_names = ["get", "post"]
object_permission = "obj.account.user"
def get_success_url(self):
return reverse("api:transaction-list")
class TransactionDetailView(
LoginRequiredMixin, TermsMixin, ObjectPermissionMixin, JSONAPIDetailView
):
model = Transaction
http_method_names = ["get", "delete"]
delete_view = TransactionDeleteView
object_permission = "obj.account.user"
@method_decorator(never_cache, name="dispatch")
class TransactionListView(
LoginRequiredMixin, TermsMixin, ListRestrictMixin, JSONAPIListView
):
model = Transaction
http_method_names = ["get", "post"]
create_view = TransactionCreateView
list_restrict = "account=self.request.user.account"
class UserDetailView(
LoginRequiredMixin, TermsMixin, ObjectPermissionMixin, JSONAPIDetailView
):
model = User
http_method_names = ["get"]
object_permission = "obj"
class UserListView(LoginRequiredMixin, TermsMixin, ListRestrictMixin, JSONAPIListView):
model = User
http_method_names = ["get"]
list_restrict = "pk=self.request.user.pk"
class UserRedirectView(LoginRequiredMixin, TermsMixin, RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
return reverse("api:user-detail", kwargs={"pk": self.request.user.pk})
| 28.15544 | 111 | 0.731321 |
4a19ed5feccf9119e2bd944e2e98a344f8a5cc8f
| 7,314 |
py
|
Python
|
aim/ext/notebook/notebook.py
|
SGevorg/aim-deploy
|
45bc4321a2d0a4d1016f3ea7ee721f982225bd44
|
[
"Apache-2.0"
] | null | null | null |
aim/ext/notebook/notebook.py
|
SGevorg/aim-deploy
|
45bc4321a2d0a4d1016f3ea7ee721f982225bd44
|
[
"Apache-2.0"
] | null | null | null |
aim/ext/notebook/notebook.py
|
SGevorg/aim-deploy
|
45bc4321a2d0a4d1016f3ea7ee721f982225bd44
|
[
"Apache-2.0"
] | null | null | null |
import shlex
from aim.cli.configs import VERSION_NAME, UP_NAME
import aim.cli.manager.manager as manager
# Error message prefix for aim commands
ERROR_MSG_PREFIX = b'Error:'
# returned by get_execution_context
_COLAB_EXEC_CONTEXT = "_COLAB_EXEC_CONTEXT"
_IPYTHON_EXEC_CONTEXT = "_IPYTHON_EXEC_CONTEXT"
_OTHER_EXEC_CONTEXT = "_OTHER_EXEC_CONTEXT"
# current execution context
_CURRENT_CONTEXT = _OTHER_EXEC_CONTEXT
# environment specific constants
# useful for detecting the environment from the UI
_SAGE_MAKER_NOTEBOOK_PATH_POSTFIX = "/aim-sage"
_NOTEBOOK_PATH_POSTFIX = "/notebook"
def get_execution_context():
"""Determine the most specific context that we're in.
Returns:
_COLAB_EXEC_CONTEXT: If in Colab with an IPython notebook context.
_IPYTHON_EXEC_CONTEXT: If we are in an IPython notebook
context but not in colab (i.e. `jupyter notebook`)
line).
_OTHER_EXEC_CONTEXT: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
# imports are dynamic, since some modules are not available for all contexts
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
# @TODO find a stable way to get colab context
if ipython is not None and 'google.colab' in str(ipython):
# We are in Colab notebook context
# global _CURRENT_CONTEXT
# _CURRENT_CONTEXT = _COLAB_EXEC_CONTEXT
return _COLAB_EXEC_CONTEXT
# In an IPython command line shell or Jupyter notebook
elif ipython is not None and ipython.has_trait("kernel"):
# global _CURRENT_CONTEXT
# _CURRENT_CONTEXT = _IPYTHON_EXEC_CONTEXT
return _IPYTHON_EXEC_CONTEXT
# Otherwise, we're not in a known notebook context.
return _OTHER_EXEC_CONTEXT
def get_argument_options(line):
"""
    Returns the command and its parsed argument options from the magic line as a (command, options) tuple
    Currently parses only the --<name>=value style into the options dict
    Default values are set for the required fields unless they are explicitly provided
    Unsupported args are silently omitted @TODO notify about unsupported args
@TODO add process args all styles to dict
"""
# @TODO improve this logic
# --proxy-url is useful to print the right url, and set UI's url into iframe correctly
supported_args = ['--port', '--host', '--repo', '--proxy-url']
args = shlex.split(line)
command = args[0]
options = {
'--host': '127.0.0.1',
'--port': '43801',
'--base-path': _NOTEBOOK_PATH_POSTFIX
}
for arg in args[1:]:
key, value = arg.split('=', 1)
if key in supported_args:
options[key] = value
# if --proxy-url passed
if options.get('--proxy-url'):
options['--base-path'] = f'/proxy/absolute/{options["--port"]}{_SAGE_MAKER_NOTEBOOK_PATH_POSTFIX}'
return command, options
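# Illustrative only (hypothetical magic line): parsing
#     up --port=43800 --proxy-url=https://notebook.example.com
# returns the command 'up' together with an options dict holding the default
# host '127.0.0.1', the overridden port '43800', the given proxy url and a
# '--base-path' of '/proxy/absolute/43800/aim-sage'; without --proxy-url the
# base path stays at the plain '/notebook' postfix.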
def display_colab(port, display):
"""Display Aim instance in a Colab output frame.
    It needs to go through the proxy
"""
import IPython.display
shell = """
(async () => {{
const url = new URL('{path}/', await google.colab.kernel.proxyPort({port}, {{'cache': true}}));
const iframe = document.createElement('iframe');
iframe.src = url;
const a = document.createElement('a');
a.href = url;
a.innerHTML = 'Open in new browser tab';
a.setAttribute('target', '_blank');
iframe.setAttribute('width', '100%');
iframe.setAttribute('height', '800');
iframe.setAttribute('frameborder', 0);
document.body.appendChild(iframe);
document.body.appendChild(a);
}})();
""".format(path=_NOTEBOOK_PATH_POSTFIX, port=port)
script = IPython.display.Javascript(shell)
if display:
display.update(script)
else:
IPython.display.display(script)
def display_notebook(host, port, display, proxy_url=None):
"""Display Aim instance in an ipython context output frame.
"""
import IPython.display
url = "{}:{}{}".format(host, port, _NOTEBOOK_PATH_POSTFIX)
# @TODO add warning if proxy_url is not defined
if proxy_url:
# jupyter-server-proxy supports absolute paths by using it with /proxy/absolute/<port> path
url = "{}{}{}{}/".format(proxy_url, '/proxy/absolute/', port, _SAGE_MAKER_NOTEBOOK_PATH_POSTFIX)
print(url)
shell = """
<iframe id="aim" width="100%" height="800" frameborder="0" src={}>
</iframe>
""".format(url)
iframe = IPython.display.HTML(shell)
display.update(iframe)
def up(options, context):
"""
    Runs the `aim up` command with the corresponding arguments
    Handles the result of the command
    Renders the <iframe> tag for the notebook and a message for the shell users
    The <iframe> is rendered in the way appropriate to each execution context (mainly for notebooks)
"""
try:
import IPython
import IPython.display
except ImportError:
IPython = None
display = None
if context == _OTHER_EXEC_CONTEXT:
print("Launching Aim ...")
else:
display = IPython.display.display(
IPython.display.Pretty("Launching Aim ..."),
display_id=True,
)
result = manager.run_process(UP_NAME, options)
if result.status == manager.ManagerActionStatuses.Failed:
print(result.info["message"])
return
port = result.info["port"]
host = result.info["host"]
# successful exec of aim up command
if context == _COLAB_EXEC_CONTEXT:
display_colab(port, display)
return
if context == _IPYTHON_EXEC_CONTEXT:
display_notebook(host, port, display, options.get("--proxy-url"))
return
# other context
print("Open {}:{}".format(host, port))
def version(options, context):
"""Handles aim version (get version process) and send to the ui"""
result = manager.run_process(VERSION_NAME, options)
if result.status is manager.ManagerActionStatuses.Failed:
print(result.info["message"])
else:
print("Aim v{}".format(result.info["version"]))
# Those are aim magic function available commands
# This is why we are not using constants from aim.cli.commands
# It is possible to add commands outside aim cli
handlers = {
UP_NAME: up,
VERSION_NAME: version
}
def execute_magic_aim(line):
""" `aim` line magic function
We are trying to keep similarity with the native aim cli commands as much as possible
"""
context = get_execution_context()
command, options = get_argument_options(line)
# check command existence
if command not in handlers:
print('Invalid operation.')
return
# call corresponding handler
handlers[command](options, context)
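# Illustrative only: once the extension is loaded (see below), a notebook cell
# such as
#     %aim up --port=43800 --repo=.
# dispatches to the `up` handler above, while `%aim version` prints the
# installed Aim version; any other command prints 'Invalid operation.'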
def load_ipython_extension(ipython):
ipython.register_magic_function(execute_magic_aim, magic_kind="line", magic_name="aim")
| 32.651786 | 112 | 0.654908 |
4a19ed92547c6b8f2272fae6749819cf8366e20a
| 2,834 |
py
|
Python
|
baselines/test.py
|
vykimo/twitter_best_date
|
557c3a1e084633760ceda11ae340a3d2871d7926
|
[
"Apache-2.0"
] | 1 |
2019-08-22T03:45:47.000Z
|
2019-08-22T03:45:47.000Z
|
baselines/test.py
|
vykimo/twitter_best_date
|
557c3a1e084633760ceda11ae340a3d2871d7926
|
[
"Apache-2.0"
] | null | null | null |
baselines/test.py
|
vykimo/twitter_best_date
|
557c3a1e084633760ceda11ae340a3d2871d7926
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import argparse
import json
from sklearn.metrics import mean_squared_error
from baselines import Baselines
import numpy as np
def parse_arguments():
parser = argparse.ArgumentParser(description='Test baselines (MSE).')
    parser.add_argument('-f', '--file', dest='file', type=open, help='file with tweets gathered')
    parser.add_argument('-s', '--skip', dest='skip', action='store_true', help='skip tweets with no hashtags')
return parser.parse_args()
def main(run_args):
print("# Tests")
if run_args.file:
# load dataset
datas = json.load(run_args.file)
if run_args.skip:
# Delete empty hashtags
print("Before cleaning tweets without hashtags : "+str(len(datas)))
datas = [row for row in datas if len(row['hashtag'])>0]
print("After cleaning tweets without hashtags : "+str(len(datas)))
# split datas
train_size = int(len(datas) * 0.66)
train, test = datas[1:train_size], datas[train_size:]
else:
train = [ {'user': 'user1', 'hashtag':['sport','nhl'], 'score': 10, 'score2': 100}, {'user': 'user1', 'hashtag':['sport','nba'], 'score': 20, 'score2': 200}, {'user': 'user2', 'hashtag':['sport','nhl'], 'score': 10, 'score2': 10}, {'user': 'user1', 'hashtag':['nba'], 'score': 30, 'score2': 300}]
test = [ {'user': 'user1', 'hashtag':['sport','nhl'], 'score': 10, 'score2': 100}, {'user': 'user3', 'hashtag':['sport','nhl'], 'score': 10, 'score2': 30}, {'user': 'user2', 'hashtag':['sport','nhl'], 'score': 10, 'score2': 10}, {'user': 'user3', 'hashtag':[], 'score': 10, 'score2': 30}, {'user': 'user1', 'hashtag':[], 'score': 10, 'score2': 100} ]
test_y = [[row['score'],row['score2']] for row in test]
# baselines
predictions_baseline = []
for i in range(0,4):
baselines = Baselines()
pred = baselines.int2function(i, train, test)
test_score = mean_squared_error(test_y, pred)
print('=Baseline '+str(i+1)+' = Test MSE: %.3f' % test_score)
predictions_baseline.append({'score':test_score, 'prediction':pred})
predictions1 = []
predictions2 = []
for i in range(0,len(predictions_baseline[0]['prediction'])):
lis = [predictions_baseline[0]['prediction'][i], predictions_baseline[1]['prediction'][i], predictions_baseline[2]['prediction'][i], predictions_baseline[3]['prediction'][i]]
predictions1.extend(np.matrix(lis).max(0).tolist())
predictions2.append(np.mean(lis, axis=0))
test_score1 = mean_squared_error(test_y, predictions1)
test_score2 = mean_squared_error(test_y, predictions2)
print('=Max Baseline = Test MSE: %.3f' % test_score1)
print('=Mean Baseline = Test MSE: %.3f' % test_score2)
predictions_baseline.append({'score':test_score1, 'prediction':predictions1})
predictions_baseline.append({'score':test_score2, 'prediction':predictions2})
if __name__ == "__main__":
args = parse_arguments()
main(args)
| 47.233333 | 352 | 0.670783 |
4a19f10b6527ad480f95975efed177238363b8df
| 3,490 |
py
|
Python
|
indico/core/oauth/util.py
|
javfg/indico
|
2634756ba1e9caf6dd8fc9afc3f47291fda5816d
|
[
"MIT"
] | null | null | null |
indico/core/oauth/util.py
|
javfg/indico
|
2634756ba1e9caf6dd8fc9afc3f47291fda5816d
|
[
"MIT"
] | null | null | null |
indico/core/oauth/util.py
|
javfg/indico
|
2634756ba1e9caf6dd8fc9afc3f47291fda5816d
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import hashlib
from uuid import UUID
from authlib.oauth2.rfc6749 import list_to_scope, scope_to_list
from sqlalchemy.dialects.postgresql.array import ARRAY
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.core.oauth.logger import logger
from indico.core.oauth.models.applications import OAuthApplication, OAuthApplicationUserLink
from indico.core.oauth.models.personal_tokens import PersonalToken
from indico.core.oauth.models.tokens import OAuthToken
# The maximum number of tokens to keep for any given app/user and scope combination
MAX_TOKENS_PER_SCOPE = 50
# The prefix for OAuth tokens
TOKEN_PREFIX_OAUTH = 'indo_'
# The prefix for personal tokens
TOKEN_PREFIX_PERSONAL = 'indp_'
# The prefix for service tokens (not handled by this module)
TOKEN_PREFIX_SERVICE = 'inds_'
def query_token(token_string, allow_personal=False):
token_hash = hashlib.sha256(token_string.encode()).hexdigest()
if token_string.startswith(TOKEN_PREFIX_PERSONAL):
if not allow_personal:
return None
return (PersonalToken.query
.filter_by(access_token_hash=token_hash)
.first())
# XXX: oauth tokens may be from pre-3.0 and thus not use a token prefix, so we simply
# assume that any token without another known prefix is an oauth token
# we always need the app link (which already loads the application) and the user
# since we need those to check if the token is still valid
return (OAuthToken.query
.filter_by(access_token_hash=token_hash)
.options(joinedload('app_user_link').joinedload('user'))
.first())
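# Illustrative only (hypothetical token strings): query_token('indp_abc123',
# allow_personal=True) hashes the string with SHA-256 and looks it up among
# personal tokens, while query_token('indo_abc123') -- or any string without a
# known prefix -- falls through to the OAuth token lookup above.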
def query_client(client_id):
try:
UUID(hex=client_id)
except ValueError:
return None
return OAuthApplication.query.filter_by(client_id=client_id, is_enabled=True).first()
def save_token(token_data, request):
requested_scopes = set(scope_to_list(token_data.get('scope', '')))
application = OAuthApplication.query.filter_by(client_id=request.client.client_id).one()
link = OAuthApplicationUserLink.query.with_parent(application).with_parent(request.user).first()
if link is None:
link = OAuthApplicationUserLink(application=application, user=request.user, scopes=requested_scopes)
else:
if not requested_scopes:
# for already-authorized apps not specifying a scope uses all scopes the
# user previously granted to the app
requested_scopes = set(link.scopes)
token_data['scope'] = list_to_scope(requested_scopes)
new_scopes = requested_scopes - set(link.scopes)
if new_scopes:
logger.info('New scopes for %r: %s', link, new_scopes)
link.update_scopes(new_scopes)
link.tokens.append(OAuthToken(access_token=token_data['access_token'], scopes=requested_scopes))
# get rid of old tokens if there are too many
q = (db.session.query(OAuthToken.id)
.with_parent(link)
.filter_by(_scopes=db.cast(sorted(requested_scopes), ARRAY(db.String)))
.order_by(OAuthToken.created_dt.desc())
.offset(MAX_TOKENS_PER_SCOPE)
.scalar_subquery())
OAuthToken.query.filter(OAuthToken.id.in_(q)).delete(synchronize_session='fetch')
| 39.213483 | 108 | 0.726648 |
4a19f18aa4f11320f26dadcb7e55da9bbab7576a
| 839 |
py
|
Python
|
workflows/diagnostics/tests/prognostic/test_generate_movie_stills.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | 5 |
2021-03-20T22:42:40.000Z
|
2021-06-30T18:39:36.000Z
|
workflows/diagnostics/tests/prognostic/test_generate_movie_stills.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | 154 |
2021-03-17T19:44:29.000Z
|
2021-09-15T23:02:12.000Z
|
workflows/diagnostics/tests/prognostic/test_generate_movie_stills.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | 1 |
2021-06-16T22:04:24.000Z
|
2021-06-16T22:04:24.000Z
|
import numpy as np
import xarray as xr
from fv3net.diagnostics.prognostic_run.views.movies import _movie_specs, _non_zero
def test__movie_specs():
movie_specs = _movie_specs()
for name, spec in movie_specs.items():
func = spec["plotting_function"]
variables = spec["required_variables"]
assert callable(func)
assert isinstance(variables, list)
def test__non_zero():
da_zeros = xr.DataArray(np.zeros(5))
da_not_zeros = xr.DataArray(np.ones(5))
assert not _non_zero(xr.Dataset({"a": da_zeros}), ["a"])
assert _non_zero(xr.Dataset({"b": da_not_zeros}), ["b"])
assert not _non_zero(xr.Dataset({"a": da_zeros, "b": da_not_zeros}), ["a"])
assert not _non_zero(xr.Dataset({"a": da_zeros}), ["b"])
assert _non_zero(xr.Dataset({"a": da_zeros, "b": da_not_zeros}), ["a", "b"])
| 36.478261 | 82 | 0.671037 |
4a19f18bf53434cf3c1ce8923b78a031c7f1d898
| 4,818 |
py
|
Python
|
build/lib/detect_secrets/core/potential_secret.py
|
nsonaniya2010/detect-secrets
|
f599e94128add8995280a373566e9b0974724b36
|
[
"Apache-2.0"
] | 1 |
2021-03-15T15:12:42.000Z
|
2021-03-15T15:12:42.000Z
|
build/lib/detect_secrets/core/potential_secret.py
|
nsonaniya2010/detect-secrets
|
f599e94128add8995280a373566e9b0974724b36
|
[
"Apache-2.0"
] | null | null | null |
build/lib/detect_secrets/core/potential_secret.py
|
nsonaniya2010/detect-secrets
|
f599e94128add8995280a373566e9b0974724b36
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
from ..util.color import AnsiColor
from ..util.color import colorize
class PotentialSecret:
"""This custom data type represents a string found, matching the
plugin rules defined in SecretsCollection, that has the potential
to be a secret that we actually care about.
"Potential" is the operative word here, because of the nature of
false positives.
We use this custom class so that we can more easily generate data
structures and do object-based comparisons with other PotentialSecrets,
without actually knowing what the secret is.
"""
def __init__(
self,
type: str,
filename: str,
secret: str,
line_number: int = 0,
is_secret: Optional[bool] = None,
is_verified: bool = False,
) -> None:
"""
:param type: human-readable secret type, defined by the plugin
that generated this PotentialSecret. e.g. "High Entropy String"
:param filename: name of file that this secret was found
:param secret: the actual secret identified
:param line_number: location of secret, within filename.
Merely used as a reference for easy triage.
:param is_secret: whether or not the secret is a true- or false- positive
:param is_verified: whether the secret has been externally verified
"""
self.type = type
self.filename = filename
self.line_number = line_number
self.set_secret(secret)
self.is_secret = is_secret
self.is_verified = is_verified
        # If two PotentialSecrets have the same values for these fields,
        # they are considered equal. Line numbers are also compared here
        # (hence the 'lineno_added' marker below), even though they are
        # subject to change between scans.
self.fields_to_compare = ['filename', 'secret_hash', 'type', 'line_number'] # lineno_added
def set_secret(self, secret: str) -> None:
self.secret_hash: str = self.hash_secret(secret)
# Note: Originally, we never wanted to keep the secret value in memory,
# after finding it in the codebase. However, to support verifiable
# secrets (and avoid the pain of re-scanning again), we need to
# keep the plaintext in memory as such.
#
# This value should never appear in the baseline though, seeing that
# we don't want to create a file that contains all plaintext secrets
# in the repository.
self.secret_value: Optional[str] = secret
@staticmethod
def hash_secret(secret: str) -> str:
"""This offers a way to coherently test this class, without mocking self.secret_hash."""
return hashlib.sha1(secret.encode('utf-8')).hexdigest()
@classmethod
def load_secret_from_dict(cls, data: Dict[str, Union[str, int, bool]]) -> 'PotentialSecret':
"""Custom JSON decoder"""
kwargs: Dict[str, Any] = {
'type': str(data['type']),
'filename': str(data['filename']),
'secret': 'will be replaced',
}
# Optional parameters
for parameter in {
'line_number',
'is_secret',
'is_verified',
}:
if parameter in data:
kwargs[parameter] = data[parameter]
output = cls(**kwargs)
output.secret_value = None
output.secret_hash = str(data['hashed_secret'])
return output
def json(self) -> Dict[str, Union[str, int, bool]]:
"""Custom JSON encoder"""
attributes: Dict[str, Union[str, int, bool]] = {
'type': self.type,
'filename': self.filename,
'hashed_secret': self.secret_hash,
'is_verified': self.is_verified,
}
if self.line_number:
attributes['line_number'] = self.line_number
if self.is_secret is not None:
attributes['is_secret'] = self.is_secret
return attributes
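    # Illustrative only (hypothetical values): for
    #     secret = PotentialSecret('Hex High Entropy String', 'config.py', 'deadbeef', line_number=3)
    # the round trip PotentialSecret.load_secret_from_dict(secret.json())
    # yields an object that compares equal to `secret`, since comparison uses
    # the hashed secret rather than the plaintext (which is dropped on load).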
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PotentialSecret):
return NotImplemented
return all(
getattr(self, field) == getattr(other, field)
for field in self.fields_to_compare
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return hash(
tuple(
getattr(self, x)
for x in self.fields_to_compare
),
)
def __str__(self) -> str:
return (
f'Secret Type: {colorize(self.type, AnsiColor.BOLD)}\n'
f'Location: {self.filename}:{self.line_number}\n'
)
| 34.661871 | 99 | 0.611249 |
4a19f228ce6a39b9091b5bd219f2685522cfd3fc
| 4,013 |
py
|
Python
|
ghostvlad/toolkits.py
|
lylyhan/Speaker-Diarization
|
8d4cd899c352c18f85c173315b2eaecd1dcb6cc6
|
[
"Apache-2.0"
] | null | null | null |
ghostvlad/toolkits.py
|
lylyhan/Speaker-Diarization
|
8d4cd899c352c18f85c173315b2eaecd1dcb6cc6
|
[
"Apache-2.0"
] | null | null | null |
ghostvlad/toolkits.py
|
lylyhan/Speaker-Diarization
|
8d4cd899c352c18f85c173315b2eaecd1dcb6cc6
|
[
"Apache-2.0"
] | 1 |
2022-01-20T12:56:45.000Z
|
2022-01-20T12:56:45.000Z
|
import os
import numpy as np
def initialize_GPU(args):
# Initialize GPUs
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
return session
def get_chunks(l, n):
# For item i in a range that is a length of l,
for i in range(0, len(l), n):
# Create an index range for l of n items:
yield l[i:i+n]
def debug_generator(generator):
import cv2
import pdb
    G = next(generator)
for i,img in enumerate(G[0]):
path = '../sample/{}.jpg'.format(i)
img = np.asarray(img[:,:,::-1] + 128.0, dtype='uint8')
cv2.imwrite(path, img)
# set up multiprocessing
def set_mp(processes=8):
import multiprocessing as mp
def init_worker():
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
global pool
try:
pool.terminate()
except:
pass
if processes:
pool = mp.Pool(processes=processes, initializer=init_worker)
else:
pool = None
return pool
# vggface2 dataset
def get_vggface2_imglist(args):
def get_datalist(s):
file = open('{}'.format(s), 'r')
datalist = file.readlines()
imglist = []
labellist = []
for i in datalist:
linesplit = i.split(' ')
imglist.append(linesplit[0])
labellist.append(int(linesplit[1][:-1]))
return imglist, labellist
print('==> calculating image lists...')
# Prepare training data.
imgs_list_trn, lbs_list_trn = get_datalist(args.trn_meta)
imgs_list_trn = [os.path.join(args.data_path, i) for i in imgs_list_trn]
imgs_list_trn = np.array(imgs_list_trn)
lbs_list_trn = np.array(lbs_list_trn)
# Prepare validation data.
imgs_list_val, lbs_list_val = get_datalist(args.val_meta)
imgs_list_val = [os.path.join(args.data_path, i) for i in imgs_list_val]
imgs_list_val = np.array(imgs_list_val)
lbs_list_val = np.array(lbs_list_val)
return imgs_list_trn, lbs_list_trn, imgs_list_val, lbs_list_val
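# Illustrative only: the meta files above are expected to hold one
# "<relative image path> <integer label>" pair per line, e.g.
#     n000002/0001.jpg 0
# since get_datalist() splits each line at the space and strips the trailing
# newline character from the label before casting it to int.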
def get_imagenet_imglist(args, trn_meta_path='', val_meta_path=''):
with open(trn_meta_path) as f:
strings = f.readlines()
trn_list = np.array([os.path.join(args.data_path, '/'.join(string.split()[0].split(os.sep)[-4:]))
for string in strings])
trn_lb = np.array([int(string.split()[1]) for string in strings])
f.close()
with open(val_meta_path) as f:
strings = f.readlines()
val_list = np.array([os.path.join(args.data_path, '/'.join(string.split()[0].split(os.sep)[-4:]))
for string in strings])
val_lb = np.array([int(string.split()[1]) for string in strings])
f.close()
return trn_list, trn_lb, val_list, val_lb
def get_voxceleb2_datalist(args, path):
with open(path) as f:
strings = f.readlines()
audiolist = np.array([os.path.join(args.data_path, string.split()[0]) for string in strings])
labellist = np.array([int(string.split()[1]) for string in strings])
f.close()
return audiolist, labellist
def calculate_eer(y, y_score):
# y denotes groundtruth scores,
# y_score denotes the prediction scores.
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
thresh = interp1d(fpr, thresholds)(eer)
return eer, thresh
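# Illustrative only (toy scores, not from a real model):
#     eer, thresh = calculate_eer([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
# returns the operating point where the false-acceptance and false-rejection
# rates are equal, together with the score threshold that achieves it.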
def sync_model(src_model, tgt_model):
print('==> synchronizing the model weights.')
params = {}
for l in src_model.layers:
params['{}'.format(l.name)] = l.get_weights()
for l in tgt_model.layers:
if len(l.get_weights()) > 0:
l.set_weights(params['{}'.format(l.name)])
return tgt_model
| 31.108527 | 105 | 0.634936 |
4a19f2439be9a45a54e9cab1b50298271edfab60
| 6,057 |
py
|
Python
|
pkg/utils/annotation/annotation.py
|
ispeakc0de/litmus-python
|
9e3e49002f25598cecd7828aad604b14a6d5ad40
|
[
"Apache-2.0"
] | null | null | null |
pkg/utils/annotation/annotation.py
|
ispeakc0de/litmus-python
|
9e3e49002f25598cecd7828aad604b14a6d5ad40
|
[
"Apache-2.0"
] | null | null | null |
pkg/utils/annotation/annotation.py
|
ispeakc0de/litmus-python
|
9e3e49002f25598cecd7828aad604b14a6d5ad40
|
[
"Apache-2.0"
] | null | null | null |
import logging
# getDeploymentName derive the deployment name belongs to the given target pod
# it extract the parent name from the owner references
def getDeploymentName(targetPod,chaosDetails, clients):
rsOwnerRef = targetPod.metadata.owner_references
for own in rsOwnerRef :
if own.kind == "ReplicaSet" :
try:
rs = clients.clientApps.read_namespaced_replica_set(own.name, chaosDetails.AppDetail.Namespace)
except Exception as exp:
return "", exp
ownerRef = rs.metadata.owner_references
for own in ownerRef:
if own.kind == "Deployment":
return own.name, None
return "", ValueError("no deployment found for {} pod".format(targetPod.Name))
# getStatefulsetName derive the statefulset name belongs to the given target pod
# it extract the parent name from the owner references
def getStatefulsetName(targetPod,chaosDetails, clients):
ownerRef = targetPod.metadata.owner_references
for own in ownerRef:
if own.kind == "StatefulSet":
return own.name, None
return "", ValueError("no statefulset found for {} pod".format(targetPod.Name))
# getDaemonsetName derive the daemonset name belongs to the given target pod
# it extract the parent name from the owner references
def getDaemonsetName(targetPod, chaosDetails, clients):
ownerRef = targetPod.metadata.owner_references
for own in ownerRef:
if own.kind == "DaemonSet":
return own.name, None
return "", ValueError("no daemonset found for {} pod".format(targetPod.Name))
# getDeploymentConfigName derive the deploymentConfig name belongs to the given target pod
# it extract the parent name from the owner references
def getDeploymentConfigName(targetPod, chaosDetails, clients):
rcOwnerRef = targetPod.metadata.owner_references
    for own in rcOwnerRef:
if own.kind == "ReplicationController":
try:
rc = clients.clientCoreV1.read_namespaced_replication_controller(own.name, chaosDetails.AppDetail.Namespace)
except Exception as exp:
return "", exp
ownerRef = rc.metadata.owner_references
for own in ownerRef:
if own.kind == "DeploymentConfig":
return own.name, None
return "", ValueError("No deploymentConfig found for {} pod".format(targetPod.Name))
# getDeploymentConfigName derive the rollout name belongs to the given target pod
# it extract the parent name from the owner references
def getRolloutName(targetPod, chaosDetails, clients):
rsOwnerRef = targetPod.metadata.owner_references
for own in rsOwnerRef :
if own.kind == "ReplicaSet":
try:
rs = clients.clientsAppsV1.read_namespaced_replica_set(own.name, chaosDetails.AppDetail.Namespace)
except Exception as exp:
return "", exp
ownerRef = rs.metadata.owner_references
for own in ownerRef:
if own.kind == "Rollout":
return own.name, None
return "", ValueError("no rollout found for {} pod".format(targetPod.Name))
# GetParentName derive the parent name of the given target pod
def GetParentName(clients, targetPod, chaosDetails):
kind = chaosDetails.AppDetail.Kind
if kind == "deployment" or kind == "deployments":
return getDeploymentName(targetPod,chaosDetails, clients)
elif kind == "statefulset" or kind == "statefulsets":
return getStatefulsetName(targetPod,chaosDetails, clients)
elif kind == "daemonset" or kind == "daemonsets":
return getDaemonsetName(targetPod,chaosDetails, clients)
elif kind == "deploymentConfig" or kind == "deploymentConfigs":
return getDeploymentConfigName(targetPod,chaosDetails, clients)
elif kind == "rollout" or kind == "rollouts":
return getRolloutName(targetPod,chaosDetails, clients)
else:
return False, ValueError("Appkind: {} is not supported".format(kind))
# IsParentAnnotated check whether the target pod's parent is annotated or not
def IsParentAnnotated(clients, parentName, chaosDetails):
if chaosDetails.AppDetail.Kind.lower() == "deployment" or chaosDetails.AppDetail.Kind.lower() == "deployments":
try:
deploy = clients.clientApps.read_namespaced_deployment(name=parentName, namespace=chaosDetails.AppDetail.Namespace)
except Exception as exp:
return False, exp
if deploy.metadata.annotations.get(chaosDetails.AppDetail.AnnotationKey) == chaosDetails.AppDetail.AnnotationValue:
return True, None
elif chaosDetails.AppDetail.Kind.lower() =="statefulset" or chaosDetails.AppDetail.Kind.lower() == "statefulsets":
try:
sts = clients.clientApps.read_namespaced_stateful_set(name=parentName, namespace=chaosDetails.AppDetail.Namespace)
except Exception as exp:
return False, exp
if sts.metadata.annotations.get(chaosDetails.AppDetail.AnnotationKey) == chaosDetails.AppDetail.AnnotationValue:
return True, None
elif chaosDetails.AppDetail.Kind.lower() =="daemonset" or chaosDetails.AppDetail.Kind.lower() == "daemonsets":
try:
ds = clients.clientApps.read_namespaced_daemon_set(name=parentName, namespace=chaosDetails.AppDetail.Namespace)
except Exception as exp:
return False, exp
if ds.metadata.annotations.get(chaosDetails.AppDetail.AnnotationKey) == chaosDetails.AppDetail.AnnotationValue:
return True, None
elif chaosDetails.AppDetail.Kind.lower() == "deploymentconfig":
try:
dc = clients.clientDyn.resources.get(api_version="v1", kind="DeploymentConfig", group="apps.openshift.io").get(namespace=chaosDetails.AppDetail.Namespace, name=parentName)
except Exception as exp:
return False, exp
if dc.metadata.annotations.get(chaosDetails.AppDetail.AnnotationKey) == chaosDetails.AppDetail.AnnotationValue:
return True, None
elif chaosDetails.AppDetail.Kind.lower() == "rollout":
try:
ro = clients.clientDyn.resources.get(api_version="v1alpha1", kind="Rollout", group="argoproj.io").get(namespace=chaosDetails.AppDetail.Namespace, name=parentName)
except Exception as exp:
return "", exp
if ro.metadata.annotations.get(chaosDetails.AppDetail.AnnotationKey) == chaosDetails.AppDetail.AnnotationValue:
return True, None
else:
return False, ValueError("{} appkind is not supported".format(chaosDetails.AppDetail.Kind))
return False, None
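# Illustrative call pattern (sketch, not part of the original module). Both helpers are
# expected to be invoked with the framework's `clients` wrapper and a populated
# `chaosDetails` object; how those objects are constructed is assumed here.
#
#   parentName, err = GetParentName(clients, targetPod, chaosDetails)
#   if err is None:
#       annotated, err = IsParentAnnotated(clients, parentName, chaosDetails)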
| 40.925676 | 174 | 0.768697 |
4a19f2f37f5f3faee70274b14593e80475d15b02
| 2,481 |
py
|
Python
|
portfolio/migrations/0015_auto_20170519_1722.py
|
Arlefreak/api.afk
|
7e968f6dc3b593ad8f20a4962cb7e3c91bf3ab79
|
[
"MIT"
] | null | null | null |
portfolio/migrations/0015_auto_20170519_1722.py
|
Arlefreak/api.afk
|
7e968f6dc3b593ad8f20a4962cb7e3c91bf3ab79
|
[
"MIT"
] | 5 |
2016-01-21T20:21:42.000Z
|
2016-01-22T23:00:33.000Z
|
portfolio/migrations/0015_auto_20170519_1722.py
|
Arlefreak/ApiArlefreak
|
7e968f6dc3b593ad8f20a4962cb7e3c91bf3ab79
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-05-19 17:22
from __future__ import unicode_literals
import adminsortable.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0014_auto_20170102_0508'),
]
operations = [
migrations.AlterField(
model_name='image',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AlterField(
model_name='image',
name='project',
field=adminsortable.fields.SortableForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.Project'),
),
migrations.AlterField(
model_name='link',
name='category',
field=adminsortable.fields.SortableForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.LinkCategory'),
),
migrations.AlterField(
model_name='link',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AlterField(
model_name='link',
name='project',
field=adminsortable.fields.SortableForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.Project'),
),
migrations.AlterField(
model_name='project',
name='category',
field=adminsortable.fields.SortableForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.ProjectCategory'),
),
migrations.AlterField(
model_name='project',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AlterField(
model_name='projectcategory',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AlterField(
model_name='video',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AlterField(
model_name='video',
name='project',
field=adminsortable.fields.SortableForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.Project'),
),
]
| 36.485294 | 135 | 0.620717 |
4a19f32d4360d9b3045304f35302bf7d5890da0b
| 8,312 |
py
|
Python
|
server/tests/comparison/leopard/impala_docker_env.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | null | null | null |
server/tests/comparison/leopard/impala_docker_env.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | null | null | null |
server/tests/comparison/leopard/impala_docker_env.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | 2 |
2019-09-22T07:59:28.000Z
|
2021-02-25T21:56:07.000Z
|
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module generates a docker environment for a job'''
from __future__ import division
from fabric.api import sudo, run, settings
from logging import getLogger
from os.path import join as join_path
from time import sleep
import random
import os
IMPALA_HOME = '/home/dev/Impala'
CORE_PATH = '/tmp/core_files'
DEFAULT_BRANCH_NAME = 'origin/cdh5-trunk'
DEFAULT_DOCKER_IMAGE_NAME = 'impala-desktop.ca.cloudera.com:5000/ubuntu-14.04:cdh5-trunk'
DOCKER_USER_NAME = 'dev'
NUM_START_ATTEMPTS = 50
NUM_FABRIC_ATTEMPTS = 50
LOG = getLogger('ImpalaDockerEnv')
def retry(func):
'''Retry decorator.'''
def wrapper(*args, **kwargs):
attempt_num = 0
while True:
attempt_num += 1
try:
return func(*args, **kwargs)
except:
LOG.exception('{0} exception [{1}] (try: {2})'.format(
func.__name__, args[0], attempt_num))
if attempt_num == NUM_FABRIC_ATTEMPTS:
raise
sleep_time = random.randint(1, attempt_num)
sleep(sleep_time)
return wrapper
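# Usage note (added for clarity): `retry` is applied call-site style rather than with
# decorator syntax in this module, e.g. `retry(run)(command)` or
# `retry(sudo)(command, pty=True)`, so each attempt re-invokes the wrapped fabric
# operation until it succeeds or NUM_FABRIC_ATTEMPTS is exhausted.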
class ImpalaDockerEnv(object):
  '''Represents an Impala environment inside a Docker container. Used for starting
Impala, getting stack traces after a crash and keeping track of the ports on which SSH,
Postgres and Impala are running.
'''
def __init__(self, git_command):
self.ssh_port = None
self.impala_port = None
self.postgres_port = None
self.container_id = None
self.git_command = git_command
self.host = os.environ['TARGET_HOST']
self.host_username = os.environ['TARGET_HOST_USERNAME']
self.docker_image_name = os.environ.get(
'DOCKER_IMAGE_NAME', DEFAULT_DOCKER_IMAGE_NAME)
def stop_docker(self):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
retry(sudo)('docker stop {0}'.format(self.container_id), pty=True)
retry(sudo)('docker rm {0}'.format(self.container_id), pty=True)
def start_new_container(self):
'''Starts a container with port forwarding for ssh, impala and postgres. '''
for _ in range(NUM_START_ATTEMPTS):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
port = random.randint(0, 999)
self.ssh_port = 55000 + port
self.impala_port = 56000 + port
self.postgres_port = 57000 + port
start_command = (
'docker pull {docker_image_name} '
'&& docker run -d -t -p {postgres_port}:5432 -p {ssh_port}:22 '
'-p {impala_port}:21050 {docker_image_name} /bin/docker-boot-daemon').format(
ssh_port = self.ssh_port,
impala_port = self.impala_port,
postgres_port = self.postgres_port,
docker_image_name = self.docker_image_name)
try:
self.container_id = sudo(start_command, pty=True)
except:
LOG.exception('start_new_container')
if self.container_id is not None:
break
else:
LOG.error('Container failed to start after {0} attempts'.format(NUM_START_ATTEMPTS))
def get_git_hash(self):
    '''Returns the Git hash of the current commit. '''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
git_hash = retry(run)('cd {IMPALA_HOME} && git rev-parse --short HEAD'.format(
IMPALA_HOME = IMPALA_HOME))
return git_hash
def run_all(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
run_all_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/create-test-configuration.sh '
'&& {IMPALA_HOME}/testdata/bin/run-all.sh').format(
IMPALA_HOME = IMPALA_HOME)
retry(run)(run_all_command, pty=False)
def build_impala(self):
'''Fetches and Builds Impala. If git_command is not present the latest version is
fetched by default. '''
if self.git_command:
build_command = (
'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} '
          '&& docker-boot && cd {IMPALA_HOME} && {git_command} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
git_command = self.git_command,
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
else:
build_command = (
'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} '
'&& docker-boot && cd {IMPALA_HOME} '
'&& source {IMPALA_HOME}/bin/impala-config.sh').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
result = retry(run)(build_command, pty=False)
return result
def start_impala(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
start_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/start-impala-cluster.py').format(IMPALA_HOME = IMPALA_HOME)
result = retry(run)(start_command, pty=False)
return result
def is_impala_running(self):
'''Check that exactly 3 impalads are running inside the docker instance.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
return retry(run)('ps aux | grep impalad').count('/service/impalad') == 3
def get_stack(self):
'''Finds the newest core file and extracts the stack trace from it using gdb.
'''
IMPALAD_PATH = '{IMPALA_HOME}/be/build/debug/service/impalad'.format(
IMPALA_HOME = IMPALA_HOME)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
core_file_name = retry(run)('ls {0} -t1 | head -1'.format(CORE_PATH))
LOG.info('Core File Name: {0}'.format(core_file_name))
if 'core' not in core_file_name:
return None
core_full_path = join_path(CORE_PATH, core_file_name)
stack_trace = retry(run)('gdb {0} {1} --batch --quiet --eval-command=bt'.format(
IMPALAD_PATH, core_full_path))
self.delete_core_files()
return stack_trace
def delete_core_files(self):
'''Delete all core files. This is usually done after the stack was extracted.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
retry(run)('rm -f {0}/core.*'.format(CORE_PATH))
def prepare(self):
'''Create a new Impala Environment. Starts a docker container and builds Impala in it.
'''
self.start_new_container()
LOG.info('Container Started')
# Wait for the SSH service to start inside the docker instance. Usually takes 1
# second. This is simple and reliable. An alternative implementation is to poll with
# timeout if SSH was started.
sleep(10)
result = self.build_impala()
LOG.info('Build Complete, Result: {0}'.format(result))
try:
result = self.run_all()
except Exception:
LOG.info('run_all exception')
LOG.info('Run All Complete, Result: {0}'.format(result))
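# Illustrative driver (sketch, not part of the original module). It assumes the
# TARGET_HOST, TARGET_HOST_USERNAME and DOCKER_PASSWORD environment variables are set
# and that the git command shown is appropriate for the checkout being tested.
#
#   env = ImpalaDockerEnv(git_command='git fetch origin && git checkout origin/cdh5-trunk')
#   env.prepare()
#   env.start_impala()
#   if not env.is_impala_running():
#       print(env.get_stack())
#   env.stop_docker()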
| 39.393365 | 90 | 0.659408 |
4a19f4be3ffae27c69158bf574ac28d7c347091c
| 26,515 |
py
|
Python
|
Data_Clean_Functions.py
|
Cian-Byrne/ML_Yacht_Performance
|
97444b768a5d4335de49ab21890b13412802548e
|
[
"MIT"
] | null | null | null |
Data_Clean_Functions.py
|
Cian-Byrne/ML_Yacht_Performance
|
97444b768a5d4335de49ab21890b13412802548e
|
[
"MIT"
] | null | null | null |
Data_Clean_Functions.py
|
Cian-Byrne/ML_Yacht_Performance
|
97444b768a5d4335de49ab21890b13412802548e
|
[
"MIT"
] | null | null | null |
"""
Functions to extract relevant data from a dataframe.
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from scipy import signal
import xlrd as xl
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from datetime import datetime
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import max_error
from sklearn.metrics import r2_score
class Funcs():
def __init__(self):
return
def Time_info(self, df):
max(df.Utc)
min(df.Utc)
print("Start Time: %s" % (min(df.Utc)))
print("End Time: %s" % (max(df.Utc)))
print("Length of time elapsed: %.2f hours" % (len(df.Utc.unique())/60**2))
def df_extract(self, start, stop, df): # returns a df with time positions betweeen start and stop from original df
df = df.set_index(df['Utc'])
df = df.between_time(start, stop)
df = df.rename_axis(None)
df = df.reset_index()
df = df.drop('index', axis=1)
return df
def unfiltered_plot(self, variable, df): # Plots the unfiltered variable against time
df = df[np.isfinite(df[variable])]
y = df[variable]
return plt.plot(df['Utc'], y)
def filtered_plot(self, variable, df, N, Wn): # Plots filtered variable data against time
df = df[np.isfinite(df[variable])]
y = df[variable]
c, a = signal.butter(N, Wn)
y = signal.filtfilt(c, a, y)
return plt.plot(df['Utc'], y)
def Var_filter_compare(self, variable, N, Wn, df): # Plots both filtered and unfiltered data on the same plot
plt.figure(figsize=(14, 8))
        self.unfiltered_plot(variable, df)
        self.filtered_plot(variable, df, N, Wn)
plt.ylabel(variable)
plt.xlabel('Time')
plt.show()
def Segment_points(self, df): # Returns the Lat Lon positions when a manoeuver occurs.
df = df[np.isfinite(df['Hdg'])]
df = df.reset_index().drop('index', axis=1)
i = 30
Lat_arr = []
Lon_arr = []
Time_arr = []
while i < len(df):
angle = min(abs(df.at[i, 'Hdg'] - df.at[i-30, 'Hdg']),
360 - abs(df.at[i, 'Hdg'] - df.at[i-30, 'Hdg']))
if angle > 50:
Lon_arr = np.append(Lon_arr, df.at[i, 'Lon'])
Lat_arr = np.append(Lat_arr, df.at[i, 'Lat'])
Time_arr = np.append(Time_arr, df.at[i, 'Utc'])
i += 40
i += 1
return Lon_arr, Lat_arr, Time_arr
def Segment_times(self, df): # Returns the times when a manoeuver occurs
T_arr = []
i = 1
while i < len(df):
current_sign = np.sign(df.loc[i, "Twa"])
previous_sign = np.sign(df.loc[i - 1, "Twa"])
if current_sign != previous_sign:
T_arr = np.append(T_arr, df.loc[i, 'Utc'])
                i += 40  # Prevents some noise mid manoeuver registering as two separate manoeuvers
i += 1
return T_arr
def time_to_str(self, time): # Converts time to string format to be used in Segment_df()
t = str(time.hour)+':'+str(time.minute)+':'+str(time.second)
return t
def Segment_plot(self, df): # Plots each individual segment from a dataframe
segment_df = self.Segment_df(df)
i = 1
while i <= len(segment_df['Segment'].unique()):
plt.scatter(segment_df[segment_df['Segment'] == i].Lon, segment_df[segment_df['Segment'] == i].Lat, alpha=0.7, s=4)
i += 1
def Segment_df(self, df): # Returns a df labeled to include each segment
# time_arr = self.Segment_times(df)
lonarr, latarr, time_arr = self.Segment_points(df)
begin = self.time_to_str(df.at[0, 'Utc'])
end = self.time_to_str(time_arr[0])
Segment_df = self.df_extract(begin, end, df)
Segment_df.insert(0, "Segment", 1)
i = 1
while i < len(time_arr):
begin = self.time_to_str(time_arr[i-1])
end = self.time_to_str(time_arr[i])
df_temp = self.df_extract(begin, end, df)
df_temp.insert(0, "Segment", i+1)
Segment_df = pd.concat([Segment_df, df_temp])
i += 1
return Segment_df
# Label the tacks and gybe contained in a
def manoeuver_label(self, df, man_len=60):
# Get the times that a manoever takes place
Lon, Latt, times = self.Segment_points(df)
if "Pred" in df.columns:
my_label = "Pred"
elif "label" in df.columns:
my_label = "label"
for time in times:
index = df[df['Utc'] == time].index[0]
label = df.loc[index - man_len/2:index + man_len/2, my_label].value_counts().idxmax()
if label == 'UW':
df.loc[index-man_len/2:index+man_len/2, my_label] = 'Tack'
elif label == 'DW':
df.loc[index-man_len/2:index+man_len/2, my_label] = 'Gybe'
return df
def Segment_scatter(self, df): # Plots the positions of each manoeuver
my_lonarr, my_latarr, my_timearr = self.Segment_points(df)
plt.scatter(my_lonarr, my_latarr, alpha=0.8, s=45)
def Segment_Label(self, UW_arr, DW_arr, NR_arr, df): # Gives label to data in a df based on list of segments provided
Seg_df = self.Segment_df(df)
Seg_df.insert(0, 'label', '')
Seg_df = Seg_df.reset_index().drop('index', axis=1)
i = 0
while i < len(Seg_df):
for seg in UW_arr:
if Seg_df.at[i, "Segment"] == seg:
Seg_df.at[i, 'label'] = 'UW'
for seg in DW_arr:
if Seg_df.at[i, "Segment"] == seg:
Seg_df.at[i, "label"] = 'DW'
for seg in NR_arr:
if Seg_df.at[i, "Segment"] == seg:
Seg_df.at[i, "label"] = 'NR'
i += 1
return Seg_df
def Segment_time_label(self, df, Segment, TRANSITION, label_1, label_2):
seg_df = df[df['Segment'] == Segment]
START_index = seg_df.first_valid_index()
TRANSITION_index = seg_df.index[seg_df['Utc'] == TRANSITION].to_list()[0]
END_index = seg_df.last_valid_index()
i = START_index
while i < TRANSITION_index:
df.at[i, 'label'] = label_1
i += 1
while i <= END_index:
df.at[i, 'label'] = label_2
i += 1
return df
def ARRAY_check(self, UW_arr, DW_arr, NR_arr, Interval_arr, df_segment):
l1 = len(UW_arr)
l2 = len(DW_arr)
l3 = len(NR_arr)
l4 = len(Interval_arr)
len_tot = l1+l2+l3+l4
len_true = len(df_segment.Segment.unique())
if len_true == len_tot:
print("Correct Number of segments labeled")
else:
print("Number of Segments: %d" % (len_true))
print("Number of Segments Labeled: %d\n" % (len_tot))
lin = np.linspace(1, len(df_segment.Segment.unique()), len(df_segment.Segment.unique()))
my_arr = np.concatenate((UW_arr, DW_arr, NR_arr, lin, Interval_arr))
unique, Counts = np.unique(my_arr, return_counts=True)
i = 0
while i < len(Counts):
if Counts[i] != 2:
print("ERROR with value %d" % (i+1))
i += 1
def df_row_nan(self, df, drop_list): # Returns a trimmed df that has no NaN values in first row
i = 0
while i < len(df):
my_arr = np.isfinite(np.array(df.drop(drop_list, axis=1).loc[i]))
sum_count = 0
j = 0
while j < len(my_arr):
if my_arr[j] == True:
sum_count += 1
j += 1
if sum_count == len(df.columns)-1:
break
df = df.drop(i)
i += 1
df = df.reset_index().drop('index', axis=1) # Reindexes new df
return df
def df_fill_av(self, df, drop_list): # Fills nan values in df with the average of its neighbours values.
df = self.df_row_nan(df, drop_list) # Ensures first row has no NaN values
for var in df.columns.drop(drop_list):
i = 0
while i < len(df)-2:
if math.isnan(df.at[i, var]) == True and math.isnan(df.at[i+1, var]) == False:
df.at[i, var] = (df.at[i-1, var]+df.at[i+1, var])*0.5
elif math.isnan(df.at[i, var]) == True:
df.at[i, var] = (df.at[i-1, var]+df.at[i+2, var])*0.5
i += 1
# Returns averaged data excluding the last two rows which could not be averaged
return df.drop([len(df)-1, len(df)-2])
def gps_clean(self, df): # Removes erroneous gps recordings. NOTE: will not work if distance between points is actually large
df = df[df['Lat'] < df.Lat.mean()+0.5]
df = df[df['Lat'] > df.Lat.mean()-0.5]
df = df[df['Lon'] < df.Lon.mean()+0.5]
df = df[df['Lon'] > df.Lon.mean()-0.5]
return df
# Takes UW and Segment labeled df and returns which segments which have greater than 50% votes for UW.
def Segment_keep(self, df):
i = 1
keep_list = []
while i < len(df.Segment.unique()):
Len = len(df[df['Segment'] == i])
N_UW = len(df[df['Segment'] == i][df[df['Segment'] == i]['UW'] == 1])
if N_UW > 0.5*Len and len(df[df['Segment'] == i]) > 45:
keep_list = np.append(keep_list, i)
i += 1
return keep_list
    def df_UW(self, df):  # Returns a data frame with only the segments that we want to keep
keep = self.Segment_keep(df)
df_UW = df[df["Segment"] == keep[0]]
for seg in keep:
df_UW = pd.concat([df_UW, df[df["Segment"] == seg]])
return df_UW
def arrow_plot(self, df, BBox):
Lon_len = BBox[1]-BBox[0]
Lat_len = BBox[3]-BBox[2]
a1 = np.arctan((Lon_len)/(Lat_len))
a2 = a1+2*np.arctan((Lat_len)/(Lon_len))
a3 = a2+2*a1
a4 = a3+a2-a1
TWD = df.Twd.mean()*np.pi/180
x_centre = (BBox[1]+BBox[0])/2
y_centre = (BBox[3]+BBox[2])/2
length = 0.03*max(Lon_len, Lat_len)
if TWD <= a1:
x = x_centre+np.tan(TWD)*Lat_len/2
y = y_centre+Lat_len/2
dx = length*np.sin(TWD)
dy = length*np.cos(TWD)
plt.arrow(x, y, -dx, -dy, color='green')
elif TWD > a1 and TWD <= np.pi/2:
x = x_centre+Lon_len/2
y = y_centre+Lon_len/2*np.tan(np.pi/2-TWD)
dx = length*np.cos(np.pi/2-TWD)
dy = length*np.sin(np.pi/2-TWD)
plt.arrow(x, y, -dx, -dy, color='green')
elif TWD > np.pi/2 and TWD <= a2:
x = x_centre+Lon_len/2
y = y_centre-((Lon_len/2)/(np.tan(np.pi-TWD)))
dx = length*np.sin(np.pi-TWD)
dy = length*np.cos(np.pi-TWD)
plt.arrow(x, y, -dx, +dy, color='green')
elif TWD > a2 and TWD <= np.pi:
x = x_centre+Lat_len/2*np.tan(np.pi-TWD)
y = y_centre-Lat_len/2
dx = length*np.sin(np.pi-TWD)
dy = length*np.cos(np.pi-TWD)
plt.arrow(x, y, -dx, +dy, color='green')
elif TWD > np.pi and TWD <= a3:
x = x_centre-Lat_len/2*np.tan(TWD-np.pi)
y = y_centre-Lat_len/2
dx = length*np.sin(TWD-np.pi)
dy = length*np.cos(TWD-np.pi)
plt.arrow(x, y, +dx, +dy, color='green')
elif TWD > a3 and TWD <= 3/2*np.pi:
x = x_centre-Lon_len/2
y = y_centre-Lon_len/2*np.tan(3/2*np.pi-TWD)
dx = length*np.cos(3/2*np.pi-TWD)
dy = length*np.sin(3/2*np.pi-TWD)
plt.arrow(x, y, +dx, +dy, color='green')
elif TWD > 3/2*np.pi and TWD <= a4:
x = x_centre-Lon_len/2
y = y_centre+Lon_len/2*np.tan(TWD-np.pi*3/2)
dx = length*np.cos(TWD-3/2*np.pi)
dy = length*np.sin(TWD-3/2*np.pi)
plt.arrow(x, y, +dx, -dy, color='green')
elif TWD > a4:
x = x_centre-Lat_len/2*np.tan(2*np.pi-TWD)
y = y_centre+Lat_len/2
dx = length*np.sin(2*np.pi-TWD)
dy = length*np.cos(2*np.pi-TWD)
plt.arrow(x, y, +dx, -dy, color='green')
def Norm_Pos(self, window_df):
new_df = pd.DataFrame(columns=window_df.columns)
new_df['Lat_norm'] = 0
new_df['Lon_norm'] = 0
for window in window_df.window.unique():
# Create temporary df for each window to normalise position data
temp_df = window_df[window_df['window'] == window].reset_index().drop('index', axis=1)
# Find Twd for given window
TWD = temp_df.Twd.mean()
# Convert to radians
TWD = TWD*np.pi/180
# Find the max/min positions of Lat/Lon in the temporary df
x_max = temp_df.Lon.max()
x_min = temp_df.Lon.min()
y_max = temp_df.Lat.max()
y_min = temp_df.Lat.min()
i = 0
while i < len(temp_df):
# Scale the Lat/Lon data at each point wrt the given window.
Lon_norm = (temp_df.at[i, 'Lon']-x_min)/(x_max-x_min)
Lat_norm = (temp_df.at[i, 'Lat']-y_min)/(y_max-y_min)
# Translate the axis so that the y axis is pointing in the direction of TWD for given window.
x = Lon_norm*np.cos(-TWD) + Lat_norm*np.sin(-TWD)
y = -Lon_norm*np.sin(-TWD) + Lat_norm*np.cos(-TWD)
if np.isfinite(Lon_norm) == True and np.isfinite(Lat_norm) == True:
                    # Scale each Lon/Lat reading and save in a separate column for simplicity of comparison
temp_df.at[i, 'Lon_norm'] = x
temp_df.at[i, 'Lat_norm'] = y
# Append the new df to include all new information, this is slow.
new_df = new_df.append(temp_df.loc[i, :])
else:
# If normalised values Nan then set values in this window to be 0,0
temp_df.at[i, 'Lon_norm'] = 0
temp_df.at[i, 'Lat_norm'] = 0
# Append the new df to include all new information, this is slow.
new_df = new_df.append(temp_df.loc[i, :])
i += 1
return new_df
# Function comparing labeled and predicted results
def compare(self, Labeled_df, Pred_df):
correct = 0
wrong = 0
total = 0
        # Only compare the times that are present in both data frames to give a more accurate percentage.
l1 = list(Labeled_df.Utc)
l2 = list(Pred_df.Utc)
for time in list(set(l1).intersection(l2)):
# for time in Labeled_df.Utc.unique():
Pred_label = Pred_df[Pred_df['Utc'] == time].Pred.to_list()[0]
Corr_label = Labeled_df[Labeled_df['Utc'] == time].label.to_list()[0]
if Pred_label == Corr_label:
correct += 1
else:
wrong += 1
total += 1
correct_perc = correct/total*100
return correct_perc
def Window(self, df, window_len, overlap):
# Array of unique dates contained in df so that windowing does not go over 2 days.
df['date'] = df['Utc'].map(lambda x: x.strftime('%Y-%m-%d'))
date_arr = df['date'].unique()
# create the new dataframe which the windowed segments will be added to
window_df = pd.DataFrame(columns=df.columns)
window_df['window'] = 0
# Global window count
k = 1
for date in date_arr:
# create dataframe of just given date to work with
date_df = df[df['date'] == date].reset_index().drop('index', axis=1)
date_df['window'] = 0
len_data = len(date_df)
# Set the maximum number of complete time series with given inputs to be n
n = int((len_data-overlap)/(window_len-overlap))
j = 1 # window count
i = 0 # row count
while j <= n:
while i < window_len+(j-1)*(window_len-overlap):
date_df.at[i, 'window'] = k
window_df = window_df.append(date_df.loc[i, ])
i += 1
# Step back the overlap length for indices before continuing to next window
i -= overlap
# Increase the window count for this date
j += 1
# Increase the global window count
k += 1
return window_df
def Labeled_cluster_plot(self, df, UW_arr, DW_arr, NR_arr, Map_arr, Labeled_df_arr, red=100000):
# Array of unique dates contained in df so that windowing does not go over 2 days.
df['date'] = df['Utc'].map(lambda x: x.strftime('%Y-%m-%d'))
date_arr = df['date'].unique()
i = 0
while i < len(df.date.unique()):
Pred_df = df[df['date'] == df.date.unique()[i]]
Map = Map_arr[i]
Labeled_df = Labeled_df_arr[i]
# LHS: Individual cluster plot
BBox = (Pred_df.Lon.min(), Pred_df.Lon.max(), Pred_df.Lat.min(), Pred_df.Lat.max())
plt.figure(figsize=(14, 10))
plt.subplot(1, 2, 1)
plt.imshow(plt.imread(Map), extent=BBox, aspect='equal')
for pred in df.Pred.unique():
if pred == red:
plt.scatter(Pred_df[Pred_df['Pred'] == pred].Lon, Pred_df[Pred_df['Pred']
== pred].Lat, alpha=0.3, s=5, label='0', color='r')
elif pred in UW_arr:
plt.scatter(Pred_df[Pred_df['Pred'] == pred].Lon, Pred_df[Pred_df['Pred']
== pred].Lat, alpha=0.3, s=5, label='0', color='b')
elif pred in DW_arr:
plt.scatter(Pred_df[Pred_df['Pred'] == pred].Lon, Pred_df[Pred_df['Pred']
== pred].Lat, alpha=0.3, s=5, label='0', color='orange')
elif pred in NR_arr:
plt.scatter(Pred_df[Pred_df['Pred'] == pred].Lon, Pred_df[Pred_df['Pred']
== pred].Lat, alpha=0.3, s=5, label='0', color='g')
# plt.legend()
plt.subplot(1, 2, 2)
# RHS: Labeled UW data
BBox = (Labeled_df.Lon.min(), Labeled_df.Lon.max(),
Labeled_df.Lat.min(), Labeled_df.Lat.max())
plt.imshow(plt.imread(Map), extent=BBox, aspect='equal')
plt.scatter(Labeled_df[Labeled_df['label'] == 'UW'].Lon,
Labeled_df[Labeled_df['label'] == 'UW'].Lat, alpha=0.3, s=5, label='UW')
plt.scatter(Labeled_df[Labeled_df['label'] == 'DW'].Lon, Labeled_df[Labeled_df['label']
== 'DW'].Lat, alpha=0.3, s=5, label='DW', color='orange')
plt.scatter(Labeled_df[Labeled_df['label'] == 'NR'].Lon,
Labeled_df[Labeled_df['label'] == 'NR'].Lat, alpha=0.3, s=5, label='NR', color='g')
plt.legend()
plt.show()
i += 1
# Automate process of assigning labels to clusters, the parameters are subject to user guidance.
def auto_labels(self, df):
UW_arr = []
DW_arr = []
NR_arr = []
for pred in df.Pred.unique():
pred_df = df[df['Pred'] == pred]
if pred_df.Bsp.mean() > 6 and pred_df.abs_Leeway.mean() > 2.5: # and pred_df.abs_Heel.mean() > 10 :
UW_arr.append(pred)
# and pred_df.abs_Heel.mean() > 5.0 and pred_df.Bsp_Tws > 0.75 :
elif pred_df.Bsp.mean() > 8 and pred_df.abs_Twa.mean() > 110*np.pi/180 and pred_df.Bsp_Tws.mean() > 0.67 and pred_df.abs_Heel.mean() > 5:
DW_arr.append(pred)
else:
NR_arr.append(pred)
return UW_arr, DW_arr, NR_arr
# Add predicted cluster labels(y_vals) to a given dataframe which may be windowded.
def pred_label_df(self, df, y_vals):
if 'Pred' in df.columns:
df = df.drop('Pred', axis=1)
# Adding prediction column to df
df['Pred'] = 0
if 'window' in df.columns:
# length of window in df
length = len(df[df['window'] == 1])
else:
length = 1
# index count
i = 0
# label count
k = 0
while i < len(df):
df.iloc[i:i+length, -1] = y_vals[k]
i += length
k += 1
return df
def stats(self, df):
        # Statistical description
desc_df = df.describe()
desc_df.loc["+3_std"] = desc_df.loc['mean'] + (desc_df.loc['std'] * 3)
desc_df.loc["-3_std"] = desc_df.loc['mean'] - (desc_df.loc['std'] * 3)
return desc_df
def Scale_df(self, df, features, scaling_type):
# Define each scaler
min_max_scaler = MinMaxScaler()
std_scaler = StandardScaler()
robust_scaler = RobustScaler()
if scaling_type == 'min_max':
for feature in features:
df[feature +
'_scaled'] = min_max_scaler.fit_transform(np.array(df[feature]).reshape(-1, 1))
elif scaling_type == 'standard':
for feature in features:
df[feature + '_scaled'] = std_scaler.fit_transform(np.array(df[feature]).reshape(-1, 1))
elif scaling_type == 'robust':
for feature in features:
df[feature +
'_scaled'] = robust_scaler.fit_transform(np.array(df[feature]).reshape(-1, 1))
else:
print("ERROR in scaling_type input")
return df
def Series_label_df(self, df):
# Give a series number to separate each consecutive time series for later use with RNN etc.
df['Series_num'] = 0
index_arr = df.index
start = index_arr[0]
i = 0
k = 1
while i < len(index_arr)-1:
if abs(index_arr[i] - index_arr[i+1]) > 1:
end = index_arr[i]
df.loc[start:end, 'Series_num'] = k
start = index_arr[i+1]
k += 1
i += 1
return df
def Labeled_df_plot(self, df, Map_arr, Labeled_df_arr):
# Array of unique dates contained in df so that windowing does not go over 2 days.
df['date'] = df['Utc'].map(lambda x: x.strftime('%Y-%m-%d'))
date_arr = df['date'].unique()
i = 0
while i < len(df.date.unique()):
Pred_df = df[df['date'] == df.date.unique()[i]]
Map = Map_arr[i]
Labeled_df = Labeled_df_arr[i]
# LHS: Individual cluster plot
BBox = (Pred_df.Lon.min(), Pred_df.Lon.max(), Pred_df.Lat.min(), Pred_df.Lat.max())
plt.figure(figsize=(14, 10))
plt.imshow(plt.imread(Map), extent=BBox, aspect='equal')
for label in df.label.unique():
if label == 'UW':
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='b')
elif label == 'DW':
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='orange')
elif label == 'NR':
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='g')
elif label == 'Tack':
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='r')
elif label == 'Gybe':
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='purple')
else:
plt.scatter(Pred_df[Pred_df['label'] == label].Lon,
Pred_df[Pred_df['label'] == label].Lat, alpha=0.3, s=5, color='black')
i += 1
def mean_absolute_percentage_error(self, y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def VPP_compare(self, df):
feat_list = ['Bsp', 'Heel', 'Leeway']
for feat in feat_list:
VPP_feat = 'VPP_' + feat
if feat == 'Heel' or feat == 'Leeway':
feat = 'abs_' + feat
RMSE = mean_squared_error(df[feat], df[VPP_feat], squared=False)
MAE = mean_absolute_error(df[feat], df[VPP_feat])
MAPE = self.mean_absolute_percentage_error(df[feat], df[VPP_feat])
r2 = r2_score(df[feat], df[VPP_feat])
explained_variance = explained_variance_score(df[feat], df[VPP_feat])
maximum_error = max_error(df[feat], df[VPP_feat])
print('-' * 100)
print("RMSE %s: %.3f" % (feat, RMSE))
print("MAE %s: %.3f" % (feat, MAE))
print("MAPE %s: %.3f" % (feat, MAPE))
print("R^2 %s: %.3f" % (feat, r2))
print("Explained variance %s: %.3f" % (feat, explained_variance))
print("Maximum Error %s: %.3f\n" % (feat, maximum_error))
def print_scores(self, y_test, y_pred):
RMSE = mean_squared_error(y_test, y_pred, squared=False)
MAE = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
maximum_error = max_error(y_test, y_pred)
print("RMSE: %.3f" % (RMSE))
print("MAE: %.3f" % (MAE))
print("R^2: %.3f" % (r2))
print("Maximum Error: %.3f\n" % (maximum_error))
| 33.185232 | 149 | 0.524948 |
4a19f4ee5563fb4eb092ca0a934ebba752c3ac6e
| 9,318 |
py
|
Python
|
aruco.py
|
PavelNajman/ArUcoDetection
|
e3873ae4ea4fe6e4e62209ae12c75c72848509b7
|
[
"MIT"
] | null | null | null |
aruco.py
|
PavelNajman/ArUcoDetection
|
e3873ae4ea4fe6e4e62209ae12c75c72848509b7
|
[
"MIT"
] | null | null | null |
aruco.py
|
PavelNajman/ArUcoDetection
|
e3873ae4ea4fe6e4e62209ae12c75c72848509b7
|
[
"MIT"
] | null | null | null |
import io
import os
import cv2
import sys
import time
import pickle
import argparse
import picamera
import threading
import numpy as np
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
}
def LoadCalibration(filename):
with open(filename, "rb") as f:
obj = pickle.load(f)
return obj["width"], obj["height"], obj["camera_matrix"], obj["distortion_coeffs"]
class Visualizer(threading.Thread):
def __init__(self, owner):
super(Visualizer, self).__init__()
self.owner = owner
self.timestamp = None
self.current_timestamp = None
self.image = None
self.terminated = False
self.start()
def run(self):
while not self.terminated:
if self.timestamp:
if self.current_timestamp:
if self.timestamp > self.current_timestamp:
cv2.imshow("Frame", self.image)
self.current_timestamp = self.timestamp
else:
self.current_timestamp = self.timestamp
if cv2.waitKey(33) == 27:
self.terminated = True
self.owner.done = True
class ImageProcessor(threading.Thread):
def __init__(self, owner, args):
super(ImageProcessor, self).__init__()
self.stream = io.BytesIO()
self.event = threading.Event()
self.terminated = False
self.owner = owner
self.marker_size = args.marker_size
self.dictionary = ARUCO_DICT[args.dictionary]
_, _, self.camera_matrix, self.distortion_coeffs = LoadCalibration(args.camera_calibration)
self.Reset()
self.start()
def run(self):
# This method runs in a separate thread
while not self.terminated:
# Wait for an image to be written to the stream
if self.event.wait(1):
try:
self.stream.seek(0)
self.image = cv2.imdecode(np.frombuffer(self.stream.getvalue(), dtype=np.uint8), cv2.IMREAD_COLOR)
aruco_dict = cv2.aruco.Dictionary_get(self.dictionary)
aruco_params = cv2.aruco.DetectorParameters_create()
self.corners, self.ids, rejected = cv2.aruco.detectMarkers(self.image, aruco_dict, parameters = aruco_params)
if len(self.corners) > 0:
self.rvecs, self.tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(self.corners, self.marker_size, self.camera_matrix, self.distortion_coeffs)
finally:
# Reset the stream and event
self.stream.seek(0)
self.stream.truncate()
self.event.clear()
# Return ourselves to the available pool
with self.owner.lock:
self.done = True
self.owner.pool.append(self)
def Reset(self):
self.done = False
self.ids = []
self.corners = []
self.rvecs = []
self.tvecs = []
self.image = None
self.timestamp = None
self.frame = 0
class ProcessOutput(object):
def __init__(self, args):
self.done = False
# Construct a pool of image processors along with a lock
# to control access between threads
self.lock = threading.Lock()
self.pool = [ImageProcessor(self, args) for i in range(args.num_threads)]
self.processor = None
self.frame = 0
self.args = args
if self.args.visualize:
self.visualizer = Visualizer(self)
def PrintResult(self, processor):
# assert(processor.done)
print(processor.timestamp, processor.frame, end=" ")
for rvec, tvec, _ in zip(processor.rvecs, processor.tvecs, processor.corners):
print(rvec, tvec, end="")
print(flush=True)
def ShowResult(self, processor):
# assert(processor.done)
self.visualizer.timestamp = processor.timestamp
self.visualizer.image = processor.image
def StoreResult(self, processor):
# assert(processor.done)
dirName = "aruco"
if not os.path.isdir(dirName):
os.mkdir(dirName)
cv2.imwrite("{}/{}_{}.jpg".format(dirName, processor.timestamp, processor.frame), processor.image)
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame; set the current processor going and grab
# a spare one
self.frame += 1
timestamp = str(round(time.time() * 1000))
if self.processor:
self.processor.event.set()
with self.lock:
if self.pool:
if self.pool[-1].done:
if (self.args.visualize or self.args.store) and self.pool[-1].corners:
for rvec, tvec, crns, id in zip(self.pool[-1].rvecs, self.pool[-1].tvecs, self.pool[-1].corners, self.pool[-1].ids):
cv2.aruco.drawAxis(self.pool[-1].image, self.pool[-1].camera_matrix, self.pool[-1].distortion_coeffs, rvec, tvec, 0.176)
cv2.putText(self.pool[-1].image, "id: {} x: {:.2f} y: {:.2f} z: {:.2f}".format(id, tvec[0][0], tvec[0][1], tvec[0][2]),
(int(crns[0][0][0]), int(crns[0][0][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1, cv2.LINE_AA)
if self.args.print_results:
self.PrintResult(self.pool[-1])
if self.args.visualize:
self.ShowResult(self.pool[-1])
if self.args.store:
self.StoreResult(self.pool[-1])
self.pool[-1].Reset()
self.processor = self.pool.pop()
self.processor.frame = self.frame
self.processor.timestamp = timestamp
else:
# No processor's available, we'll have to skip
# this frame; you may want to print a warning
# here to see whether you hit this case
if self.args.print_results:
print(timestamp, self.frame, flush=True)
self.processor = None
if self.processor:
self.processor.stream.write(buf)
def Flush(self):
# When told to flush (this indicates end of recording), shut
# down in an orderly fashion. First, add the current processor
# back to the pool
print("Terminating ...")
if self.processor:
with self.lock:
self.pool.append(self.processor)
self.processor = None
if self.args.visualize:
self.visualizer.terminated = True
self.visualizer.join()
# Now, empty the pool, joining each thread as we go
while True:
proc = None
with self.lock:
try:
proc = self.pool.pop()
except IndexError:
pass # pool is empty
if not proc:
break
proc.terminated = True
proc.join()
def ParseCommandLineArguments():
parser = argparse.ArgumentParser(description='Detects ArUco markers and computes their pose.')
parser.add_argument('-n', '--num-threads', default=1, type=int)
parser.add_argument('-p', '--print-results', action='store_const', const=True, default=False)
parser.add_argument('-v', '--visualize', action='store_const', const=True, default=False)
parser.add_argument('-s', '--store', action='store_const', const=True, default=False)
parser.add_argument('-m', '--marker-size', required=True, type=float)
parser.add_argument('-d', '--dictionary', default="DICT_4X4_50", type=str)
parser.add_argument('-c', '--camera-calibration', default="camera_calibration.p", type=str)
return parser.parse_args(sys.argv[1:])
if __name__ == "__main__":
args = ParseCommandLineArguments()
width, height, _, _ = LoadCalibration(args.camera_calibration)
with picamera.PiCamera(resolution=(width, height), framerate=30) as camera:
time.sleep(2)
output = ProcessOutput(args)
camera.start_recording(output, format='mjpeg')
try:
while not output.done:
camera.wait_recording()
except KeyboardInterrupt:
pass
camera.stop_recording()
output.Flush()
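# Example invocation (illustrative): marker size is given in whatever unit the
# translation vectors should be reported in, e.g. metres.
#
#   python aruco.py --marker-size 0.05 --dictionary DICT_4X4_50 --num-threads 4 --print-results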
| 40.337662 | 163 | 0.577914 |
4a19f5631639c62d7790c0df4f4b1afda265acf1
| 193 |
py
|
Python
|
ding/framework/__init__.py
|
song2181/DI-engine
|
0a39b43f6ad9ef9862878de12b50184f4396be42
|
[
"Apache-2.0"
] | 1 |
2022-03-21T16:15:39.000Z
|
2022-03-21T16:15:39.000Z
|
ding/framework/__init__.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
ding/framework/__init__.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
from .context import Context, OnlineRLContext, OfflineRLContext
from .task import Task, task
from .parallel import Parallel
from .event_loop import EventLoop
from .supervisor import Supervisor
| 32.166667 | 63 | 0.839378 |
4a19f629ab402f921e1f117354399920adf70711
| 2,243 |
py
|
Python
|
python3/koans/about_control_statements.py
|
BriTFromIT/python_koans
|
c9269148a55821dbb1541fdd57df8c7a971ee715
|
[
"MIT"
] | null | null | null |
python3/koans/about_control_statements.py
|
BriTFromIT/python_koans
|
c9269148a55821dbb1541fdd57df8c7a971ee715
|
[
"MIT"
] | null | null | null |
python3/koans/about_control_statements.py
|
BriTFromIT/python_koans
|
c9269148a55821dbb1541fdd57df8c7a971ee715
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
def test_if_then_else_statements(self):
if True:
result = 'true value'
else:
result = 'false value'
self.assertEqual('true value', result)
def test_if_then_statements(self):
result = 'default value'
if True:
result = 'true value'
self.assertEqual('true value', result)
def test_if_then_elif_else_statements(self):
if False:
result = 'first value'
elif True:
result = 'true value'
else:
result = 'default value'
self.assertEqual('true value', result)
def test_while_statement(self):
i = 1
result = 1
while i <= 10:
result = result * i
i += 1
self.assertEqual(8, result)
def test_break_statement(self):
i = 1
result = 1
while True:
if i > 10: break
result = result * i
i += 1
self.assertEqual(876, result)
def test_continue_statement(self):
i = 0
result = []
while i < 10:
i += 1
if (i % 2) == 0: continue
result.append(i)
self.assertEqual(1, 3, 5, 7, 9, result)
def test_for_statement(self):
phrase = ["fish", "and", "chips"]
result = []
for item in phrase:
result.append(item.upper())
self.assertEqual(['FISH', 'AND', 'CHIPS'], result)
def test_for_statement_with_tuples(self):
round_table = [
("Lancelot", "Blue"),
("Galahad", "I don't know!"),
("Robin", "Blue! I mean Green!"),
("Arthur", "Is that an African Swallow or European Swallow?")
]
result = []
for knight, answer in round_table:
result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")
text = "Contestant: 'Robin' Answer: 'Blue! I mean Green!'"
self.assertRegex(result[2], text)
self.assertNotRegex(result[0], text)
self.assertNotRegex(result[1], text)
self.assertNotRegex(result[3], text)
| 27.691358 | 84 | 0.525189 |
4a19f640605b548c5145313529b469e9eb099fda
| 39 |
py
|
Python
|
src/clusto/test/util/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 216 |
2015-01-10T17:03:25.000Z
|
2022-03-24T07:23:41.000Z
|
src/clusto/test/util/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 23 |
2015-01-08T16:51:22.000Z
|
2021-03-13T12:56:04.000Z
|
src/clusto/test/util/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 49 |
2015-01-08T00:13:17.000Z
|
2021-09-22T02:01:20.000Z
|
from utiltests import TestUtil # noqa
| 19.5 | 38 | 0.794872 |
4a19f64514f39fad1c1fb2d6df373960fad811e0
| 4,881 |
py
|
Python
|
notebooks/dash-app/utils.py
|
piushvaish/instagram-growth-strategy
|
c0740094de376e4c2651c95a1182d408f24a941d
|
[
"Apache-2.0"
] | null | null | null |
notebooks/dash-app/utils.py
|
piushvaish/instagram-growth-strategy
|
c0740094de376e4c2651c95a1182d408f24a941d
|
[
"Apache-2.0"
] | null | null | null |
notebooks/dash-app/utils.py
|
piushvaish/instagram-growth-strategy
|
c0740094de376e4c2651c95a1182d408f24a941d
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import plotly
import plotly.graph_objs as go
import pickle
from sklearn.metrics import roc_auc_score
from tabs.tab_3 import choices
import json
def display_eval_metrics(value):
### Comparison of Possible Models
if value==choices[0]:
compare_models=pd.read_csv('resources/compare_models.csv', index_col=0)
# Let's display that with plotly.
fig = go.Figure()
fig.add_trace(go.Bar(
x=compare_models.loc['F1 score'].index,
y=compare_models.loc['F1 score'],
name=compare_models.index[0],
marker_color='rgb(107,174,214)'
))
fig.add_trace(go.Bar(
x=compare_models.loc['Accuracy'].index,
y=compare_models.loc['Accuracy'],
name=compare_models.index[1],
marker_color='rgba(219, 64, 82, 0.6)'
))
fig.add_trace(go.Bar(
x=compare_models.loc['AUC score'].index,
y=compare_models.loc['AUC score'],
name=compare_models.index[2],
marker_color='rgb(7,40,89)'
))
fig.update_layout(
title='Comparison of Possible Models',
xaxis = dict(title = 'Predictive models'), # x-axis label
yaxis = dict(title = 'Score'), # y-axis label
)
return fig
### Final Model Metrics
elif value==choices[1]:
file = open('resources/eval_scores.pkl', 'rb')
evals=pickle.load(file)
file.close()
fig = go.Figure()
fig.add_trace(go.Bar(
x=list(evals.keys()),
y=list(evals.values())
))
fig.update_traces(marker_color='rgb(107,174,214)', marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
fig.update_layout(
title='Evaluation Metrics for Random Forest Model (Testing Dataset = 578 profiles)',
xaxis = {'title': 'Metrics'},
yaxis = {'title': 'Percent'},
)
return fig
# Receiver Operating Characteristic (ROC): Area Under Curve
elif value==choices[2]:
with open('resources/roc_dict.json') as json_file:
roc_dict = json.load(json_file)
FPR=roc_dict['FPR']
TPR=roc_dict['TPR']
y_test=pd.Series(roc_dict['y_test'])
predictions=roc_dict['predictions']
roc_score=round(100*roc_auc_score(y_test, predictions),1)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=FPR,
y=TPR,
mode='lines',
name=f'AUC: {roc_score}',
marker_color='rgb(150,150,150)'
))
fig.add_trace(go.Scatter(
x=[0,1],
y=[0,1],
mode='lines',
name='Baseline Area: 50.0',
marker_color='rgb(37,37,37)'
))
fig.update_layout(
title='Receiver Operating Characteristic (ROC): Area Under Curve',
xaxis={'title': 'False Positive Rate (100-Specificity)','scaleratio': 1,'scaleanchor': 'y'},
yaxis={'title': 'True Positive Rate (Sensitivity)'}
)
return fig
# Confusion Matrix
elif value==choices[3]:
with open('resources/roc_dict.json') as json_file:
roc_dict = json.load(json_file)
FPR=roc_dict['FPR']
TPR=roc_dict['TPR']
y_test=pd.Series(roc_dict['y_test'])
cm=pd.read_csv('resources/confusion_matrix.csv')
fig = go.Figure()
fig.add_trace(go.Table(
header=dict(values=cm.columns,
line = dict(color='rgb(150,150,150)'),
fill = dict(color='rgb(150,150,150)'),
align = ['left'] * 5),
cells=dict(values=[cm[f'n={len(y_test)}'], cm['pred: follower'], cm['pred: non-follower']],
line = dict(color='#7D7F80'),
fill = dict(color='white'),
align = ['left'] * 5)))
fig.update_layout(
title = f'Confusion Matrix: Random Forest Model (Testing Dataset)'
)
return fig
    # Odds of becoming a follower (Coefficients)
elif value==choices[4]:
coeffs=pd.read_csv('resources/coefficients.csv')
fig = go.Figure()
fig.add_trace(go.Bar(
x=coeffs['feature'],
y=coeffs['coefficient']
))
fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
fig.update_layout(
title='Number of Followers is a good indication of becoming a follower.',
xaxis = {'title': 'Instagram Features'},
yaxis = {'title': 'Odds of Becoming a Follower'},
)
return fig
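# Sketch of how this helper is typically wired into the Dash app (names such as `app`,
# the component ids and the dropdown are assumptions, not taken from this file):
#
#   @app.callback(Output('eval-graph', 'figure'), [Input('metric-dropdown', 'value')])
#   def update_graph(value):
#       return display_eval_metrics(value)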
| 32.758389 | 104 | 0.541487 |
4a19f68cec4c682338b36348de47dfdbba452920
| 1,543 |
py
|
Python
|
2048/cli.py
|
S8A/2048
|
f0ed44e717369c1e3b89e989075d0d15b7928dc3
|
[
"MIT"
] | null | null | null |
2048/cli.py
|
S8A/2048
|
f0ed44e717369c1e3b89e989075d0d15b7928dc3
|
[
"MIT"
] | null | null | null |
2048/cli.py
|
S8A/2048
|
f0ed44e717369c1e3b89e989075d0d15b7928dc3
|
[
"MIT"
] | null | null | null |
import sys
from .board import GameBoard
def main(size, win):
game = GameBoard(size, win)
actions = {'l': game.shift_left,
'r': game.shift_right,
'u': game.shift_up,
'd': game.shift_down,
'undo': game.undo,
'exit': None}
stop = False
while not stop:
print_gameboard(game)
if game.won():
print('You won!')
stop = True
elif game.lost():
print('You lost. Try again.')
stop = True
else:
action = input_action(actions)
if not action:
stop = True
else:
action()
print()
def print_gameboard(gb: GameBoard):
print(f'..:: {gb.win} GAME ::..')
print(f'Score: {gb.get_score()}')
print(f'Moves: {gb.moves}')
print()
print('+'.join(['-'*6 for i in range(gb.size)]))
for row in gb.board:
items = []
for cell in row:
if cell == 0:
items.append(' '*6)
else:
items.append(f' {cell :<4} ')
print('|'.join(items))
print('+'.join(['-'*6 for i in range(gb.size)]))
print()
def input_action(actions):
while True:
user_input = input('Shift board (l/r/u/d) or do action (undo/exit): ')
user_input = user_input.strip().lower()
if user_input in actions.keys():
return actions[user_input]
else:
print('ERROR: Invalid action. Try again.')
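# Illustrative entry point (added as a sketch): the relative import above means this
# module is assumed to run as part of its package, e.g. via `python -m <package>.cli`.
# A 4x4 board playing to the 2048 tile is the conventional configuration.
if __name__ == '__main__':
    main(4, 2048)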
| 27.070175 | 78 | 0.485418 |
4a19f696db32a9ee0b94900ad3ce8aa92ab383fb
| 96 |
py
|
Python
|
knightian_model/__init__.py
|
QBatista/KnightianInnovationModel.py
|
c61fd98c0a8aa6eb2af913f2a7601f734d611076
|
[
"BSD-3-Clause"
] | null | null | null |
knightian_model/__init__.py
|
QBatista/KnightianInnovationModel.py
|
c61fd98c0a8aa6eb2af913f2a7601f734d611076
|
[
"BSD-3-Clause"
] | null | null | null |
knightian_model/__init__.py
|
QBatista/KnightianInnovationModel.py
|
c61fd98c0a8aa6eb2af913f2a7601f734d611076
|
[
"BSD-3-Clause"
] | 2 |
2019-11-30T05:35:05.000Z
|
2019-12-27T19:50:24.000Z
|
"""
Import the main names to the top level.
"""
from . import discrete
from . import utilities
| 12 | 38 | 0.697917 |
4a19f6b24572ff3615383a52badc892b3bbba67f
| 862 |
py
|
Python
|
SilverEngine/SilverEngine/urls.py
|
sebroad/silver-engine
|
21fbb774a1ab0e4afc8f87dfea3ffd5dc8afd96d
|
[
"MIT"
] | null | null | null |
SilverEngine/SilverEngine/urls.py
|
sebroad/silver-engine
|
21fbb774a1ab0e4afc8f87dfea3ffd5dc8afd96d
|
[
"MIT"
] | null | null | null |
SilverEngine/SilverEngine/urls.py
|
sebroad/silver-engine
|
21fbb774a1ab0e4afc8f87dfea3ffd5dc8afd96d
|
[
"MIT"
] | null | null | null |
"""SilverEngine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import LearningContent.views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^search/(?P<terms>.*)$', LearningContent.views.search),
]
| 35.916667 | 79 | 0.708817 |
4a19f954910b3c8d2f6bc7c09e06914d29f37c2e
| 3,712 |
py
|
Python
|
floodsystem/datafetcher.py
|
LimaBravoVictor/Part1A-_Flood_Warning_System
|
7d6936856cc24d4d02c5e84422914be1b49e5470
|
[
"MIT"
] | null | null | null |
floodsystem/datafetcher.py
|
LimaBravoVictor/Part1A-_Flood_Warning_System
|
7d6936856cc24d4d02c5e84422914be1b49e5470
|
[
"MIT"
] | 2 |
2022-01-24T00:02:43.000Z
|
2022-01-24T14:19:51.000Z
|
floodsystem/datafetcher.py
|
LimaBravoVictor/Part1A-_Flood_Warning_System
|
7d6936856cc24d4d02c5e84422914be1b49e5470
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides functionality for retrieving real-time and
latest time history level data
"""
import datetime
import json
import os
import dateutil.parser
import requests
def fetch(url):
"""Fetch data from url and return fetched JSON object"""
r = requests.get(url)
data = r.json()
return data
def dump(data, filename):
"""Save JSON object to file"""
f = open(filename, 'w')
data = json.dump(data, f)
f.close()
def load(filename):
"""Load JSON object from file"""
f = open(filename, 'r')
data = json.load(f)
f.close()
return data
def fetch_station_data(use_cache=True):
"""Fetch data from Environment agency for all active river level
monitoring stations via a REST API and return retrieved data as a
JSON object.
Fetched data is dumped to a cache file so on subsequent call it can
optionally be retrieved from the cache file. This is faster than
retrieval over the Internet and avoids excessive calls to the
Environment Agency service.
"""
# URL for retrieving data for active stations with river level
# monitoring (see
# http://environment.data.gov.uk/flood-monitoring/doc/reference)
url = "http://environment.data.gov.uk/flood-monitoring/id/stations?status=Active¶meter=level&qualifier=Stage&_view=full" # noqa
sub_dir = 'cache'
try:
os.makedirs(sub_dir)
except FileExistsError:
pass
cache_file = os.path.join(sub_dir, 'station_data.json')
# Attempt to load station data from file, otherwise fetch over
# Internet
if use_cache:
try:
# Attempt to load from file
data = load(cache_file)
except FileNotFoundError:
# If load from file fails, fetch and dump to file
data = fetch(url)
dump(data, cache_file)
else:
# Fetch and dump to file
data = fetch(url)
dump(data, cache_file)
return data
def fetch_latest_water_level_data(use_cache=False):
"""Fetch latest levels from all 'measures'. Returns JSON object"""
# URL for retrieving data
url = "http://environment.data.gov.uk/flood-monitoring/id/measures?parameter=level&qualifier=Stage&qualifier=level" # noqa
sub_dir = 'cache'
try:
os.makedirs(sub_dir)
except FileExistsError:
pass
cache_file = os.path.join(sub_dir, 'level_data.json')
# Attempt to load level data from file, otherwise fetch over
# Internet
if use_cache:
try:
# Attempt to load from file
data = load(cache_file)
except FileNotFoundError:
data = fetch(url)
dump(data, cache_file)
else:
data = fetch(url)
dump(data, cache_file)
return data
def fetch_measure_levels(measure_id, dt):
"""Fetch measure levels from latest reading and going back a period
dt. Return list of dates and a list of values.
"""
# Current time (UTC)
now = datetime.datetime.utcnow()
# Start time for data
start = now - dt
# Construct URL for fetching data
url_base = measure_id
url_options = "/readings/?_sorted&since=" + start.isoformat() + 'Z'
url = url_base + url_options
# Fetch data
data = fetch(url)
# Extract dates and levels
dates, levels = [], []
for measure in data['items']:
# Convert date-time string to a datetime object
d = dateutil.parser.parse(measure['dateTime'])
if 'value' in measure:
# Append data
dates.append(d)
levels.append(measure['value'])
return dates, levels
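# Usage sketch (illustrative): the measure id normally comes from the station data
# returned by fetch_station_data(); the id below is a placeholder, not a real measure.
#
#   dates, levels = fetch_measure_levels(
#       "http://environment.data.gov.uk/flood-monitoring/id/measures/<measure-id>",
#       dt=datetime.timedelta(days=2))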
| 26.326241 | 136 | 0.646552 |
4a19fa83c5d4b217c38e8405bc2db320fd0524a7
| 12,216 |
py
|
Python
|
main.py
|
Ritik721/Lung_Cancer_detection-using-CNN-
|
8277a87c86d92b649ffe4987fc12e1e8df19f60f
|
[
"Unlicense"
] | null | null | null |
main.py
|
Ritik721/Lung_Cancer_detection-using-CNN-
|
8277a87c86d92b649ffe4987fc12e1e8df19f60f
|
[
"Unlicense"
] | null | null | null |
main.py
|
Ritik721/Lung_Cancer_detection-using-CNN-
|
8277a87c86d92b649ffe4987fc12e1e8df19f60f
|
[
"Unlicense"
] | null | null | null |
# Handling data
import dicom # for reading dicom files
import os # for doing directory operations
import pandas as pd # for some simple data analysis (right now, just to load in the labels data and quickly reference it)
# Change this to wherever you are storing your data:
data_dir = '../input/scan_data/'
patients = os.listdir(data_dir)
labels_df = pd.read_csv('../input/stage1_labels.csv', index_col=0)
labels_df.head()
for patient in patients[:3]:
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
# a couple great 1-liners from: https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
print(slices[0].pixel_array.shape, len(slices))
print(len(patients))
import matplotlib.pyplot as plt
for patient in patients[:1]:
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
# the first slice
plt.imshow(slices[0].pixel_array)
plt.show()
# Processing and Viewing our Data
import cv2
import numpy as np
IMG_PX_SIZE = 150
for patient in patients[:1]:
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
fig = plt.figure()
for num,each_slice in enumerate(slices[:12]):
y = fig.add_subplot(3,4,num+1)
new_img = cv2.resize(np.array(each_slice.pixel_array),(IMG_PX_SIZE,IMG_PX_SIZE))
y.imshow(new_img)
plt.show()
import math
def chunks(l, n):
# Credit: Ned Batchelder
# Link: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def mean(l):
return sum(l) / len(l)
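# Quick illustration of the two helpers above (toy numbers, wrapped in a
# function so nothing extra runs when the script is executed): chunks() splits
# a list into n-sized pieces and mean() collapses each piece to one value.
def _demo_chunks_and_mean():
    values = list(range(10))                  # [0, 1, ..., 9]
    pieces = list(chunks(values, 4))          # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    return [mean(piece) for piece in pieces]  # [1.5, 5.5, 8.5]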
IMG_PX_SIZE = 150
HM_SLICES = 20
data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
labels_df = pd.read_csv('../input/stage1_labels.csv', index_col=0)
for patient in patients[:10]:
try:
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
new_slices = []
slices = [cv2.resize(np.array(each_slice.pixel_array),(IMG_PX_SIZE,IMG_PX_SIZE)) for each_slice in slices]
chunk_sizes = math.ceil(len(slices) / HM_SLICES)
for slice_chunk in chunks(slices, chunk_sizes):
slice_chunk = list(map(mean, zip(*slice_chunk)))
new_slices.append(slice_chunk)
if len(new_slices) == HM_SLICES-1:
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES-2:
new_slices.append(new_slices[-1])
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES+2:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
if len(new_slices) == HM_SLICES+1:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
print(len(slices), len(new_slices))
except Exception as e:
# again, some patients are not labeled, but JIC we still want the error if something
# else is wrong with our code
print(str(e))
for patient in patients[:1]:
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
new_slices = []
slices = [cv2.resize(np.array(each_slice.pixel_array),(IMG_PX_SIZE,IMG_PX_SIZE)) for each_slice in slices]
chunk_sizes = math.ceil(len(slices) / HM_SLICES)
for slice_chunk in chunks(slices, chunk_sizes):
slice_chunk = list(map(mean, zip(*slice_chunk)))
new_slices.append(slice_chunk)
if len(new_slices) == HM_SLICES-1:
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES-2:
new_slices.append(new_slices[-1])
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES+2:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
if len(new_slices) == HM_SLICES+1:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
fig = plt.figure()
for num,each_slice in enumerate(new_slices):
y = fig.add_subplot(4,5,num+1)
y.imshow(each_slice, cmap='gray')
plt.show()
# Preprocessing our Data
import numpy as np
import pandas as pd
import dicom
import os
import matplotlib.pyplot as plt
import cv2
import math
IMG_SIZE_PX = 50
SLICE_COUNT = 20
def chunks(l, n):
# Credit: Ned Batchelder
# Link: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def mean(a):
return sum(a) / len(a)
def process_data(patient,labels_df,img_px_size=50, hm_slices=20, visualize=False):
label = labels_df.get_value(patient, 'cancer')
path = data_dir + patient
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
new_slices = []
slices = [cv2.resize(np.array(each_slice.pixel_array),(img_px_size,img_px_size)) for each_slice in slices]
chunk_sizes = math.ceil(len(slices) / hm_slices)
for slice_chunk in chunks(slices, chunk_sizes):
slice_chunk = list(map(mean, zip(*slice_chunk)))
new_slices.append(slice_chunk)
if len(new_slices) == hm_slices-1:
new_slices.append(new_slices[-1])
if len(new_slices) == hm_slices-2:
new_slices.append(new_slices[-1])
new_slices.append(new_slices[-1])
if len(new_slices) == hm_slices+2:
new_val = list(map(mean, zip(*[new_slices[hm_slices-1],new_slices[hm_slices],])))
del new_slices[hm_slices]
new_slices[hm_slices-1] = new_val
if len(new_slices) == hm_slices+1:
new_val = list(map(mean, zip(*[new_slices[hm_slices-1],new_slices[hm_slices],])))
del new_slices[hm_slices]
new_slices[hm_slices-1] = new_val
if visualize:
fig = plt.figure()
for num,each_slice in enumerate(new_slices):
y = fig.add_subplot(4,5,num+1)
y.imshow(each_slice, cmap='gray')
plt.show()
if label == 1: label=np.array([0,1])
elif label == 0: label=np.array([1,0])
return np.array(new_slices),label
#stage 1 for real.
data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
labels = pd.read_csv('../input/stage1_labels.csv', index_col=0)
much_data = []
for num,patient in enumerate(patients):
if num % 100 == 0:
print(num)
try:
img_data,label = process_data(patient,labels,img_px_size=IMG_SIZE_PX, hm_slices=SLICE_COUNT)
#print(img_data.shape,label)
much_data.append([img_data,label])
except KeyError as e:
print('This is unlabeled data!')
np.save('muchdata-{}-{}-{}.npy'.format(IMG_SIZE_PX,IMG_SIZE_PX,SLICE_COUNT), much_data)
# Convolutional Neural Network
import tensorflow as tf
import numpy as np
IMG_SIZE_PX = 50
SLICE_COUNT = 20
n_classes = 2
batch_size = 10
x = tf.placeholder('float')
y = tf.placeholder('float')
keep_rate = 0.8
def conv3d(x, W):
return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='SAME')
def maxpool3d(x):
# size of window movement of window as you slide about
return tf.nn.max_pool3d(x, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='SAME')
def convolutional_neural_network(x):
    # 3 x 3 x 3 patches, 1 channel, 32 features to compute.
    weights = {'W_conv1':tf.Variable(tf.random_normal([3,3,3,1,32])),
               # 3 x 3 x 3 patches, 32 channels, 64 features to compute.
               'W_conv2':tf.Variable(tf.random_normal([3,3,3,32,64])),
# 64 features
'W_fc':tf.Variable(tf.random_normal([54080,1024])),
'out':tf.Variable(tf.random_normal([1024, n_classes]))}
biases = {'b_conv1':tf.Variable(tf.random_normal([32])),
'b_conv2':tf.Variable(tf.random_normal([64])),
'b_fc':tf.Variable(tf.random_normal([1024])),
'out':tf.Variable(tf.random_normal([n_classes]))}
# image X image Y image Z
x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])
conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
conv1 = maxpool3d(conv1)
conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
conv2 = maxpool3d(conv2)
fc = tf.reshape(conv2,[-1, 54080])
fc = tf.nn.relu(tf.matmul(fc, weights['W_fc'])+biases['b_fc'])
fc = tf.nn.dropout(fc, keep_rate)
output = tf.matmul(fc, weights['out'])+biases['out']
return output
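# Where the 54080 above comes from (for the 50 x 50 x 20 volumes built in the
# preprocessing step): each 2x2x2 max-pool with stride 2 and SAME padding maps
# a dimension d to ceil(d / 2), so 50 x 50 x 20 -> 25 x 25 x 10 -> 13 x 13 x 5,
# and with 64 feature maps the flattened size is 13 * 13 * 5 * 64 = 54080.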
much_data = np.load('muchdata-50-50-20.npy')
# If you are working with the basic sample data, use maybe 2 instead of 100 here... you don't have enough data to really do this
train_data = much_data[:-100]
validation_data = much_data[-100:]
def train_neural_network(x):
prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
successful_runs = 0
total_runs = 0
for epoch in range(hm_epochs):
epoch_loss = 0
for data in train_data:
total_runs += 1
try:
X = data[0]
Y = data[1]
_, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
epoch_loss += c
successful_runs += 1
except Exception as e:
# I am passing for the sake of notebook space, but we are getting 1 shaping issue from one
# input tensor. Not sure why, will have to look into it. Guessing it's
# one of the depths that doesn't come to 20.
pass
#print(str(e))
print('Epoch', epoch+1, 'completed out of',hm_epochs,'loss:',epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))
print('Done. Finishing accuracy:')
print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))
print('fitment percent:',successful_runs/total_runs)
#concluding remarks
labels_df.cancer.value_counts()
labels_df.ix[-100:].cancer.value_counts()
| 34.411268 | 129 | 0.613703 |
4a19faa46859274e6b9a0d75524aa24fdcebf20c
| 4,005 |
py
|
Python
|
src/kvt/models/necks/softpool.py
|
Ynakatsuka/nishika-22
|
72994cab16486b3a26686642ad72a29b6761b46d
|
[
"BSD-2-Clause"
] | 4 |
2022-02-01T05:04:53.000Z
|
2022-02-02T04:16:31.000Z
|
src/kvt/models/necks/softpool.py
|
Ynakatsuka/nishika-22
|
72994cab16486b3a26686642ad72a29b6761b46d
|
[
"BSD-2-Clause"
] | null | null | null |
src/kvt/models/necks/softpool.py
|
Ynakatsuka/nishika-22
|
72994cab16486b3a26686642ad72a29b6761b46d
|
[
"BSD-2-Clause"
] | null | null | null |
"""
https://github.com/wangyida/softpool/blob/master/pytorch/softpool.py
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
# Produce a set of pointnet features in several sorted cloud
def train2cabins(windows, num_cabin=8):
size_bth = list(windows.shape)[0]
size_feat = list(windows.shape)[1]
regions = list(windows.shape)[2]
num_points = list(windows.shape)[3]
cabins = torch.zeros(size_bth, size_feat, regions, num_cabin).cuda()
points_cabin = num_points // num_cabin
for idx in range(num_cabin):
cabins[:, :, :, idx] = torch.max(
windows[:, :, :, idx * points_cabin : (idx + 1) * points_cabin],
dim=3,
keepdim=False,
)[0]
return cabins
class Sorter(nn.Module):
def __init__(self, dim_in, dim_out):
super(Sorter, self).__init__()
self.conv1d = torch.nn.Conv1d(dim_in, dim_out, 1).cuda()
def forward(self, x):
val_activa = self.conv1d(x)
id_activa = torch.argmax(val_activa, dim=1)
return val_activa, id_activa
class SoftPool(nn.Module):
def __init__(self, regions=16, cabins=8, sp_ratio=4):
super(SoftPool, self).__init__()
self.regions = regions
self.num_cabin = cabins
self.sp_ratio = sp_ratio
def forward(self, x):
[self.size_bth, self.size_feat, self.pnt_per_sort] = list(x.shape)
self.pnt_per_sort //= self.sp_ratio
# cabin -2
conv2d_1 = nn.Conv2d(
self.size_feat, self.size_feat, kernel_size=(1, 3), stride=(1, 1)
).cuda()
# cabin -2
conv2d_2 = nn.Conv2d(
self.size_feat, self.size_feat, kernel_size=(1, 3), stride=(1, 1)
).cuda()
conv2d_3 = nn.Conv2d(
self.size_feat,
self.size_feat,
kernel_size=(1, self.num_cabin - 2 * (3 - 1)),
stride=(1, 1),
).cuda()
conv2d_5 = nn.Conv2d(
self.size_feat,
self.size_feat,
kernel_size=(self.regions, 1),
stride=(1, 1),
).cuda()
sorter = Sorter(self.size_feat, self.regions)
val_activa, id_activa = sorter(x)
# initialize empty space for softpool feature
sp_cube = torch.zeros(
self.size_bth, self.size_feat, self.regions, self.pnt_per_sort
).cuda()
sp_idx = torch.zeros(
self.size_bth, self.regions + 3, self.regions, self.pnt_per_sort
).cuda()
for region in range(self.regions):
x_val, x_idx = torch.sort(
val_activa[:, region, :], dim=1, descending=True
)
index = (
x_idx[:, : self.pnt_per_sort]
.unsqueeze(1)
.repeat(1, self.size_feat, 1)
)
sp_cube[:, :, region, :] = torch.gather(x, dim=2, index=index)
sp_idx[:, :, region, :] = (
x_idx[:, : self.pnt_per_sort]
.unsqueeze(1)
.repeat(1, self.regions + 3, 1)
)
# local pointnet feature
points_cabin = self.pnt_per_sort // self.num_cabin
cabins = train2cabins(sp_cube, self.num_cabin)
# we need to use succession manner to repeat cabin to fit with cube
sp_windows = torch.repeat_interleave(
cabins, repeats=points_cabin, dim=3
)
# merge cabins in train
trains = conv2d_3(conv2d_2(conv2d_1(cabins)))
# we need to use succession manner to repeat cabin to fit with cube
sp_trains = trains.repeat(1, 1, 1, self.pnt_per_sort)
# now make a station
station = conv2d_5(trains)
sp_station = station.repeat(1, 1, self.regions, self.pnt_per_sort)
scope = "local"
if scope == "global":
sp_cube = torch.cat(
(sp_cube, sp_windows, sp_trains, sp_station), 1
).contiguous()
return sp_cube, sp_idx, cabins, id_activa
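# Minimal shape-check sketch (hypothetical sizes). The module hard-codes
# .cuda(), so a CUDA-capable device is required for this to actually run.
if __name__ == "__main__":
    feats = torch.rand(2, 64, 1024).cuda()  # (batch, feature, points)
    pool = SoftPool(regions=16, cabins=8, sp_ratio=4)
    sp_cube, sp_idx, cabins, id_activa = pool(feats)
    print(sp_cube.shape)  # torch.Size([2, 64, 16, 256]) with the default "local" scope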
| 32.560976 | 77 | 0.573533 |
4a19fadbc54d394e7d0ed47eedbd6258408d1063
| 3,015 |
py
|
Python
|
jsonit/tests.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 3 |
2015-01-22T19:02:55.000Z
|
2015-11-08T16:10:08.000Z
|
jsonit/tests.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 1 |
2015-06-06T11:49:32.000Z
|
2015-06-06T11:49:32.000Z
|
jsonit/tests.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 1 |
2020-05-12T16:41:54.000Z
|
2020-05-12T16:41:54.000Z
|
import datetime
import json
from unittest import TestCase
from django.contrib import messages
from django.contrib.messages.constants import DEFAULT_TAGS
from django.contrib.messages.storage import base as messages_base
from django.contrib.messages.storage.session import SessionStorage
from django.http import HttpRequest
from django.utils.functional import lazy
from django.utils import six
from jsonit.http import JSONResponse
from jsonit.encoder import encode
class BaseTest(TestCase):
def setUp(self):
self.request = HttpRequest()
self.request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
class JSONResponseTest(BaseTest):
def setUp(self):
self.request = HttpRequest()
self.request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
def test_success(self):
response = JSONResponse(self.request)
self.assertDictEqual(
json.loads(response.content.decode('utf-8')),
{"messages": [], "details": {}, "success": True}
)
def test_not_success(self):
response = JSONResponse(self.request, success=False)
self.assertDictEqual(
json.loads(response.content.decode('utf-8')),
{"messages": [], "details": {}, "success": False}
)
def test_details(self):
response = JSONResponse(self.request, details={'test': 1})
self.assertDictEqual(
json.loads(response.content.decode('utf-8')),
{"messages": [], "details": {"test": 1}, "success": True}
)
class MessageTest(BaseTest):
def setUp(self):
super(MessageTest, self).setUp()
self.request.session = {}
self._old_LEVEL_TAGS = messages_base.LEVEL_TAGS
messages_base.LEVEL_TAGS = DEFAULT_TAGS
self.request._messages = SessionStorage(self.request)
def tearDown(self):
messages_base.LEVEL_TAGS = self._old_LEVEL_TAGS
def test_messages(self):
messages.info(self.request, 'Hello')
response = JSONResponse(self.request)
self.assertDictEqual(
json.loads(response.content.decode('utf-8')),
{
"messages": [{"message": "Hello", "class": "info"}],
"details": {},
"success": True,
}
)
class EncoderTest(TestCase):
def test_lazy(self):
test_msg = lazy(lambda: 'Test!', six.text_type)
self.assertEqual(encode(test_msg()), '"Test!"')
def test_datetime(self):
self.assertEqual(encode(datetime.datetime(1980, 1, 1, 12, 0, 5)),
'"1980-01-01T12:00:05"')
def test_date(self):
self.assertEqual(encode(datetime.date(1980, 1, 1)),
'"1980-01-01"')
def test_custom_encoder(self):
encode_dt = lambda d: d.strftime('%d %b %Y')
self.assertEqual(encode(datetime.datetime(1980, 1, 1),
encoders=[(datetime.datetime, encode_dt)]),
u'"01 Jan 1980"')
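# Sketch of the kind of view these tests stand in for (hypothetical view name;
# the success/details keywords mirror the constructor calls exercised above).
def example_view(request):
    return JSONResponse(request, success=True, details={'answered': True})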
| 31.40625 | 75 | 0.619569 |
4a19fc245c45fec1259814cbd900e85faeb2ea10
| 9,978 |
py
|
Python
|
code/base/batch.py
|
KirillDogadin-std/Lab-Semantic
|
d04e592cd540dd5dcd537cd7df7ee6619af3d67f
|
[
"MIT"
] | null | null | null |
code/base/batch.py
|
KirillDogadin-std/Lab-Semantic
|
d04e592cd540dd5dcd537cd7df7ee6619af3d67f
|
[
"MIT"
] | 11 |
2020-11-30T14:28:20.000Z
|
2021-01-13T19:55:44.000Z
|
code/base/batch.py
|
KirillDogadin-std/Lab-Semantic
|
d04e592cd540dd5dcd537cd7df7ee6619af3d67f
|
[
"MIT"
] | 1 |
2021-03-23T07:22:03.000Z
|
2021-03-23T07:22:03.000Z
|
import gc
import random
import numpy as np
import multiprocessing
from utils import task_divide, merge_dic
def generate_pos_batch_queue(triple_list1, triple_list2, batch_size, steps, out_queue):
for step in steps:
out_queue.put(generate_pos_batch(triple_list1, triple_list2, batch_size, step))
exit(0)
def generate_pos_batch(triple_list1, triple_list2, batch_size, step):
batch_size1 = int(len(triple_list1) / (len(triple_list1) + len(triple_list2)) * batch_size)
batch_size2 = batch_size - batch_size1
pos_batch1 = generate_pos_triples(triple_list1, batch_size1, step)
pos_batch2 = generate_pos_triples(triple_list2, batch_size2, step)
return pos_batch1 + pos_batch2
def generate_relation_triple_batch_queue(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
steps, out_queue, neighbor1, neighbor2, neg_triples_num):
for step in steps:
pos_batch, neg_batch = generate_relation_triple_batch(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
step, neighbor1, neighbor2, neg_triples_num)
out_queue.put((pos_batch, neg_batch))
exit(0)
def generate_relation_triple_batch(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
step, neighbor1, neighbor2, neg_triples_num):
batch_size1 = int(len(triple_list1) / (len(triple_list1) + len(triple_list2)) * batch_size)
batch_size2 = batch_size - batch_size1
pos_batch1 = generate_pos_triples(triple_list1, batch_size1, step)
pos_batch2 = generate_pos_triples(triple_list2, batch_size2, step)
neg_batch1 = generate_neg_triples_fast(pos_batch1, triple_set1, entity_list1, neg_triples_num, neighbor=neighbor1)
neg_batch2 = generate_neg_triples_fast(pos_batch2, triple_set2, entity_list2, neg_triples_num, neighbor=neighbor2)
return pos_batch1 + pos_batch2, neg_batch1 + neg_batch2
def generate_pos_triples(triples, batch_size, step, is_fixed_size=False):
start = step * batch_size
end = start + batch_size
if end > len(triples):
end = len(triples)
pos_batch = triples[start: end]
# pos_batch = random.sample(triples, batch_size)
if is_fixed_size and len(pos_batch) < batch_size:
pos_batch += triples[:batch_size-len(pos_batch)]
return pos_batch
def generate_neg_triples(pos_batch, all_triples_set, entities_list, neg_triples_num, neighbor=None, max_try=10):
if neighbor is None:
neighbor = dict()
neg_batch = list()
for head, relation, tail in pos_batch:
head_candidates = neighbor.get(head, entities_list)
tail_candidates = neighbor.get(tail, entities_list)
for i in range(neg_triples_num):
n = 0
while True:
corrupt_head_prob = np.random.binomial(1, 0.5)
neg_head = head
neg_tail = tail
if corrupt_head_prob:
neg_head = random.choice(head_candidates)
else:
neg_tail = random.choice(tail_candidates)
if (neg_head, relation, neg_tail) not in all_triples_set:
neg_batch.append((neg_head, relation, neg_tail))
break
n += 1
if n == max_try:
neg_tail = random.choice(entities_list)
neg_batch.append((head, relation, neg_tail))
break
assert len(neg_batch) == neg_triples_num * len(pos_batch)
return neg_batch
def generate_neg_triples_fast(pos_batch, all_triples_set, entities_list, neg_triples_num, neighbor=None, max_try=10):
if neighbor is None:
neighbor = dict()
neg_batch = list()
for head, relation, tail in pos_batch:
neg_triples = list()
nums_to_sample = neg_triples_num
head_candidates = neighbor.get(head, entities_list)
tail_candidates = neighbor.get(tail, entities_list)
for i in range(max_try):
corrupt_head_prob = np.random.binomial(1, 0.5)
if corrupt_head_prob:
neg_heads = random.sample(head_candidates, nums_to_sample)
i_neg_triples = {(h2, relation, tail) for h2 in neg_heads}
else:
neg_tails = random.sample(tail_candidates, nums_to_sample)
i_neg_triples = {(head, relation, t2) for t2 in neg_tails}
if i == max_try - 1:
neg_triples += list(i_neg_triples)
break
else:
i_neg_triples = list(i_neg_triples - all_triples_set)
neg_triples += i_neg_triples
if len(neg_triples) == neg_triples_num:
break
else:
nums_to_sample = neg_triples_num - len(neg_triples)
assert len(neg_triples) == neg_triples_num
neg_batch.extend(neg_triples)
assert len(neg_batch) == neg_triples_num * len(pos_batch)
return neg_batch
def generate_neighbours(entity_embeds, entity_list, neighbors_num, threads_num):
entity_list = np.array(entity_list)
ent_frags = task_divide(entity_list, threads_num)
ent_frag_indexes = task_divide(np.array(range(len(entity_list))), threads_num)
pool = multiprocessing.Pool(processes=len(ent_frags))
results = list()
for i in range(len(ent_frags)):
results.append(pool.apply_async(find_neighbours,
args=(ent_frags[i], entity_list,
entity_embeds[ent_frag_indexes[i], :],
entity_embeds, neighbors_num)))
pool.close()
pool.join()
dic = dict()
for res in results:
dic = merge_dic(dic, res.get())
del results
gc.collect()
return dic
def find_neighbours(frags, entity_list, sub_embed, embed, k):
dic = dict()
sim_mat = np.matmul(sub_embed, embed.T)
for i in range(sim_mat.shape[0]):
sort_index = np.argpartition(-sim_mat[i, :], k)
neighbors_index = sort_index[0:k]
neighbors = entity_list[neighbors_index].tolist()
dic[frags[i]] = neighbors
return dic
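# The top-k trick used in find_neighbours, in isolation (toy numbers):
# np.argpartition(-a, k) places the indices of the k largest values of `a`
# in the first k slots without paying for a full sort.
def _topk_indices(similarities, k):
    return np.argpartition(-similarities, k)[:k]
# e.g. _topk_indices(np.array([0.1, 0.9, 0.4, 0.8]), 2) -> indices 1 and 3, in some order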
def generate_triple_label_batch(triple_list1, triple_list2, triple_set1, triple_set2, entity_list1, entity_list2,
batch_size, steps, out_queue, neighbor1, neighbor2, neg_triples_num):
batch_size1 = int(len(triple_list1) / (len(triple_list1) + len(triple_list2)) * batch_size)
batch_size2 = batch_size - batch_size1
for step in steps:
pos_batch1 = generate_pos_triples(triple_list1, batch_size1, step)
pos_batch2 = generate_pos_triples(triple_list2, batch_size2, step)
neg_batch1 = generate_neg_triples(pos_batch1, triple_set1, entity_list1,
neg_triples_num, neighbor=neighbor1)
neg_batch2 = generate_neg_triples(pos_batch2, triple_set2, entity_list2,
neg_triples_num, neighbor=neighbor2)
pos_batch = pos_batch1 + pos_batch2
pos_label = [1] * len(pos_batch)
neg_batch = neg_batch1 + neg_batch2
neg_label = [-1] * len(neg_batch)
out_queue.put((pos_batch + neg_batch, pos_label + neg_label))
exit(0)
def generate_neg_attribute_triples(pos_batch, all_triples_set, entity_list, neg_triples_num, neighbor=None):
if neighbor is None:
neighbor = dict()
neg_batch = list()
for head, attribute, value in pos_batch:
for i in range(neg_triples_num):
while True:
neg_head = random.choice(neighbor.get(head, entity_list))
if (neg_head, attribute, value) not in all_triples_set:
break
neg_batch.append((neg_head, attribute, value))
assert len(neg_batch) == neg_triples_num * len(pos_batch)
return neg_batch
def generate_attribute_triple_batch_queue(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
steps, out_queue, neighbor1, neighbor2, neg_triples_num, is_fixed_size):
for step in steps:
pos_batch, neg_batch = generate_attribute_triple_batch(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
step, neighbor1, neighbor2, neg_triples_num,
is_fixed_size)
out_queue.put((pos_batch, neg_batch))
exit(0)
def generate_attribute_triple_batch(triple_list1, triple_list2, triple_set1, triple_set2,
entity_list1, entity_list2, batch_size,
step, neighbor1, neighbor2, neg_triples_num, is_fixed_size):
batch_size1 = int(len(triple_list1) / (len(triple_list1) + len(triple_list2)) * batch_size)
batch_size2 = batch_size - batch_size1
pos_batch1 = generate_pos_triples(triple_list1, batch_size1, step, is_fixed_size=is_fixed_size)
pos_batch2 = generate_pos_triples(triple_list2, batch_size2, step, is_fixed_size=is_fixed_size)
neg_batch1 = generate_neg_attribute_triples(pos_batch1, triple_set1, entity_list1,
neg_triples_num, neighbor=neighbor1)
neg_batch2 = generate_neg_attribute_triples(pos_batch2, triple_set2, entity_list2,
neg_triples_num, neighbor=neighbor2)
return pos_batch1 + pos_batch2, neg_batch1 + neg_batch2
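# Toy walk-through of the sampling helpers above (made-up triples; guarded so
# that importing this module stays side-effect free).
if __name__ == "__main__":
    toy_triples = [(0, 0, 1), (1, 0, 2), (2, 1, 3), (3, 1, 0)]
    toy_entities = [0, 1, 2, 3]
    pos = generate_pos_triples(toy_triples, batch_size=2, step=1)  # -> [(2, 1, 3), (3, 1, 0)]
    neg = generate_neg_triples(pos, set(toy_triples), toy_entities, neg_triples_num=2)
    print(pos)
    print(len(neg))  # 2 negatives per positive -> 4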
| 46.84507 | 118 | 0.635799 |
4a19fc384cc0b741f7a5d38bd18fc7fee69c9747
| 10,866 |
py
|
Python
|
uvicorn/protocols/websockets/websockets_impl.py
|
mkiesel/uvicorn
|
efdbe7a4a6c94f34c35d751fb63391136b4e90e4
|
[
"BSD-3-Clause"
] | 1 |
2022-02-09T19:14:57.000Z
|
2022-02-09T19:14:57.000Z
|
uvicorn/protocols/websockets/websockets_impl.py
|
mkiesel/uvicorn
|
efdbe7a4a6c94f34c35d751fb63391136b4e90e4
|
[
"BSD-3-Clause"
] | null | null | null |
uvicorn/protocols/websockets/websockets_impl.py
|
mkiesel/uvicorn
|
efdbe7a4a6c94f34c35d751fb63391136b4e90e4
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import http
import logging
from typing import Callable
from urllib.parse import unquote
import websockets
from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory
from uvicorn.logging import TRACE_LOG_LEVEL
from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl
class Server:
closing = False
def register(self, ws):
pass
def unregister(self, ws):
pass
def is_serving(self):
return not self.closing
class WebSocketProtocol(websockets.WebSocketServerProtocol):
def __init__(
self, config, server_state, on_connection_lost: Callable = None, _loop=None
):
if not config.loaded:
config.load()
self.config = config
self.app = config.loaded_app
self.on_connection_lost = on_connection_lost
self.loop = _loop or asyncio.get_event_loop()
self.root_path = config.root_path
# Shared server state
self.connections = server_state.connections
self.tasks = server_state.tasks
# Connection state
self.transport = None
self.server = None
self.client = None
self.scheme = None
# Connection events
self.scope = None
self.handshake_started_event = asyncio.Event()
self.handshake_completed_event = asyncio.Event()
self.closed_event = asyncio.Event()
self.initial_response = None
self.connect_sent = False
self.accepted_subprotocol = None
self.transfer_data_task = None
self.ws_server = Server()
extensions = []
if self.config.ws_per_message_deflate:
extensions.append(ServerPerMessageDeflateFactory())
super().__init__(
ws_handler=self.ws_handler,
ws_server=self.ws_server,
max_size=self.config.ws_max_size,
ping_interval=self.config.ws_ping_interval,
ping_timeout=self.config.ws_ping_timeout,
extensions=extensions,
logger=logging.getLogger("uvicorn.error"),
extra_headers=[],
)
def connection_made(self, transport):
self.connections.add(self)
self.transport = transport
self.server = get_local_addr(transport)
self.client = get_remote_addr(transport)
self.scheme = "wss" if is_ssl(transport) else "ws"
if self.logger.isEnabledFor(TRACE_LOG_LEVEL):
prefix = "%s:%d - " % tuple(self.client) if self.client else ""
self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix)
super().connection_made(transport)
def connection_lost(self, exc):
self.connections.remove(self)
if self.logger.isEnabledFor(TRACE_LOG_LEVEL):
prefix = "%s:%d - " % tuple(self.client) if self.client else ""
self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix)
self.handshake_completed_event.set()
super().connection_lost(exc)
if self.on_connection_lost is not None:
self.on_connection_lost()
if exc is None:
self.transport.close()
def shutdown(self):
self.ws_server.closing = True
self.transport.close()
def on_task_complete(self, task):
self.tasks.discard(task)
async def process_request(self, path, headers):
"""
This hook is called to determine if the websocket should return
an HTTP response and close.
Our behavior here is to start the ASGI application, and then wait
for either `accept` or `close` in order to determine if we should
close the connection.
"""
path_portion, _, query_string = path.partition("?")
websockets.legacy.handshake.check_request(headers)
subprotocols = []
for header in headers.get_all("Sec-WebSocket-Protocol"):
subprotocols.extend([token.strip() for token in header.split(",")])
asgi_headers = [
(name.encode("ascii"), value.encode("ascii"))
for name, value in headers.raw_items()
]
self.scope = {
"type": "websocket",
"asgi": {"version": self.config.asgi_version, "spec_version": "2.3"},
"http_version": "1.1",
"scheme": self.scheme,
"server": self.server,
"client": self.client,
"root_path": self.root_path,
"path": unquote(path_portion),
"raw_path": path_portion,
"query_string": query_string.encode("ascii"),
"headers": asgi_headers,
"subprotocols": subprotocols,
}
task = self.loop.create_task(self.run_asgi())
task.add_done_callback(self.on_task_complete)
self.tasks.add(task)
await self.handshake_started_event.wait()
return self.initial_response
def process_subprotocol(self, headers, available_subprotocols):
"""
We override the standard 'process_subprotocol' behavior here so that
we return whatever subprotocol is sent in the 'accept' message.
"""
return self.accepted_subprotocol
def send_500_response(self):
msg = b"Internal Server Error"
content = [
b"HTTP/1.1 500 Internal Server Error\r\n"
b"content-type: text/plain; charset=utf-8\r\n",
b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n",
b"connection: close\r\n",
b"\r\n",
msg,
]
self.transport.write(b"".join(content))
# Allow handler task to terminate cleanly, as websockets doesn't cancel it by
# itself (see https://github.com/encode/uvicorn/issues/920)
self.handshake_started_event.set()
async def ws_handler(self, protocol, path):
"""
This is the main handler function for the 'websockets' implementation
to call into. We just wait for close then return, and instead allow
'send' and 'receive' events to drive the flow.
"""
self.handshake_completed_event.set()
await self.closed_event.wait()
async def run_asgi(self):
"""
Wrapper around the ASGI callable, handling exceptions and unexpected
termination states.
"""
try:
result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
except BaseException as exc:
self.closed_event.set()
msg = "Exception in ASGI application\n"
self.logger.error(msg, exc_info=exc)
if not self.handshake_started_event.is_set():
self.send_500_response()
else:
await self.handshake_completed_event.wait()
self.transport.close()
else:
self.closed_event.set()
if not self.handshake_started_event.is_set():
msg = "ASGI callable returned without sending handshake."
self.logger.error(msg)
self.send_500_response()
self.transport.close()
elif result is not None:
msg = "ASGI callable should return None, but returned '%s'."
self.logger.error(msg, result)
await self.handshake_completed_event.wait()
self.transport.close()
async def asgi_send(self, message):
message_type = message["type"]
if not self.handshake_started_event.is_set():
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["path"],
)
self.initial_response = None
self.accepted_subprotocol = message.get("subprotocol")
if "headers" in message:
self.extra_headers.extend(
# ASGI spec requires bytes
                        # But for compatibility we need to convert it to strings
(name.decode("latin-1"), value.decode("latin-1"))
for name, value in message["headers"]
)
self.handshake_started_event.set()
elif message_type == "websocket.close":
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["path"],
)
self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
self.handshake_started_event.set()
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.closed_event.is_set():
await self.handshake_completed_event.wait()
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
await self.send(data)
elif message_type == "websocket.close":
code = message.get("code", 1000)
reason = message.get("reason", "") or ""
await self.close(code, reason)
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
async def asgi_receive(self):
if not self.connect_sent:
self.connect_sent = True
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
if self.closed_event.is_set():
# If client disconnected, use WebSocketServerProtocol.close_code property.
# If the handshake failed or the app closed before handshake completion,
# use 1006 Abnormal Closure.
return {"type": "websocket.disconnect", "code": self.close_code or 1006}
try:
data = await self.recv()
except websockets.ConnectionClosed as exc:
self.closed_event.set()
return {"type": "websocket.disconnect", "code": exc.code}
msg = {"type": "websocket.receive"}
if isinstance(data, str):
msg["text"] = data
else:
msg["bytes"] = data
return msg
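# For orientation, a minimal ASGI app of the kind this protocol drives
# (hypothetical app; it only illustrates the websocket.accept / receive /
# send message flow handled by asgi_send and asgi_receive above).
async def echo_app(scope, receive, send):
    assert scope["type"] == "websocket"
    await send({"type": "websocket.accept"})
    while True:
        message = await receive()
        if message["type"] == "websocket.disconnect":
            break
        if message["type"] == "websocket.receive":
            await send({"type": "websocket.send", "text": message.get("text") or ""})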
| 35.743421 | 86 | 0.583655 |
4a19fcdab3c833dedbf14b6a37a96dbeaad0ad52
| 3,462 |
py
|
Python
|
test_broker.py
|
Rspangler89/Kwic-Trader
|
f47ebcd392e9b6cff2db2522508e00e84640a492
|
[
"MIT"
] | 1 |
2021-07-15T23:03:37.000Z
|
2021-07-15T23:03:37.000Z
|
test_broker.py
|
Rspangler89/Kwic-Trader
|
f47ebcd392e9b6cff2db2522508e00e84640a492
|
[
"MIT"
] | null | null | null |
test_broker.py
|
Rspangler89/Kwic-Trader
|
f47ebcd392e9b6cff2db2522508e00e84640a492
|
[
"MIT"
] | 1 |
2021-11-16T15:14:29.000Z
|
2021-11-16T15:14:29.000Z
|
"""
Kwic Trader trading system
Created by:
Robert (Alex) Spangler
Plant City, FL, USA
Spring of 2021
"""
"""
TODO: write more unit tests if necessary
"""
"""
Tests for the portfolioDayChange() method
To set up pytest in pycharm follow the directions below:
1.) Go to File>Settings.
2.) Click on the dropdown menu "Project: Kwic Trader".
3.) Click on "Project Interpreter"
4.) Click on the + sign in the lower left hand corner.
5.) search for pytest in the searchbar.
6.) select pytest then click "Install Package" in the lower left hand corner.
7.) After it states that it's successfully installed, close the "Available Packages" window.
8.) In the settings window select the "Tools" dropdown menu.
9.) Click on "Python Integrated Tools"
10.) Under "Testing" go to the "Default Test Runner" dropdown menu and select "Pytest".
11.) Under "reStructuredText" go to the textbox labeled "Sphinx Working Directory"
and click on the folder icon in the right hand corner.
12.) Select "Kwic Trader" and click okay.
13.) Back in the Settings window click the "Apply" button in the lower right hand corner
then click okay.
14.) Now in the top menu bar go to Run>"Edit Configurations..."
15.) click the + sign in the upper left hand corner and go to the Pytest drop menu.
16.) Under Pytest select "pytest".
17.) Under the "Python Test" menu select "Pytest in Kwic Trader".
18.) Go to the textbox labeled "Working Directory" and click on the folder icon in the right hand corner
and select "Kwic Trader".
19.) In the same menu go to "target" and make sure "Script path" is selected.
20.) In the textbox under "target" click on the folder icon in the right hand corner and select "Kwic Trader".
21.) Click the "Apply" button in the lower right hand corner then click okay.
22.) To run Pytest go to the dropdown menu in the upper right hand corner next to the green ">" run button
and select "pytest in Kwic Trader" the run button.
for more info click the link to the youtube video below
youtube video: https://www.youtube.com/watch?v=WJKLjFwRHIY
"""
"""
pytests created for functions
in the broker module
"""
import broker
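# Command-line equivalent of the PyCharm setup described above (assumes pytest
# is installed in the active environment and the project root is the working
# directory):
#
#   pip install pytest
#   pytest test_broker.py -v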
"""
Tests for the currentPositionValue() method
"""
"""
Tests for the portfolioValue() method
"""
# checks if return type for portfolioValue() is a dictionary
def test_portfolioValue_checkReturnType():
assert type(broker.portfolioValue()) is dict
"""
Tests for the dayChange() method
"""
# checks if return type for dayChange() is a dictionary
def test_dayChange_checkReturnType():
assert type(broker.dayChange()) is dict
"""
Tests for shareChange()
"""
def test_shareChange_checkReturnType():
assert type(broker.shareChange()) is dict
"""
Tests for the totalCost() method
"""
# checks if return type for totalCost() is a float
def test_totalCost_checkReturnType():
    assert type(broker.totalCost()) is dict
"""
Tests for the portProfit() method
"""
# checks if return type for portProfit() is a float
def test_portProfit_checkReturnType():
assert type(broker.portProfit()) is float
"""
Tests for the portPctProfit() method
"""
# checks if return type for portPctProfit() is a float
def test_portPctProfit_checkReturnType():
assert type(broker.portPctProfit()) is float
| 27.259843 | 110 | 0.73628 |
4a19fd2b2985d4d9656278c2e7d1ba36e8276eed
| 9,621 |
py
|
Python
|
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
import argparse
import math
import logging
import pandas as pd
import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from models import BERT_CRF_Linear, BERT_CRF_LSTM, BERT_CRF_BiLSTM
from dataset import GunViolenceDataset
from utils import *
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def train(train_X, train_Y, learning_rate, cuda_available, epochs, model_type, is_balance, batch_size, max_seq_length, patience, min_delta, baseline):
training_set = GunViolenceDataset(train_X, train_Y)
training_generator = DataLoader(
training_set,
batch_size=batch_size,
shuffle=True,
)
iter_in_one_epoch = len(train_X) // batch_size
tokenizer = torch.hub.load(TRANSFORMER_PATH, 'tokenizer', 'bert-base-cased') # cased!
model = None
if model_type == 'LSTM':
model = BERT_CRF_LSTM(3)
elif model_type == 'BiLSTM':
model = BERT_CRF_BiLSTM(3)
else:
model = BERT_CRF_Linear(3) # 3 different labels: B, I, O
if cuda_available:
model.to('cuda') # move data onto GPU
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
losses = []
num_no_improve = 0
best_loss = None
stopping_epoch = 0
for epoch in range(1, epochs + 1):
loss = 0
with tqdm.tqdm(training_generator, unit="batch") as tepoch:
for i, (train_x, train_y) in enumerate(tepoch):
tepoch.set_description(f"Epoch {epoch}")
# prepare model input
tokens, labels = convert_examples_to_features(train_x, train_y, tokenizer, max_seq_length)
indexed_tokens = [tokenizer.convert_tokens_to_ids(token) for token in tokens]
segments_ids = [[0] * len(indexed_token) for indexed_token in indexed_tokens]
if cuda_available:
segments_tensors = torch.tensor(segments_ids).to('cuda')
tokens_tensor = torch.tensor(indexed_tokens).to('cuda')
labels = torch.tensor(labels).to('cuda')
else:
segments_tensors = torch.tensor(segments_ids)
tokens_tensor = torch.tensor(indexed_tokens)
labels = torch.tensor(labels)
# forward pass
y_pred, logits, loss = model(tokens_tensor, segments_tensors, labels)
losses.append((epoch + i / iter_in_one_epoch, loss.item()))
# display loss
tepoch.set_postfix(loss="{:.4f}".format(loss.item()))
# zero out gradients
optimizer.zero_grad()
# backward pass
loss.backward()
# update parameters
optimizer.step()
if not best_loss:
best_loss = loss
elif loss <= best_loss + min_delta:
best_loss = loss
num_no_improve += 1
elif loss < baseline:
num_no_improve += 1
if num_no_improve > patience:
stopping_epoch = epoch
logging.info('Early Stop on epoch {} with the best loss {}'.format(stopping_epoch, best_loss))
break
torch.save(model, 'output/model')
return model, tokenizer, stopping_epoch
def evaluate(model, evaluate_X, evaluate_Y, tokenizer, cuda_available, batch_size, max_seq_length, model_type, lr, epochs):
def _get_prediction(normalized_probs):
# classify B, I, O based on probabilities
labels = []
for sample_prob in normalized_probs:
max_prob = -math.inf
label = None
for i, prob in enumerate(sample_prob):
if max_prob < prob:
max_prob = prob
label = i
labels.append(label)
return labels
model.eval()
num_samples = len(evaluate_X)
evaluate_set = GunViolenceDataset(evaluate_X, evaluate_Y)
evaluate_generator = DataLoader(
evaluate_set,
batch_size=1,
shuffle=True,
)
num_of_tp = num_of_fn = num_of_fp = num_of_tn = 0
for i, (evaluate_x, evaluate_y) in enumerate(evaluate_generator):
tokens, labels = convert_examples_to_features(evaluate_x, evaluate_y, tokenizer, max_seq_length)
indexed_tokens = [tokenizer.convert_tokens_to_ids(token) for token in tokens]
segments_ids = [[0] * len(indexed_token) for indexed_token in indexed_tokens]
if cuda_available:
segments_tensors = torch.tensor(segments_ids).to('cuda')
tokens_tensor = torch.tensor(indexed_tokens).to('cuda')
labels = torch.tensor(labels).to('cuda')
else:
segments_tensors = torch.tensor(segments_ids)
tokens_tensor = torch.tensor(indexed_tokens)
labels = torch.tensor(labels)
with torch.no_grad():
y_pred, logits, loss = model(tokens_tensor, segments_tensors, labels)
normalized_probs = nn.functional.softmax(logits, dim=1)[0]
results = y_pred[0]
# get the real target
original = ''
for i, (x, y) in enumerate(zip(evaluate_x[0].split(), evaluate_y[0].split())):
if y[0] == 'B':
original = x + ' '
index = i
while index + 1 < len(evaluate_y[0].split()) and evaluate_y[0].split()[index + 1][0] == 'I':
original += '{} '.format(evaluate_x[0].split()[index + 1])
index += 1
break
original = original.strip()
probabilities = []
predictions = []
prediction = []
for token, tag, prob in zip(tokens[0], results, normalized_probs):
if tag == 0:
# tag == 'B'
probabilities.append(prob)
if len(prediction) != 0:
predictions.append(prediction)
prediction = []
prediction.append(token)
elif tag == 1:
# tag == 'I'
prediction.append(token)
if len(prediction) != 0:
predictions.append(prediction)
# one sentence might generate multiple targets, eg. shooters or victims
# we need to pick the most possible one, which is the one has the highest probability in 'B' tag
max_prob = -math.inf
max_prob_ind = 0
for i, prob in enumerate(probabilities):
if max_prob < prob[0]:
max_prob_ind = i
max_prob = prob[0]
# calculate true positive, false positive, true negative, false negative
result = ''
if len(predictions) != 0:
result = tokenizer.convert_tokens_to_string(predictions[max_prob_ind])
if result == original:
num_of_tp += 1
else:
num_of_fp += 1
else:
if original.strip() != '':
num_of_fn += 1
else:
num_of_tn += 1
accuracy = num_of_tp/num_samples if num_samples != 0 else 0
precision = num_of_tp/(num_of_tp + num_of_fp) if num_of_tp + num_of_fp != 0 else 0
recall = num_of_tp/(num_of_tp + num_of_fn) if num_of_tp + num_of_fn != 0 else 0
with open('victim/output/crf_{}_{}_{}_{}_{}.txt'.format(model_type, lr, epochs, batch_size, max_seq_length), 'w') as wf:
wf.write('tp: {}\n'.format(num_of_tp))
wf.write('tn: {}\n'.format(num_of_tn))
wf.write('fp: {}\n'.format(num_of_fp))
wf.write('fn: {}\n'.format(num_of_fn))
wf.write('total: {}\n'.format(num_samples))
wf.write('correct: {}\n'.format(num_of_tp))
wf.write('accuracy: {}\n'.format(accuracy))
wf.write('precision: {}\n'.format(precision))
wf.write('recall: {}\n'.format(recall))
f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0
wf.write('F1: {}\n'.format(f1))
def get_data(filename):
df = pd.read_csv(filename)
texts = df['texts'].tolist()
labels = df['labels'].tolist()
return texts, labels
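# get_data() expects a CSV with 'texts' and 'labels' columns, where each label
# string holds one whitespace-separated BIO tag per token (illustrative rows
# only, not taken from the real dataset):
#
#   texts,labels
#   "the suspect fled the scene","O B O O O"
#   "two people were injured","B I O O"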
if __name__ == '__main__':
args = handle_arguments()
model = None
tokenizer = None
if not args.model:
        train_X, train_Y = get_data(args.input_dir + '/train.csv')
        dev_X, dev_Y = get_data(args.input_dir + '/dev.csv')
train_X += dev_X
train_Y += dev_Y
model, tokenizer, stopping_epoch = train(
train_X,
train_Y,
args.lr,
args.cuda_available,
args.epochs,
args.model_type,
args.is_balance,
args.batch_size,
args.max_seq_length,
args.patience,
args.min_delta,
args.baseline
)
    else:
        model = torch.load(args.model)
        tokenizer = torch.hub.load(TRANSFORMER_PATH, 'tokenizer', 'bert-base-cased')  # cased!
        # stopping_epoch is only produced by train(); default it when loading a saved model
        stopping_epoch = 0
test_X, test_Y = get_data(args.input_dir + '/test.csv')
eval_results = evaluate(
model,
test_X,
test_Y,
tokenizer,
args.cuda_available,
args.batch_size,
args.max_seq_length,
args.model_type,
args.lr,
stopping_epoch,
    )
| 35.899254 | 150 | 0.566989 |
4a19fe1ff06dcc6437a03413d480b7f6b1fab9f9
| 3,576 |
py
|
Python
|
gary/potential/tests/test_composite.py
|
adrn/gary-old
|
065b371534baa03deeb860893640068d90ba5881
|
[
"MIT"
] | null | null | null |
gary/potential/tests/test_composite.py
|
adrn/gary-old
|
065b371534baa03deeb860893640068d90ba5881
|
[
"MIT"
] | null | null | null |
gary/potential/tests/test_composite.py
|
adrn/gary-old
|
065b371534baa03deeb860893640068d90ba5881
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third party
import astropy.units as u
import pytest
import numpy as np
# This project
from ...integrate import LeapfrogIntegrator, DOPRI853Integrator
from ..core import *
from ..builtin import *
from ...units import solarsystem
class CompositeHelper(object):
def setup(self):
self.units = solarsystem
self.p1 = KeplerPotential(m=1.*u.Msun, units=self.units)
self.p2 = HernquistPotential(m=0.5*u.Msun, c=0.1*u.au,
units=self.units)
def test_shit(self):
potential = self.Cls(one=self.p1, two=self.p2)
q = np.ascontiguousarray(np.array([[1.1,0,0]]).T)
print("val", potential.value(q))
q = np.ascontiguousarray(np.array([[1.1,0,0]]).T)
print("grad", potential.gradient(q))
def test_composite_create(self):
potential = self.Cls()
# Add a point mass with same unit system
potential["one"] = KeplerPotential(units=self.units, m=1.)
with pytest.raises(TypeError):
potential["two"] = "derp"
assert "one" in potential.parameters
assert "m" in potential.parameters["one"]
with pytest.raises(TypeError):
potential.parameters["m"] = "derp"
def test_plot_composite(self):
# TODO: do image comparison or something to compare?
potential = self.Cls()
# Add a kepler potential and a harmonic oscillator
potential["one"] = self.p1
potential["two"] = self.p2
grid = np.linspace(-5.,5)
fig = potential.plot_contours(grid=(grid,0.,0.))
# fig.savefig(os.path.join(plot_path, "composite_kepler_sho_1d.png"))
fig = potential.plot_contours(grid=(grid,grid,0.))
# fig.savefig(os.path.join(plot_path, "composite_kepler_sho_2d.png"))
def test_integrate(self):
potential = self.Cls()
potential["one"] = self.p1
potential["two"] = self.p2
for Integrator in [DOPRI853Integrator, LeapfrogIntegrator]:
w_cy = potential.integrate_orbit([1.,0,0, 0,2*np.pi,0], dt=0.01, n_steps=1000,
Integrator=Integrator, cython_if_possible=True)
w_py = potential.integrate_orbit([1.,0,0, 0,2*np.pi,0], dt=0.01, n_steps=1000,
Integrator=Integrator, cython_if_possible=False)
for i in range(3):
np.testing.assert_allclose(w_cy.pos[i].value, w_cy.pos[i].value)
# ------------------------------------------------------------------------
class TestComposite(CompositeHelper):
Cls = CompositePotential
class TestCComposite(CompositeHelper):
Cls = CCompositePotential
def test_failures():
p = CCompositePotential()
p['derp'] = KeplerPotential(m=1.*u.Msun, units=solarsystem)
with pytest.raises(ValueError):
p['jnsdfn'] = HenonHeilesPotential(units=solarsystem)
def test_lock():
p = CompositePotential()
p['derp'] = KeplerPotential(m=1.*u.Msun, units=solarsystem)
p.lock = True
with pytest.raises(ValueError): # try adding potential after lock
p['herp'] = KeplerPotential(m=2.*u.Msun, units=solarsystem)
p = CCompositePotential()
p['derp'] = KeplerPotential(m=1.*u.Msun, units=solarsystem)
p.lock = True
with pytest.raises(ValueError): # try adding potential after lock
p['herp'] = KeplerPotential(m=2.*u.Msun, units=solarsystem)
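# Sketch of the pattern the composite tests above exercise: build a composite,
# register named components, then evaluate at a position (arbitrary numbers,
# mirroring the fixtures in CompositeHelper).
def _example_composite_value():
    pot = CompositePotential()
    pot["one"] = KeplerPotential(m=1.*u.Msun, units=solarsystem)
    pot["two"] = HernquistPotential(m=0.5*u.Msun, c=0.1*u.au, units=solarsystem)
    q = np.ascontiguousarray(np.array([[1.1, 0, 0]]).T)
    return pot.value(q)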
| 34.057143 | 93 | 0.622483 |
4a19fe9052a7d8d355965b1e7654a665ae5d2036
| 18,757 |
py
|
Python
|
django/db/models/fields/files.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1 |
2019-02-10T19:33:27.000Z
|
2019-02-10T19:33:27.000Z
|
django/db/models/fields/files.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/models/fields/files.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
'"unique" is not a valid argument for %s.' % self.__class__.__name__,
hint=None,
obj=self,
id='E049',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
'"primary_key" is not a valid argument for %s.' % self.__class__.__name__,
hint=None,
obj=self,
id='E050',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length", None) != 100:
kwargs["max_length"] = 100
else:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name):
super(FileField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
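# Illustration of the None-vs-false distinction handled above (an informal
# sketch; ``obj`` and ``upload`` are placeholders):
#
#     field.save_form_data(obj, None)     # "no change": existing file kept
#     field.save_form_data(obj, False)    # "clear": stored as '' in the database
#     field.save_form_data(obj, upload)   # replace with the uploaded file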
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
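# Minimal sketch (assumed model names) of how ``upload_to`` drives
# get_directory_name()/generate_filename() above:
#
#     class Document(models.Model):
#         # strftime placeholders are expanded by get_directory_name()
#         attachment = models.FileField(upload_to='attachments/%Y/%m/%d')
#
#     # a callable upload_to replaces generate_filename() entirely:
#     def user_path(instance, filename):
#         return 'uploads/%s/%s' % (instance.pk, filename)
#
#     class Avatar(models.Model):
#         image = models.FileField(upload_to=user_path)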
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from django.utils.image import Image # NOQA
except ImproperlyConfigured:
return [
checks.Error(
'To use ImageFields, Pillow must be installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install pillow".'),
obj=self,
id='E032',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
super(ImageField, self).contribute_to_class(cls, name)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimension
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
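# Usage sketch (field names are illustrative) tying width_field/height_field
# to update_dimension_fields() above:
#
#     class Photo(models.Model):
#         image = models.ImageField(upload_to='photos',
#                                   width_field='image_width',
#                                   height_field='image_height')
#         image_width = models.PositiveIntegerField(null=True, blank=True)
#         image_height = models.PositiveIntegerField(null=True, blank=True)
#
#     # Assigning a new image triggers ImageFileDescriptor.__set__, which calls
#     # update_dimension_fields(force=True) and refreshes both dimension fields.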
| 38.914938 | 104 | 0.643493 |
4a19fea2096b17cceccb873570ccb19193a37568
| 57 |
py
|
Python
|
quickstats/forms.py
|
kfdm/django-simplestats
|
f3aa26329af56cbb910d86d450ea129a54b271ef
|
[
"MIT"
] | 1 |
2019-06-07T04:55:55.000Z
|
2019-06-07T04:55:55.000Z
|
quickstats/forms.py
|
kfdm/django-simplestats
|
f3aa26329af56cbb910d86d450ea129a54b271ef
|
[
"MIT"
] | 31 |
2016-02-02T22:19:09.000Z
|
2018-05-08T09:52:54.000Z
|
quickstats/forms.py
|
kfdm/django-simplestats
|
f3aa26329af56cbb910d86d450ea129a54b271ef
|
[
"MIT"
] | null | null | null |
from . import models
from django.forms import ModelForm
| 14.25 | 34 | 0.807018 |
4a19ffa0c992068c6b460091a13358f1b83e072a
| 47,339 |
py
|
Python
|
tests/plugins_test.py
|
jshwi/pyaud
|
c3268e56b483661fb8baa6da8c4c96420f0f74b3
|
[
"MIT"
] | 2 |
2021-02-03T01:18:17.000Z
|
2021-06-28T07:24:29.000Z
|
tests/plugins_test.py
|
jshwi/pyaud
|
c3268e56b483661fb8baa6da8c4c96420f0f74b3
|
[
"MIT"
] | 31 |
2021-11-11T15:27:14.000Z
|
2022-03-18T14:44:51.000Z
|
tests/plugins_test.py
|
jshwi/pyaud
|
c3268e56b483661fb8baa6da8c4c96420f0f74b3
|
[
"MIT"
] | null | null | null |
"""
tests.plugins_test
==================
"""
# pylint: disable=too-many-lines,too-many-arguments,cell-var-from-loop
# pylint: disable=too-few-public-methods,unused-variable
import datetime
import os
import random
from pathlib import Path
from typing import Any, List, Tuple
import pytest
import pyaud
import pyaud_plugins
from . import (
CONFPY,
DOCS,
FILES,
INIT,
INITIAL_COMMIT,
NO_ISSUES,
PIPFILE_LOCK,
PUSHING_SKIPPED,
PYAUD_FILES_POPULATE,
PYAUD_PLUGINS_PLUGINS,
README,
REPO,
SP_CALL,
SP_OPEN_PROC,
SP_STDOUT,
files,
)
from .files import EXPECTED_NESTED_TOC
def test_no_files_found(main: Any, nocolorcapsys: Any) -> None:
"""Test the correct output is produced when no file exists.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
main("typecheck")
assert nocolorcapsys.stdout().strip() == "No files found"
@pytest.mark.parametrize(
"contents,expected",
[
(["created"], "created ``whitelist.py``"),
(["", "updated"], "updated ``whitelist.py``"),
(
["up-to-date", "up-to-date"],
"``whitelist.py`` is already up to date",
),
],
ids=("created", "updated", "up_to_date"),
)
def test_write_command(
main: Any,
monkeypatch: Any,
nocolorcapsys: Any,
contents: List[str],
expected: str,
) -> None:
"""Test the ``@write_command`` decorator.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param monkeypatch: Mock patch environment and attributes.
:param contents: Content to write to file.
:param expected: Expected output.
"""
for content in contents:
def mock_write_whitelist(*_: Any, **__: Any) -> None:
with open(
Path.cwd() / os.environ["PYAUD_WHITELIST"],
"w",
encoding="utf-8",
) as fout:
fout.write(content)
monkeypatch.setattr(
"pyaud_plugins.modules.Whitelist.write", mock_write_whitelist
)
main("whitelist")
assert expected in nocolorcapsys.stdout()
def test_make_audit_error(
main: Any, monkeypatch: Any, nocolorcapsys: Any
) -> None:
"""Test errors are handled correctly when running ``pyaud audit``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
monkeypatch.setattr(SP_OPEN_PROC, lambda *_, **__: 1)
pyaud.files.append(Path.cwd() / FILES)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
with pytest.raises(pyaud.exceptions.AuditError):
main("audit")
assert nocolorcapsys.stdout().strip() == "pyaud format"
def test_call_coverage_xml(
main: Any, monkeypatch: Any, patch_sp_print_called: Any, nocolorcapsys: Any
) -> None:
"""Test ``coverage xml`` is called after successful test run.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and
attributes.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
"""
patch_sp_print_called()
mocked_plugins = pyaud.plugins.mapping()
mocked_plugins["tests"] = lambda *_, **__: 0 # type: ignore
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_plugins)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
main("coverage")
assert nocolorcapsys.stdout().strip() == "<Subprocess (coverage)> xml"
def test_make_deploy_all(
main: Any, monkeypatch: Any, nocolorcapsys: Any, call_status: Any
) -> None:
"""Test the correct commands are run when running ``pyaud deploy``.
Patch functions with ``call_status`` to remove functionality from
function and only return a zero exit-status. ``make_deploy_*``
functions should still be able to print what functions are being run
as announced to the console in cyan.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param call_status: Patch function to not do anything.
Optionally returns non-zero exit code (0 by
default).
"""
modules = "deploy-cov", "deploy-docs"
mocked_plugins = pyaud.plugins.mapping()
for module in modules:
mocked_plugins[module] = call_status(module)
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_plugins)
main("deploy")
out = nocolorcapsys.stdout().splitlines()
for module in modules:
assert f"{pyaud.__name__} {module}" in out
def test_make_deploy_all_fail(
main: Any, call_status: Any, monkeypatch: Any, nocolorcapsys: Any
) -> None:
"""Test ``pyaud deploy`` fails correctly when encountering an error.
:param main: Patch package entry point.
:param call_status: Patch function to return specific exit-code.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
deploy_module = "deploy-docs"
mock_plugins = pyaud.plugins.mapping()
mock_plugins[deploy_module] = call_status(deploy_module, 1)
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mock_plugins)
main("deploy")
out = nocolorcapsys.stdout().splitlines()
assert f"{pyaud.__name__} {deploy_module}" in out
def test_make_docs_no_docs(main: Any, nocolorcapsys: Any) -> None:
"""Test correct message is produced.
Test when running ``pyaud docs`` when no docs are present.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
Path(Path.cwd() / FILES).touch()
main("docs")
assert nocolorcapsys.stdout().strip() == "No docs found"
def test_suppress(
main: Any, monkeypatch: Any, nocolorcapsys: Any, make_tree: Any
) -> None:
"""Test that audit proceeds through errors with ``--suppress``.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param monkeypatch: Mock patch environment and attributes.
:param make_tree: Create directory tree from dict mapping.
"""
make_tree(Path.cwd(), {FILES: None, "docs": {CONFPY: None}})
pyaud.files.append(Path.cwd() / FILES)
fix_modules = 6
monkeypatch.setattr(SP_OPEN_PROC, lambda *_, **__: 1)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
main("audit", "--suppress")
assert (
len(
[
i
for i in nocolorcapsys.stderr().splitlines()
if "Failed: returned non-zero exit status" in i
]
)
== fix_modules
)
def test_coverage_no_tests(main: Any, nocolorcapsys: Any) -> None:
"""Test the correct output is produced when no tests exists.
Ensure message is displayed if ``pytest`` could not find a valid
test folder.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
main("coverage")
assert nocolorcapsys.stdout().strip() == (
"No tests found\nNo coverage to report"
)
def test_make_docs_toc_fail(
main: Any, monkeypatch: Any, make_tree: Any
) -> None:
"""Test that error message is produced when ``make_toc`` fails.
Test process stops when ``make_toc`` fails before running the main
``make_docs`` process.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param make_tree: Create directory tree from dict mapping.
"""
make_tree(Path.cwd(), {"docs": {CONFPY: None}})
monkeypatch.setattr(SP_OPEN_PROC, lambda *_, **__: 1)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
with pytest.raises(pyaud.exceptions.AuditError) as err:
main("docs")
assert str(err.value) == "pyaud docs did not pass all checks"
def test_make_docs_rm_cache(
main: Any, monkeypatch: Any, call_status: Any, make_tree: Any
) -> None:
"""Test ``make_docs`` removes all builds before starting a new one.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param call_status: Patch function to return specific exit-code.
:param make_tree: Create directory tree from dict mapping.
"""
builddir = Path.cwd() / os.environ["BUILDDIR"]
readme = Path.cwd() / README
# disable call to ``Subprocess`` to only create ./docs/_build
# directory so tests can continue
def _call(*_: Any, **__: Any) -> int:
builddir.mkdir(parents=True)
return 0
# patch ``make_toc`` and ``Subprocess.call``
mocked_plugins = pyaud.plugins.mapping()
mocked_plugins["toc"] = call_status("toc")
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_plugins)
monkeypatch.setattr(SP_CALL, _call)
make_tree(Path.cwd(), {"docs": {CONFPY: None, "readme.rst": None}})
with open(readme, "w", encoding="utf-8") as fout:
fout.write(files.README_RST)
builddir.mkdir(parents=True)
Path(builddir / "marker").touch()
freeze_docs_build = builddir.iterdir()
# to test creation of README.rst content needs to be written to file
with open(readme, "w", encoding="utf-8") as fout:
fout.write(files.README_RST)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
main("docs")
assert freeze_docs_build != builddir.iterdir()
def test_make_files(
main: Any, monkeypatch: Any, call_status: Any, nocolorcapsys: Any
) -> None:
"""Test correct commands are executed when running ``make_files``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param call_status: Patch function to return specific exit-code.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
file_funcs = "requirements", "toc", "whitelist"
mocked_modules = pyaud.plugins.mapping()
for file_func in file_funcs:
mocked_modules[file_func] = call_status(file_func)
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_modules)
main("files")
assert (
nocolorcapsys.stdout()
== "\npyaud requirements\n\npyaud toc\n\npyaud whitelist\n"
)
def test_make_format(main: Any) -> None:
"""Test ``make_format`` when successful and when it fails.
:param main: Patch package entry point.
"""
file = Path.cwd() / FILES
with open(file, "w", encoding="utf-8") as fout:
fout.write(files.UNFORMATTED)
pyaud.files.append(file)
with pytest.raises(pyaud.exceptions.AuditError):
main("format")
def test_pipfile2req_commands(
main: Any, patch_sp_print_called: Any, nocolorcapsys: Any
) -> None:
"""Test that the correct commands are executed.
:param main: Patch package entry point.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
"""
requirements = Path.cwd() / os.environ["PYAUD_REQUIREMENTS"]
pipfile_lock = Path.cwd() / PIPFILE_LOCK
with open(pipfile_lock, "w", encoding="utf-8") as fout:
fout.write(files.PIPFILE_LOCK)
patch_sp_print_called()
main("requirements")
out = nocolorcapsys.stdout()
assert all(
e in out
for e in (
f"Updating ``{requirements}``",
f"<Subprocess (pipfile2req)> {pipfile_lock}",
f"<Subprocess (pipfile2req)> {pipfile_lock} --dev",
f"created ``{requirements.name}``",
)
)
@pytest.mark.parametrize(
"args,add,first,last",
[
([], [], "pyaud format", "pyaud docs"),
(["--clean"], ["clean"], "pyaud clean", "pyaud docs"),
(["--deploy"], ["deploy"], "pyaud format", "pyaud deploy"),
(
["--clean", "--deploy"],
["clean", "deploy"],
"pyaud clean",
"pyaud deploy",
),
],
ids=["no_args", "clean", "deploy", "clean_and_deploy"],
)
def test_audit_modules(
monkeypatch: Any,
nocolorcapsys: Any,
main: Any,
call_status: Any,
args: List[str],
add: List[str],
first: str,
last: str,
) -> None:
"""Test that the correct functions are called with ``make_audit``.
Mock all functions in ``MODULES`` to do nothing so the test can
confirm that all the functions that are meant to be run are run with
the output that is displayed to the console in cyan. Confirm what
the first and last functions being run are with the parametrized
values.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param main: Patch package entry point.
:param call_status: Patch function to not do anything.
Optionally returns non-zero exit code (0 by
default).
:param args: Arguments for ``pyaud audit``.
:param add: Function to add to the ``audit_modules``
list
:param first: Expected first function executed.
:param last: Expected last function executed.
"""
mocked_modules = pyaud.plugins.mapping()
modules = list(pyaud.config.DEFAULT_CONFIG["audit"]["modules"])
modules.extend(add)
for module in modules:
mocked_modules[module] = call_status(module)
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_modules)
main("audit", *args)
output = [i for i in nocolorcapsys.stdout().splitlines() if i != ""]
assert all(f"pyaud {i}" in output for i in modules)
assert output[0] == first
assert output[-1] == last
@pytest.mark.parametrize(
"exclude,expected",
[
([], ""),
(
[".env_diff", "instance_diff", ".cache_diff"],
"Removing .cache_diff\n"
"Removing .env_diff\n"
"Removing instance_diff\n",
),
],
ids=["no-exclude", "exclude"],
)
def test_clean_exclude(
main: Any, nocolorcapsys: Any, exclude: List[str], expected: str
) -> None:
"""Test clean with and without exclude parameters.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param exclude: Files to exclude from ``git clean``.
:param expected: Expected output from ``pyaud clean``.
"""
Path(Path.cwd() / README).touch()
pyaud.git.init(devnull=True) # type: ignore
pyaud.git.add(".") # type: ignore
pyaud.git.commit("-m", "Initial commit", devnull=True) # type: ignore
for exclusion in exclude:
Path(Path.cwd() / exclusion).touch()
main("clean")
assert nocolorcapsys.stdout() == expected
def test_readme_replace() -> None:
"""Test that ``LineSwitch`` properly edits a file."""
path = Path.cwd() / README
def _test_file_index(title: str, underline: str) -> None:
with open(path, encoding="utf-8") as fin:
lines = fin.read().splitlines()
assert lines[0] == title
assert lines[1] == len(underline) * "="
repo = "repo"
readme = "README"
repo_underline = len(repo) * "="
readme_underline = len(readme) * "="
with open(path, "w", encoding="utf-8") as fout:
fout.write(f"{repo}\n{repo_underline}\n")
_test_file_index(repo, repo_underline)
with pyaud_plugins.modules.LineSwitch(
path, {0: readme, 1: readme_underline}
):
_test_file_index(readme, readme_underline)
_test_file_index(repo, repo_underline)
def test_append_whitelist(
main: Any, nocolorcapsys: Any, patch_sp_print_called: Any
) -> None:
"""Test that whitelist file argument is appended ``vulture`` call.
Test for when whitelist.py exists and is not appended if it does
not, thus avoiding an error.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
"""
project_dir = Path.cwd()
whitelist = project_dir / os.environ["PYAUD_WHITELIST"]
Path(project_dir / FILES).touch()
whitelist.touch()
pyaud.git.add(".") # type: ignore
pyaud.files.populate()
patch_sp_print_called()
main("unused")
assert str(whitelist) in nocolorcapsys.stdout()
def test_mypy_expected(
main: Any, patch_sp_print_called: Any, nocolorcapsys: Any
) -> None:
"""Test that the ``mypy`` command is correctly called.
:param main: Patch package entry point.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
"""
path = Path(os.getcwd(), FILES)
pyaud.files.append(path)
patch_sp_print_called()
main("typecheck")
assert (
f"<Subprocess (mypy)> --ignore-missing-imports {path}"
in nocolorcapsys.stdout()
)
@pytest.mark.parametrize(
"relpath,expected",
[
(Path("tests"), "No tests found"),
(Path("tests", "test.py"), "No tests found"),
(Path("tests", "filename.py"), "No tests found"),
(Path("tests", "_test.py"), "<Subprocess (pytest)>"),
(Path("tests", "test_.py"), "<Subprocess (pytest)>"),
(Path("tests", "three_test.py"), "<Subprocess (pytest)>"),
(Path("tests", "test_four.py"), "<Subprocess (pytest)>"),
],
ids=(
"tests",
"tests/test.py",
"tests/filename.py",
"tests/test_.py",
"tests/_test.py",
"tests/three_test.py",
"tests/test_four.py",
),
)
def test_pytest_is_tests(
monkeypatch: Any,
main: Any,
nocolorcapsys: Any,
patch_sp_print_called: Any,
relpath: Path,
expected: str,
) -> None:
"""Test that ``pytest`` is correctly called.
Test that ``pytest`` is not called if:
- there is a tests dir without tests
- incorrect names within tests dir
- no tests at all within tests dir.
:param monkeypatch: Mock patch environment and
attributes.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
:param relpath: Relative path to file.
:param expected: Expected stdout.
"""
pyaud.files.append(Path.cwd() / relpath)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
patch_sp_print_called()
main("tests")
assert nocolorcapsys.stdout().strip() == expected
def test_make_toc(
monkeypatch: Any, main: Any, patch_sp_print_called: Any, make_tree: Any
) -> None:
"""Test that the default toc file is edited correctly.
Ensure additional files generated by ``sphinx-api`` doc are removed.
:param monkeypatch: Mock patch environment and
attributes.
:param main: Patch package entry point.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
:param make_tree: Create directory tree from dict
mapping.
"""
project_dir = Path.cwd()
modules = "modules.rst"
path = project_dir / DOCS / f"{REPO}.rst"
make_tree(project_dir, {"docs": {modules: None, CONFPY: None}})
with open(path, "w", encoding="utf-8") as fout:
assert fout.write(files.DEFAULT_TOC)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
patch_sp_print_called()
main("toc")
with open(path, encoding="utf-8") as fin:
assert fin.read() == files.ALTERED_TOC
assert not Path(project_dir / DOCS / modules).is_file()
def test_make_requirements(
monkeypatch: Any, main: Any, patch_sp_output: Any, nocolorcapsys: Any
) -> None:
"""Test that requirements.txt file is correctly edited.
Tested for use with ``pipfile2req``.
:param monkeypatch: Mock patch environment and attributes.
:param main: Patch package entry point.
:param patch_sp_output: Patch ``Subprocess`` so that ``call`` sends
expected stdout out to self.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
path = Path.cwd() / os.environ["PYAUD_REQUIREMENTS"]
with open(Path.cwd() / PIPFILE_LOCK, "w", encoding="utf-8") as fout:
fout.write(files.PIPFILE_LOCK)
patch_sp_output(files.PIPFILE2REQ_PROD, files.PIPFILE2REQ_DEV)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
main("requirements")
assert nocolorcapsys.stdout() == (
f"Updating ``{path}``\ncreated ``{path.name}``\n"
)
with open(path, encoding="utf-8") as fin:
assert fin.read() == files.REQUIREMENTS
def test_make_whitelist(
monkeypatch: Any, nocolorcapsys: Any, make_tree: Any
) -> None:
"""Test a whitelist.py file is created properly.
Test for when piping data from ``vulture --make-whitelist``.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping
ANSI color codes.
:param make_tree: Create directory tree from dict mapping.
"""
project_dir = Path.cwd()
whitelist = project_dir / os.environ["PYAUD_WHITELIST"]
make_tree(
project_dir,
{
"tests": {"conftest.py": None, FILES: None},
"pyaud": {"src": {"__init__.py": None, "modules.py": None}},
},
)
pyaud.git.init(devnull=True) # type: ignore
pyaud.git.add(".") # type: ignore
pyaud.files.populate()
monkeypatch.setattr(
"pyaud._utils.Subprocess.stdout",
lambda *_, **__: files.Whitelist.be8a443,
)
pyaud.plugins.get("whitelist")()
assert nocolorcapsys.stdout() == (
f"Updating ``{whitelist}``\ncreated ``{whitelist.name}``\n"
)
with open(whitelist, encoding="utf-8") as fin:
assert fin.read() == files.Whitelist.be8a443_all()
def test_pylint_colorized(main: Any, capsys: Any) -> None:
"""Test that color codes are produced with ``process.PIPE``.
Test ``pylint --output-format=colorized``. If ``colorama`` is
installed and a process calls ``colorama.init()`` a process pipe
will be stripped. Using environment variable ``PYCHARM_HOSTED`` for
now as a workaround as this voids this action.
:param main: Patch package entry point.
:param capsys: Capture sys output.
"""
path = Path.cwd() / FILES
with open(path, "w", encoding="utf-8") as fout:
fout.write("import this_package_does_not_exist")
pyaud.files.append(path)
main("lint", "--suppress")
output = capsys.readouterr()[0]
assert all(
i in output
for i in ["\x1b[7;33m", "\x1b[0m", "\x1b[1m", "\x1b[1;31m", "\x1b[35m"]
)
def test_isort_imports(main: Any, nocolorcapsys: Any) -> None:
"""Test isort properly sorts file imports.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
path = Path.cwd() / FILES
with open(path, "w", encoding="utf-8") as fout:
fout.write(files.IMPORTS_UNSORTED)
pyaud.files.append(path)
main("imports", "--fix")
with open(path, encoding="utf-8") as fin:
assert (
files.IMPORTS_SORTED.splitlines()[1:]
== fin.read().splitlines()[:20]
)
out = nocolorcapsys.stdout()
assert all(i in out for i in (f"Fixed {path.name}", NO_ISSUES))
main("imports")
def test_readme(main: Any, nocolorcapsys: Any) -> None:
"""Test standard README and return values.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
main("readme")
assert (
nocolorcapsys.stdout().strip() == "No README.rst found in project root"
)
with open(Path.cwd() / README, "w", encoding="utf-8") as fout:
fout.write(files.CODE_BLOCK_TEMPLATE)
main("readme")
assert (
"\n".join([i.strip() for i in nocolorcapsys.stdout().splitlines()])
== files.CODE_BLOCK_EXPECTED
)
@pytest.mark.parametrize(
"module,content",
[
("format", files.UNFORMATTED),
("imports", files.IMPORTS_UNSORTED),
("format-str", files.FORMAT_STR_FUNCS_PRE),
("format-docs", files.DOCFORMATTER_EXAMPLE),
],
ids=["format", "imports", "format-str", "format-docs"],
)
def test_py_audit_error(
main: Any, make_tree: Any, module: str, content: str
) -> None:
"""Test ``AuditError`` message.
:param main: Patch package entry point.
:param make_tree: Create directory tree from dict mapping.
:param module: [<module>].__name__.
:param content: Content to write to file.
"""
project_dir = Path.cwd()
file = project_dir / FILES
make_tree(project_dir, {"tests": {"_test.py": None}, REPO: {INIT: None}})
with open(file, "w", encoding="utf-8") as fout:
fout.write(content)
pyaud.git.add(".") # type: ignore
pyaud.files.populate()
with pytest.raises(pyaud.exceptions.AuditError) as err:
main(module)
stderr = str(err.value)
assert stderr == f"pyaud {module} did not pass all checks"
assert "Path" not in stderr
@pytest.mark.usefixtures("init_remote")
def test_deploy_not_master(
main: Any, monkeypatch: Any, nocolorcapsys: Any
) -> None:
"""Test that deployment is skipped when branch is not ``master``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
monkeypatch.setattr("pyaud.branch", lambda: "not_master")
main("deploy-docs")
out = [i.strip() for i in nocolorcapsys.stdout().splitlines()]
assert all(
i in out for i in ["Documentation not for master", PUSHING_SKIPPED]
)
@pytest.mark.usefixtures("init_remote")
def test_deploy_master_not_set(
main: Any, monkeypatch: Any, nocolorcapsys: Any
) -> None:
"""Test correct notification is displayed.
Test for when essential environment variables are not set in
``master``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
monkeypatch.setenv("PYAUD_GH_NAME", "")
monkeypatch.setenv("PYAUD_GH_EMAIL", "")
monkeypatch.setenv("PYAUD_GH_TOKEN", "")
main("deploy-docs")
out = nocolorcapsys.stdout().splitlines()
assert all(
i in out
for i in [
"The following is not set:",
"- PYAUD_GH_NAME",
"- PYAUD_GH_EMAIL",
"- PYAUD_GH_TOKEN",
PUSHING_SKIPPED,
]
)
@pytest.mark.usefixtures("init_remote")
def test_deploy_master(
main: Any, monkeypatch: Any, nocolorcapsys: Any
) -> None:
"""Test docs are properly deployed.
Test for when environment variables are set and checked out at
``master``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
project_dir = Path.cwd()
readme = project_dir / README
mock_plugins = pyaud.plugins.mapping()
def _docs(*_: Any, **__: Any):
Path(Path.cwd() / os.environ["BUILDDIR"] / "html").mkdir(parents=True)
mock_plugins["docs"] = _docs # type: ignore
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mock_plugins)
readme.touch() # force stash
pyaud.git.add(".") # type: ignore
pyaud.git.commit("-m", INITIAL_COMMIT, devnull=True) # type: ignore
with open(readme, "w", encoding="utf-8") as fout:
fout.write(files.README_RST)
main("deploy-docs", "--fix")
out = nocolorcapsys.stdout().splitlines()
assert all(
i in out
for i in [
"Pushing updated documentation",
"Documentation Successfully deployed",
]
)
main("deploy-docs", "--fix")
out = nocolorcapsys.stdout().splitlines()
assert all(
i in out
for i in [
"No difference between local branch and remote",
PUSHING_SKIPPED,
]
)
@pytest.mark.parametrize(
"rounds,expected",
[
(
1,
[
"Pushing updated documentation",
"Documentation Successfully deployed",
],
),
(
2,
["No difference between local branch and remote", PUSHING_SKIPPED],
),
],
ids=["stashed", "multi"],
)
@pytest.mark.usefixtures("init_remote")
def test_deploy_master_param(
main: Any,
monkeypatch: Any,
nocolorcapsys: Any,
rounds: int,
expected: List[str],
) -> None:
"""Check that nothing happens when not checkout at at master.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param rounds: How many times ``make_deploy_docs`` needs to
be run.
:param expected: Expected stdout result.
"""
path = Path.cwd()
mock_plugins = pyaud.plugins.mapping()
def _docs(*_: Any, **__: Any) -> None:
Path(path / os.environ["BUILDDIR"] / "html").mkdir(parents=True)
mock_plugins["docs"] = _docs # type: ignore
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mock_plugins)
with open(path / README, "w", encoding="utf-8") as fout:
fout.write(files.README_RST)
Path(path, FILES).touch()
pyaud.git.add(".", devnull=True) # type: ignore
pyaud.git.commit("-m", INITIAL_COMMIT, devnull=True) # type: ignore
for _ in range(rounds):
main("deploy-docs", "--fix")
out = [i.strip() for i in nocolorcapsys.stdout().splitlines()]
assert all(i in out for i in expected)
def test_deploy_cov_report_token(
main: Any, monkeypatch: Any, nocolorcapsys: Any, patch_sp_print_called: Any
) -> None:
"""Test ``make_deploy_cov`` when ``CODECOV_TOKEN`` is set.
Test for when ``CODECOV_TOKEN`` is set and a coverage.xml file
exists.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and
attributes.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
"""
Path(Path.cwd() / os.environ["PYAUD_COVERAGE_XML"]).touch()
patch_sp_print_called()
monkeypatch.setenv("CODECOV_TOKEN", "token")
main("deploy-cov")
out = nocolorcapsys.stdout()
assert all(e in out for e in ["<Subprocess (codecov)>", "--file"])
def test_deploy_cov_no_token(main: Any, nocolorcapsys: Any) -> None:
"""Test ``make_deploy_cov``.
Test for when ``CODECOV_TOKEN`` is not set and only a coverage.xml file
exists.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
Path(Path.cwd() / os.environ["PYAUD_COVERAGE_XML"]).touch()
main("deploy-cov")
out = nocolorcapsys.stdout()
assert all(e in out for e in ["CODECOV_TOKEN not set"])
def test_deploy_cov_no_report_token(main: Any, nocolorcapsys: Any) -> None:
"""Test ``make_deploy_cov``.
Test for when ``CODECOV_TOKEN`` is not set and a coverage.xml file
does not exist.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
main("deploy-cov")
out = nocolorcapsys.stdout()
assert all(e in out for e in ["No coverage report found"])
def test_make_format_success(
main: Any, nocolorcapsys: Any, patch_sp_print_called: Any
) -> None:
"""Test ``Format`` when successful.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while
stripping ANSI color codes.
:param patch_sp_print_called: Patch ``Subprocess.call`` to only
announce what is called.
"""
pyaud.files.append(Path.cwd() / FILES)
patch_sp_print_called()
main("format")
nocolorcapsys.readouterr()
def test_make_format_docs_fail(main: Any) -> None:
"""Test ``make_format`` when it fails.
Ensure process fails when unformatted docstrings are found.
:param main: Patch package entry point.
"""
path = Path.cwd() / FILES
with open(path, "w", encoding="utf-8") as fout:
fout.write(files.DOCFORMATTER_EXAMPLE)
pyaud.files.append(path)
with pytest.raises(pyaud.exceptions.AuditError):
main("format-docs")
def test_make_format_docs_suppress(main: Any, nocolorcapsys: Any) -> None:
"""Test ``make_format`` when running with ``-s/--suppress``.
Ensure process announces it failed but does not actually return a
non-zero exit-status.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
path = Path.cwd() / FILES
with open(path, "w", encoding="utf-8") as fout:
fout.write(files.DOCFORMATTER_EXAMPLE)
pyaud.files.append(path)
main("format-docs", "--suppress")
assert (
nocolorcapsys.stderr().strip()
== "Failed: returned non-zero exit status 3"
)
def test_make_generate_rcfile(nocolorcapsys: Any):
"""Test for correct output when running ``generate-rcfile``.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
pyaud.plugins.get("generate-rcfile")()
assert (
nocolorcapsys.stdout().strip()
== pyaud.config.toml.dumps(pyaud.config.DEFAULT_CONFIG).strip()
)
def test_isort_and_black(main: Any) -> None:
"""Test ``AuditError`` is raised.
For failed checks when looking for formatted inputs run through
``isort`` and ``Black``.
:param main: Patch package entry point.
"""
path = Path.cwd() / FILES
with open(path, "w", encoding="utf-8") as fout:
fout.write(files.BEFORE_ISORT)
pyaud.files.append(path)
with pytest.raises(pyaud.exceptions.AuditError):
main("imports")
def test_isort_and_black_fix(main: Any, nocolorcapsys: Any) -> None:
"""Test file is correctly fixed for failed check.
When looking for formatted inputs run through ``isort`` and
``Black`` ensure no errors are raised, and output is as expected.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
with open(Path.cwd() / FILES, "w", encoding="utf-8") as fout:
fout.write(files.BEFORE_ISORT)
pyaud.files.append(Path.cwd() / FILES)
main("imports", "--suppress", "--fix")
out = nocolorcapsys.stdout()
assert f"Fixed {Path(Path.cwd() / FILES).relative_to(Path.cwd())}" in out
def test_make_format_fix(main: Any) -> None:
"""Test ``make_format`` when it fails.
:param main: Patch package entry point.
"""
with open(Path.cwd() / FILES, "w", encoding="utf-8") as fout:
fout.write(files.UNFORMATTED)
pyaud.files.append(Path.cwd() / FILES)
main("format", "--fix")
with open(Path.cwd() / FILES, encoding="utf-8") as fin:
assert fin.read().strip() == files.UNFORMATTED.replace("'", '"')
def test_make_unused_fix(
main: Any, nocolorcapsys: Any, make_tree: Any
) -> None:
"""Test ``make_unused`` when ``-f/--fix`` is provided.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
package = Path.cwd() / "repo"
make_tree(Path.cwd(), {"repo": {INIT: None}})
file = package / FILES
with open(file, "w", encoding="utf-8") as fout:
fout.write(files.UNFORMATTED) # also an unused function
pyaud.files.append(file)
main("unused", "--fix")
assert nocolorcapsys.stdout() == (
"{}:1: unused function 'reformat_this' (60% confidence)\n"
"Updating ``{}``\n"
"created ``whitelist.py``\n"
"Success: no issues found in 1 source files\n".format(
file, Path.cwd() / os.environ["PYAUD_WHITELIST"]
)
)
with open(
Path.cwd() / os.environ["PYAUD_WHITELIST"], encoding="utf-8"
) as fin:
assert fin.read().strip() == (
"reformat_this # unused function (repo/file.py:1)"
)
def test_make_unused_fail(main: Any) -> None:
"""Test ``make_unused`` with neither ``--fix`` or ``--suppress``.
:param main: Patch package entry point.
"""
with open(Path.cwd() / FILES, "w", encoding="utf-8") as fout:
fout.write(files.UNFORMATTED) # also an unused function
pyaud.files.append(Path.cwd() / FILES)
with pytest.raises(pyaud.exceptions.AuditError) as err:
main("unused")
assert str(err.value) == "pyaud unused did not pass all checks"
def test_make_format_docs_fix(main: Any, nocolorcapsys: Any) -> None:
"""Test ``make_format`` when running with ``-f/--fix``.
Ensure process fixes checked failure.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
pyaud.files.append(Path.cwd() / FILES)
with open(Path.cwd() / FILES, "w", encoding="utf-8") as fout:
fout.write(files.DOCFORMATTER_EXAMPLE)
main("format-docs", "--fix")
assert nocolorcapsys.stdout().strip() == NO_ISSUES
def test_format_str_fix(main: Any, nocolorcapsys: Any) -> None:
"""Test fix audit when f-strings can be created with ``flynt``.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
"""
with open(Path.cwd() / FILES, "w", encoding="utf-8") as fout:
fout.write(files.FORMAT_STR_FUNCS_PRE)
pyaud.git.add(".", devnull=True) # type: ignore
pyaud.files.populate()
main("format-str", "--fix")
nocolorcapsys.stdout()
with open(Path.cwd() / FILES, encoding="utf-8") as fin:
assert fin.read() == files.FORMAT_STR_FUNCS_POST
def test_custom_modules(
monkeypatch: Any, nocolorcapsys: Any, main: Any, call_status: Any
) -> None:
"""Test the ``custom`` arg runs what is configured in toml file.
:param monkeypatch: Mock patch environment and attributes.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param main: Patch package entry point.
:param call_status: Patch function to not do anything.
Optionally returns non-zero exit code (0 by
default).
"""
mocked_modules = pyaud.plugins.mapping()
modules = list(pyaud.config.DEFAULT_CONFIG["audit"]["modules"])
random.shuffle(modules)
pyaud.config.toml["audit"]["modules"] = modules
for module in modules:
mocked_modules[module] = call_status(module)
monkeypatch.setattr(PYAUD_PLUGINS_PLUGINS, mocked_modules)
# make ``load_config`` do nothing so it does not override the toml
# config above
monkeypatch.setattr("pyaud.config.load_config", lambda *_: None)
main("audit")
out = [i for i in nocolorcapsys.stdout().splitlines() if i != ""]
assert out == [f"pyaud {i}" for i in modules]
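# Sketch of the configuration this test exercises (keys mirror
# pyaud.config.DEFAULT_CONFIG["audit"]["modules"]; the module list shown is
# illustrative, not the shipped default):
#
#     [audit]
#     modules = ["format", "lint", "typecheck"]
#
# ``pyaud audit`` then runs the listed plugins in order, as asserted above.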
@pytest.mark.parametrize(
"arg,expected",
[
("", pyaud.plugins.registered()),
("audit", ["audit -- Read from [audit] key in config"]),
("all", pyaud.plugins.registered()),
],
ids=["no-pos", "module", "all-modules"],
)
def test_help_with_plugins(
main: Any, nocolorcapsys: Any, arg: str, expected: Tuple[str, ...]
) -> None:
"""Test expected output for help after plugins have been loaded.
Test no positional argument for json array of keys.
Test ``audit`` positional argument and docstring display.
Test all and display of all module docstrings.
:param main: Patch package entry point.
:param nocolorcapsys: Capture system output while stripping ANSI
color codes.
:param arg: Positional argument for ```pyaud modules``.
:param expected: Expected result when calling command.
"""
with pytest.raises(SystemExit):
main("modules", arg)
out = nocolorcapsys.stdout()
assert any(i in out for i in expected)
def test_audit_class_error(main: Any, monkeypatch: Any) -> None:
"""Test errors are handled correctly when running ``pyaud audit``.
:param main: Patch package entry point.
:param monkeypatch: Mock patch environment and attributes.
"""
monkeypatch.setattr(SP_OPEN_PROC, lambda *_, **__: 1)
pyaud.files.append(Path.cwd() / FILES)
monkeypatch.setattr(PYAUD_FILES_POPULATE, lambda: None)
with pytest.raises(pyaud.exceptions.AuditError):
main("lint")
def test_no_exe_provided(monkeypatch: Any) -> None:
"""Test default value for exe property.
:param monkeypatch: Mock patch environment and attributes.
"""
unique = datetime.datetime.now().strftime("%d%m%YT%H%M%S")
monkeypatch.setattr(SP_OPEN_PROC, lambda *_, **__: 1)
pyaud.files.append(Path.cwd() / FILES)
# noinspection PyUnusedLocal
@pyaud.plugins.register(name=unique)
class Plugin(pyaud.plugins.Audit):
"""Nothing to do."""
def audit(self, *args: Any, **kwargs: bool) -> int:
"""Nothing to do."""
assert pyaud.plugins.get(unique).exe == []
def test_download_missing_stubs(monkeypatch: Any, main: Any) -> None:
"""Test for coverage on missing stubs file.
:param monkeypatch: Mock patch environment and attributes.
:param main: Patch package entry point.
"""
path = Path(os.getcwd(), FILES)
pyaud.files.append(path)
monkeypatch.setattr(SP_CALL, lambda *_, **__: 1)
monkeypatch.setattr(
SP_STDOUT, lambda _: ["error: Library stubs not installed for"]
)
main("typecheck")
def test_typecheck_re_raise_err(monkeypatch: Any, main: Any) -> None:
"""Test for re-raise of error for non stub library errors.
:param monkeypatch: Mock patch environment and attributes.
:param main: Patch package entry point.
"""
path = Path(os.getcwd(), FILES)
pyaud.files.append(path)
monkeypatch.setattr(SP_CALL, lambda *_, **__: 1)
monkeypatch.setattr(SP_STDOUT, lambda _: [])
with pytest.raises(pyaud.exceptions.AuditError) as err:
main("typecheck")
assert str(err.value) == "pyaud typecheck did not pass all checks"
def test_nested_toc(main: Any, make_tree: Any) -> None:
"""Test that only one file is completed with a nested project.
Prior to this commit only ``repo.src.rst`` would be removed.
This commit will remove any file and copy its contents to the
single <NAME>.rst file e.g. ``repo.routes.rst`` is removed and
``repo.routes``, ``repo.routes.auth``, ``repo.routes.post``, and
``repo.routes.views`` are added to repo.rst.
:param main: Patch package entry point.
:param make_tree: Create directory tree from dict mapping.
"""
make_tree(
Path.cwd(),
{
"docs": {CONFPY: None},
"repo": {
"routes": {
"auth.py": None,
"__init__.py": None,
"post.py": None,
"views.py": None,
},
"admin.py": None,
"cli.py": None,
"config.py": None,
"deps.py": None,
"exceptions.py": None,
"extensions.py": None,
"forms.py": None,
"__init__.py": None,
"log.py": None,
"mail.py": None,
"models.py": None,
"navbar.py": None,
"redirect.py": None,
"renderers.py": None,
"security.py": None,
"shell.py": None,
"tasks.py": None,
"user.py": None,
},
},
)
main("toc")
assert not Path(Path.cwd() / DOCS / "repo.routes.rst").is_file()
with open(Path.cwd() / DOCS / f"{REPO}.rst", encoding="utf-8") as fin:
assert fin.read() == EXPECTED_NESTED_TOC
| 34.081353 | 79 | 0.612286 |
4a19ffab6b4ba2c749beaee3b9fa91f83ca6c0ab
| 4,062 |
py
|
Python
|
webapp/graphite/events/views.py
|
romanek-adam/graphite-web
|
f6d7d16551a6953a5d0a1c19978efbf93fd3f869
|
[
"Apache-2.0"
] | 4,281 |
2015-01-01T12:35:03.000Z
|
2022-03-31T20:06:59.000Z
|
webapp/graphite/events/views.py
|
romanek-adam/graphite-web
|
f6d7d16551a6953a5d0a1c19978efbf93fd3f869
|
[
"Apache-2.0"
] | 1,809 |
2015-01-01T21:16:36.000Z
|
2022-03-31T21:25:13.000Z
|
webapp/graphite/events/views.py
|
romanek-adam/graphite-web
|
f6d7d16551a6953a5d0a1c19978efbf93fd3f869
|
[
"Apache-2.0"
] | 970 |
2015-01-02T19:49:21.000Z
|
2022-03-27T09:48:44.000Z
|
import datetime
import six
try:
from django.contrib.sites.requests import RequestSite
except ImportError: # Django < 1.9
from django.contrib.sites.models import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from graphite.util import json, epoch, epoch_to_dt, jsonResponse, HttpError, HttpResponse
from graphite.events.models import Event
from graphite.render.attime import parseATTime
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return epoch(obj)
return json.JSONEncoder.default(self, obj)
def view_events(request):
if request.method == 'GET':
context = {'events': fetch(request),
'site': RequestSite(request),
'protocol': 'https' if request.is_secure() else 'http'}
return render(request, 'events.html', context)
else:
return post_event(request)
@jsonResponse(encoder=DjangoJSONEncoder)
def jsonDetail(request, queryParams, event_id):
try:
e = Event.objects.get(id=event_id)
e.tags = e.tags.split()
return model_to_dict(e)
except ObjectDoesNotExist:
raise HttpError('Event matching query does not exist', status=404)
def detail(request, event_id):
if request.META.get('HTTP_ACCEPT') == 'application/json':
return jsonDetail(request, event_id)
e = get_object_or_404(Event, pk=event_id)
context = {'event': e}
return render(request, 'event.html', context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.body)
assert isinstance(event, dict)
tags = event.get('tags')
if tags is not None:
if isinstance(tags, list):
tags = ' '.join(tags)
elif not isinstance(tags, six.string_types):
return HttpResponse(
json.dumps({'error': '"tags" must be an array or space-separated string'}),
status=400)
else:
tags = None
if 'when' in event:
when = epoch_to_dt(event['when'])
else:
when = now()
Event.objects.create(
what=event.get('what'),
tags=tags,
when=when,
data=event.get('data', ''),
)
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
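# Example request accepted by post_event above (values and URL path are
# illustrative; the actual path depends on the project's URLconf):
#
#     POST /events/
#     {"what": "deploy", "tags": ["release", "api"],
#      "when": 1467844481, "data": "v2.1 rolled out"}
#
# "tags" may also be a space-separated string; "when" is optional epoch seconds
# and defaults to now().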
def get_data(request):
query_params = request.GET.copy()
query_params.update(request.POST)
if 'jsonp' in query_params:
response = HttpResponse(
"%s(%s)" % (query_params.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
content_type='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
content_type='application/json')
return response
def fetch(request):
if request.GET.get('from') is not None:
time_from = parseATTime(request.GET['from'])
else:
time_from = epoch_to_dt(0)
if request.GET.get('until') is not None:
time_until = parseATTime(request.GET['until'])
else:
time_until = now()
set_operation = request.GET.get('set')
tags = request.GET.get('tags')
if tags is not None:
tags = request.GET.get('tags').split(' ')
result = []
for x in Event.find_events(time_from, time_until, tags=tags, set_operation=set_operation):
# django-tagging's with_intersection() returns matches with unknown tags
# this is a workaround to ensure we only return positive matches
if set_operation == 'intersection':
if len(set(tags) & set(x.as_dict()['tags'])) == len(tags):
result.append(x.as_dict())
else:
result.append(x.as_dict())
return result
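# Illustrative query handled by get_data()/fetch() above (the URL path is an
# assumption; parameter names come from the code):
#
#     GET /events/get_data?from=-2hours&until=now&tags=release%20api&set=intersection
#
# "tags" is split on spaces and "set=intersection" keeps only events carrying
# every requested tag, via the workaround noted in the loop above.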
| 30.772727 | 95 | 0.630478 |
4a19ffbd4d153f88e549d717fa71cd8634a3e289
| 196,708 |
py
|
Python
|
scipy/stats/tests/test_distributions.py
|
alazarchuk/scipy
|
7124fc982ea9b0ea961c65db550c0703abcb9bfd
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/tests/test_distributions.py
|
alazarchuk/scipy
|
7124fc982ea9b0ea961c65db550c0703abcb9bfd
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/tests/test_distributions.py
|
alazarchuk/scipy
|
7124fc982ea9b0ea961c65db550c0703abcb9bfd
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test functions for stats module
"""
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import IntegrationWarning, quad
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from .test_continuous_basic import distcont
from scipy.stats._continuous_distns import FitDataError
from scipy.optimize import root
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
# Expected values of the vonmises PDF were computed using
# mpmath with 50 digits of precision:
#
# def vmpdf_mp(x, kappa):
# x = mpmath.mpf(x)
# kappa = mpmath.mpf(kappa)
# num = mpmath.exp(kappa*mpmath.cos(x))
# den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
# return num/den
#
@pytest.mark.parametrize('x, kappa, expected_pdf',
[(0.1, 0.01, 0.16074242744907072),
(0.1, 25.0, 1.7515464099118245),
(0.1, 800, 0.2073272544458798),
(2.0, 0.01, 0.15849003875385817),
(2.0, 25.0, 8.356882934278192e-16),
(2.0, 800, 0.0)])
def test_vonmises_pdf(x, kappa, expected_pdf):
pdf = stats.vonmises.pdf(x, kappa)
assert_allclose(pdf, expected_pdf, rtol=1e-15)
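# The mpmath helper sketched in the comment above can be run directly to
# reproduce the expected values (mpmath is assumed available; it is not a
# dependency of this test suite):
#
#     import mpmath
#     mpmath.mp.dps = 50
#     def vmpdf_mp(x, kappa):
#         x = mpmath.mpf(x)
#         kappa = mpmath.mpf(kappa)
#         return mpmath.exp(kappa*mpmath.cos(x)) / (2*mpmath.pi*mpmath.besseli(0, kappa))
#     float(vmpdf_mp(0.1, 25.0))   # ~1.7515464099118245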
def _assert_less_or_close_loglike(dist, data, func, **kwds):
"""
This utility function checks that the log-likelihood (computed by
func) of the result computed using dist.fit() is less than or equal
to the result computed using the generic fit method. Because of
normal numerical imprecision, the "equality" check is made using
`np.allclose` with a relative tolerance of 1e-15.
"""
mle_analytical = dist.fit(data, **kwds)
numerical_opt = super(type(dist), dist).fit(data, **kwds)
ll_mle_analytical = func(mle_analytical, data)
ll_numerical_opt = func(numerical_opt, data)
assert (ll_mle_analytical <= ll_numerical_opt or
np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
def assert_fit_warnings(dist):
param = ['floc', 'fscale']
if dist.shapes:
nshapes = len(dist.shapes.split(","))
param += ['f0', 'f1', 'f2'][:nshapes]
all_fixed = dict(zip(param, np.arange(len(param))))
data = [1, 2, 3]
with pytest.raises(RuntimeError,
match="All parameters fixed. There is nothing "
"to optimize."):
dist.fit(data, **all_fixed)
with pytest.raises(RuntimeError,
match="The data contains non-finite values"):
dist.fit([np.nan])
with pytest.raises(RuntimeError,
match="The data contains non-finite values"):
dist.fit([np.inf])
with pytest.raises(TypeError, match="Unknown keyword arguments:"):
dist.fit(data, extra_keyword=2)
with pytest.raises(TypeError, match="Too many positional arguments."):
dist.fit(data, *[1]*(len(param) - 1))
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
class TestRandInt(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = np.linspace(0, 36, 100)
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
# no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestBradford(object):
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestNBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGenInvGauss(object):
def setup_method(self):
np.random.seed(1234)
@pytest.mark.slow
def test_rvs_with_mode_shift(self):
# ratio_unif w/ mode shift
gig = stats.geninvgauss(2.3, 1.5)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_without_mode_shift(self):
# ratio_unif w/o mode shift
gig = stats.geninvgauss(0.9, 0.75)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_new_method(self):
# new algorithm of Hoermann / Leydold
gig = stats.geninvgauss(0.1, 0.2)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_p_zero(self):
def my_ks_check(p, b):
gig = stats.geninvgauss(p, b)
rvs = gig.rvs(size=1500, random_state=1234)
return stats.kstest(rvs, gig.cdf)[1] > 0.05
# boundary cases when p = 0
assert_equal(my_ks_check(0, 0.2), True) # new algo
assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift
assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift
def test_rvs_negative_p(self):
# if p negative, return inverse
assert_equal(
stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
def test_invgauss(self):
# test that invgauss is special case
ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
# test pdf and cdf
mu, x = 100, np.linspace(0.01, 1, 10)
pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
def test_pdf_R(self):
# test against R package GIGrvg
# x <- seq(0.01, 5, length.out = 10)
# GIGrvg::dgig(x, 0.5, 1, 1)
vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
3.602084467e-02])
x = np.linspace(0.01, 5, 10)
assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
def test_pdf_zero(self):
# pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
# if x is large and p is moderate, make sure that pdf does not
# overflow because of x**(p-1); exp(-b*x) forces pdf to zero
assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestNormInvGauss(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf_R(self):
# test pdf and cdf vals against R
# require("GeneralizedHyperbolic")
# x_test <- c(-7, -5, 0, 8, 15)
# r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
# r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
9.988650664e-01, 9.999848769e-01])
x_test = np.array([-7, -5, 0, 8, 15])
vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
assert_allclose(vals_cdf, r_cdf, atol=1e-9)
def test_pdf_R(self):
# values from R as defined in test_cdf_R
r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
7.450485342e-04, 8.917889931e-06])
x_test = np.array([-7, -5, 0, 8, 15])
vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
assert_allclose(vals_pdf, r_pdf, atol=1e-9)
def test_stats(self):
a, b = 1, 0.5
gamma = np.sqrt(a**2 - b**2)
v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
3.0 * (1 + 4 * b**2 / a**2) / gamma)
assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
def test_ppf(self):
a, b = 1, 0.5
x_test = np.array([0.001, 0.5, 0.999])
vals = stats.norminvgauss.ppf(x_test, a, b)
assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck(object):
def setup_method(self):
np.random.seed(1234)
def test_sf(self):
vals = stats.planck.sf([1, 2, 3], 5.)
expected = array([4.5399929762484854e-05,
3.0590232050182579e-07,
2.0611536224385579e-09])
assert_array_almost_equal(vals, expected)
def test_logsf(self):
vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
expected = array([-1001000., -2001000., -3001000.])
assert_array_almost_equal(vals, expected)
class TestGennorm(object):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(object):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(object):
def setup_method(self):
np.random.seed(1234)
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that used to fail because the truncation interval lies
# deep in the tail.
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low <= x.min() <= x.max() <= high, str([low, high, x]))
# Check some additional extreme tails
low, high = 1000, 1001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = 10000, 10001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = -10001, -10000
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_9403_nontail_values(self):
for low, high in [[3, 4], [-4, -3]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
if low < 0:
expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
low + 0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
assert_almost_equal(ppfs, expected_ppfs)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.8475544278436675)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.1524455721563326)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.8475544278436675)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.1524455721563326)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
def test_gh_9403_medium_tail_values(self):
for low, high in [[39, 40], [-40, -39]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
if low < 0:
expected_pdfs = np.array([0, 2.73349092e-16,
3.90256074e+01, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
low + 0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
assert_almost_equal(ppfs, expected_ppfs)
cdfs = stats.truncnorm.cdf(ppfs, low, high)
assert_almost_equal(cdfs, pvals)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.9999999970389126)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
2.961048103554866e-09)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.9999999970389126)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
2.961048103554866e-09)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
xvals = np.linspace(low, high, 11)
xvals2 = -xvals[::-1]
assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),
stats.truncnorm.sf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.sf(xvals, low, high),
stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),
stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
def _test_moments_one_range(self, a, b, expected, decimal_s=7):
m0, v0, s0, k0 = expected[:4]
m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
assert_almost_equal(m, m0)
assert_almost_equal(v, v0)
assert_almost_equal(s, s0, decimal=decimal_s)
assert_almost_equal(k, k0)
@pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
def test_moments(self):
# Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
# using both the _norm_XXX() and _norm_logXXX() functions, and by
# removing the _stats and _munp methods in truncnorm to force
# numerical quadrature.
# For m,v,s,k expect k to have the largest error as it is
# constructed from powers of lower moments
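# A direct quadrature cross-check of, e.g., the mean on [-1, 3] could
# look like this (a sketch, not executed as part of the test):
#
#     from scipy import integrate
#     a, b = -1, 3
#     mass = stats.norm.cdf(b) - stats.norm.cdf(a)
#     mean = integrate.quad(lambda t: t*stats.norm.pdf(t)/mass, a, b)[0]
#     # mean ~= 0.2827861107271540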
self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-3, 3, [0.0, 0.9733369246625415,
0.0, -0.1711144363977444])
self._test_moments_one_range(-2, 2, [0.0, 0.7737413035499232,
0.0, -0.6344632828703505])
self._test_moments_one_range(0, np.inf, [0.7978845608028654,
0.3633802276324186,
0.9952717464311565,
0.8691773036059725])
self._test_moments_one_range(-np.inf, 0, [-0.7978845608028654,
0.3633802276324186,
-0.9952717464311565,
0.8691773036059725])
self._test_moments_one_range(-1, 3, [0.2827861107271540,
0.6161417353578292,
0.5393018494027878,
-0.2058206513527461])
self._test_moments_one_range(-3, 1, [-0.2827861107271540,
0.6161417353578292,
-0.5393018494027878,
-0.2058206513527461])
self._test_moments_one_range(-10, -9, [-9.1084562880124764,
0.0114488058210104,
-1.8985607337519652,
5.0733457094223553])
self._test_moments_one_range(-20, -19, [-19.0523439459766628,
0.0027250730180314,
-1.9838694022629291,
5.8717850028287586])
self._test_moments_one_range(-30, -29, [-29.0344012377394698,
0.0011806603928891,
-1.9930304534611458,
5.8854062968996566],
decimal_s=6)
self._test_moments_one_range(-40, -39, [-39.0256074199326264,
0.0006548826719649,
-1.9963146354109957,
5.6167758371700494])
self._test_moments_one_range(39, 40, [39.0256074199326264,
0.0006548826719649,
1.9963146354109957,
5.6167758371700494])
def test_9902_moments(self):
m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
assert_almost_equal(m, 0.79788456)
assert_almost_equal(v, 0.36338023)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_11299_rvs(self):
# Arose from investigating gh-11299
# Test multiple shape parameters simultaneously.
low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]
high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]
x = stats.truncnorm.rvs(low, high, size=(5, len(low)))
assert np.shape(x) == (5, len(low))
assert_(np.all(low <= x.min(axis=0)))
assert_(np.all(x.max(axis=0) <= high))
def test_rvs_Generator(self):
# check that rvs can use a Generator
if hasattr(np.random, "default_rng"):
stats.truncnorm.rvs(-10, -5, size=5,
random_state=np.random.default_rng())
class TestGenLogistic:
# Expected values computed with mpmath with 50 digits of precision.
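# A possible mpmath reproduction (a sketch, assuming the genlogistic
# pdf c*exp(-x)/(1 + exp(-x))**(c + 1)):
#
#     import mpmath
#     mpmath.mp.dps = 50
#     def genlogistic_logpdf_mp(x, c):
#         x, c = mpmath.mpf(x), mpmath.mpf(c)
#         return mpmath.log(c) - x - (c + 1)*mpmath.log1p(mpmath.exp(-x))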
@pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),
(-125, -187.09453489189184),
(0, -1.3274028432916989),
(100, -99.59453489189184),
(1000, -999.5945348918918)])
def test_logpdf(self, x, expected):
c = 1.5
logp = stats.genlogistic.logpdf(x, c)
assert_allclose(logp, expected, rtol=1e-13)
class TestHypergeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
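# A possible reproduction (a sketch, using the pmf
# binom(n, k)*binom(M - n, N - k)/binom(M, N)):
#
#     import mpmath
#     mpmath.mp.dps = 50
#     k, M, n, N = 2, 2500, 50, 500
#     pmf = (mpmath.binomial(n, k)*mpmath.binomial(M - n, N - k)
#            / mpmath.binomial(M, N))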
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 0, 2), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
# Results compare with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
class TestLoggamma(object):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic(object):
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
def test_logpdf_basic(self):
logp = stats.logistic.logpdf([-15, 0, 10])
# Expected values computed with mpmath with 50 digits of precision.
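# A possible reproduction (a sketch, using the standard logistic
# logpdf -x - 2*log(1 + exp(-x))):
#
#     import mpmath
#     mpmath.mp.dps = 50
#     def logistic_logpdf_mp(x):
#         x = mpmath.mpf(x)
#         return -x - 2*mpmath.log1p(mpmath.exp(-x))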
expected = [-15.000000611804547,
-1.3862943611198906,
-10.000090797798434]
assert_allclose(logp, expected, rtol=1e-13)
def test_logpdf_extreme_values(self):
logp = stats.logistic.logpdf([800, -800])
# For such large arguments, logpdf(x) = -abs(x) when computed
# with 64 bit floating point.
assert_equal(logp, [-800, -800])
@pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
def test_fit(self, loc_rvs, scale_rvs):
data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
# test that result of fit method is the same as optimization
def func(input, data):
a, b = input
n = len(data)
x1 = np.sum(np.exp((data - a) / b) /
(1 + np.exp((data - a) / b))) - n / 2
x2 = np.sum(((data - a) / b) *
((np.exp((data - a) / b) - 1) /
(np.exp((data - a) / b) + 1))) - n
return x1, x2
expected_solution = root(func, stats.logistic._fitstart(data), args=(
data,)).x
fit_method = stats.logistic.fit(data)
# other than computational variances, the fit method and the solution
# to this system of equations are equal
assert_allclose(fit_method, expected_solution, atol=1e-30)
@pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
def test_fit_comp_optimizer(self, loc_rvs, scale_rvs):
data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
# obtain objective function to compare results of the fit methods
args = [data, (stats.logistic._fitstart(data),)]
func = stats.logistic._reduce_func(args, {})[1]
_assert_less_or_close_loglike(stats.logistic, data, func)
class TestLogser(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
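# Here that gives (1e-20)**3 / 4 = 2.5e-61, in agreement with the
# mpmath value above.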
assert_allclose(m, 2.5e-61)
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
# >>> mpmath.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
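# This is consistent with the small-p expansion of the mean,
# 1 + p/2 + O(p**2) = 1.000000005 for p = 1e-8.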
assert_allclose(m, 1.000000005)
class TestPareto(object):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
@pytest.mark.filterwarnings("ignore:invalid value encountered in "
"double_scalars")
@pytest.mark.parametrize("rvs_shape", [1, 2])
@pytest.mark.parametrize("rvs_loc", [0, 2])
@pytest.mark.parametrize("rvs_scale", [1, 5])
def test_fit(self, rvs_shape, rvs_loc, rvs_scale):
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=rvs_loc)
# shape can still be fixed with multiple names
shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]
shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]
shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]
assert (shape_mle_analytical1 == shape_mle_analytical2 ==
shape_mle_analytical3 == 1.04)
# data can be shifted with changes to `loc`
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=(rvs_loc + 2))
shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)
assert_equal(scale_mle_a + 2, data.min())
assert_equal(shape_mle_a, 1/((1/len(data - 2)) *
np.sum(np.log((data
- 2)/(data.min() - 2)))))
assert_equal(loc_mle_a, 2)
@pytest.mark.filterwarnings("ignore:invalid value encountered in "
"double_scalars")
@pytest.mark.parametrize("rvs_shape", [1, 2])
@pytest.mark.parametrize("rvs_loc", [0, 2])
@pytest.mark.parametrize("rvs_scale", [1, 5])
def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale):
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=rvs_loc)
args = [data, (stats.pareto._fitstart(data), )]
func = stats.pareto._reduce_func(args, {})[1]
# fixed `floc` to actual location provides a better fit than the
# super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=rvs_loc)
# fixing `floc` to an arbitrary number, 0, still provides a better
# fit than the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0)
# fixed shape still uses MLE formula and provides a better fit than
# the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0, f0=4)
# valid fixed fscale still uses MLE formulas and provides a better
# fit than the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0,
fscale=rvs_scale/2)
def test_fit_warnings(self):
assert_fit_warnings(stats.pareto)
# `floc` that causes invalid negative data
assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)
# `floc` and `fscale` combination causes invalid data
assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,
fscale=3)
class TestGenpareto(object):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
a, b = stats.genpareto._get_support(c)
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
# Values in 'expected_stats' are
# [mean, variance, skewness, excess kurtosis].
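# The entries follow from the standard genpareto moment formulas
# (stated here as a cross-check): mean = 1/(1 - c),
# var = 1/((1 - c)**2*(1 - 2*c)),
# skew = 2*(1 + c)*sqrt(1 - 2*c)/(1 - 3*c) and
# excess kurtosis = 3*(1 - 2*c)*(2*c**2 + c + 3)/((1 - 3*c)*(1 - 4*c)) - 3,
# each valid only when the corresponding denominator factor is positive.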
@pytest.mark.parametrize(
'c, expected_stats',
[(0, [1, 1, 2, 6]),
(1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
(1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
(-1, [1/2, 1/12, 0, -6/5])])
def test_stats(self, c, expected_stats):
result = stats.genpareto.stats(c, moments='mvsk')
assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
def test_var(self):
# Regression test for gh-11168.
v = stats.genpareto.var(1e-8)
assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
def test_negative_cdf_bug_11186(self):
# incorrect CDFs for negative skews in gh-11186; fixed in gh-12640
# Also check vectorization w/ negative, zero, and positive skews
skews = [-3, -1, 0, 0.5]
x_eval = 0.5
neg_inf = -30 # avoid RuntimeWarning caused by np.log(0)
cdfs = stats.pearson3.cdf(x_eval, skews)
int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
for skew in skews]
assert_allclose(cdfs, int_pdfs)
def test_return_array_bug_11746(self):
# pearson3.moment was returning size 0 or 1 array instead of float
# The first moment is equal to the loc, which defaults to zero
moment = stats.pearson3.moment(1, 2)
assert_equal(moment, 0)
assert_equal(type(moment), float)
moment = stats.pearson3.moment(1, 0.000001)
assert_equal(moment, 0)
assert_equal(type(moment), float)
class TestKappa4(object):
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
# shape parameter is opposite what is expected
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson(object):
def setup_method(self):
np.random.seed(1234)
def test_pmf_basic(self):
# Basic case
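# pmf(k, mu) = exp(-mu)*mu**k/k!; with mu = log(2), exp(-mu) = 1/2,
# so the probabilities at k = 0, 1, 2 are 1/2, log(2)/2 and log(2)**2/4.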
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestKSTwo(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf(self):
for n in [1, 2, 3, 10, 100, 1000]:
# Test x-values:
# 0, 1/2n, where the cdf should be 0
# 1/n, where the cdf should be n!/n^n
# 0.5, where the cdf should match ksone.cdf
# 1-1/n, where cdf = 1-2/n^n
# 1, where cdf == 1
# (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([0, 0, v1 * elg,
1 - 2*stats.ksone.sf(0.5, n),
max(1 - 2*v1, 0.0),
1.0])
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected)
def test_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
# Same x values as in test_cdf, and use sf = 1 - cdf
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([1.0, 1.0,
1 - v1 * elg,
2*stats.ksone.sf(0.5, n),
min(2*v1, 1.0), 0])
vals_sf = stats.kstwo.sf(x, n)
assert_allclose(vals_sf, expected)
def test_cdf_sqrtn(self):
# For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
# cdf(a/sqrt(n), n) is a decreasing function of n (and an increasing
# function of a); cf. the values in test_simard_lecuyer_table1 below.
# Check that the function is indeed non-increasing in n (allowing for
# some small floating point and algorithm differences.)
x = np.linspace(0, 2, 11)[1:]
ns = [50, 100, 200, 400, 1000, 2000]
for _x in x:
xn = _x / np.sqrt(ns)
probs = stats.kstwo.cdf(xn, ns)
diffs = np.diff(probs)
assert_array_less(diffs, 1e-8)
def test_cdf_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
vals_cdf = stats.kstwo.cdf(x, n)
vals_sf = stats.kstwo.sf(x, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_cdf_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x / np.sqrt(n)
vals_cdf = stats.kstwo.cdf(xn, n)
vals_sf = stats.kstwo.sf(xn, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_ppf_of_cdf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
# CDFs close to 1 are better dealt with using the SF
cond = (0 < vals_cdf) & (vals_cdf < 0.99)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_isf_of_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_isf = stats.kstwo.isf(xn, n)
cond = (0 < vals_isf) & (vals_isf < 1.0)
vals = stats.kstwo.sf(vals_isf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_ppf_of_cdf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
cond = (0 < vals_cdf) & (vals_cdf < 1.0)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond])
def test_isf_of_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_sf = stats.kstwo.sf(xn, n)
# SFs close to 1 are better dealt with using the CDF
cond = (0 < vals_sf) & (vals_sf < 0.95)
vals = stats.kstwo.isf(vals_sf, n)
assert_allclose(vals[cond], xn[cond])
def test_ppf(self):
probs = np.linspace(0, 1, 11)[1:]
for n in [1, 2, 3, 10, 100, 1000]:
xn = stats.kstwo.ppf(probs, n)
vals_cdf = stats.kstwo.cdf(xn, n)
assert_allclose(vals_cdf, probs)
def test_simard_lecuyer_table1(self):
# Compute the cdf for values near the mean of the distribution.
# The mean u ~ log(2)*sqrt(pi/(2n))
# Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
# This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
# Except that the values below are not from the published table, but
# were generated using an independent SageMath implementation of
# Durbin's algorithm (with the exponentiation and scaling of
# Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
# Some of the values in the published table have relative
# errors greater than 1e-4.
ns = [10, 50, 100, 200, 500, 1000]
ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
expected = np.array([
[1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,
9.97685592e-01, 9.99999942e-01],
[2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,
9.96177701e-01, 9.99998662e-01],
[1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,
9.95866877e-01, 9.99998240e-01],
[4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,
9.95661824e-01, 9.99997964e-01],
[2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,
9.95491207e-01, 9.99997750e-01],
[1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,
9.95409545e-01, 9.99997657e-01]
])
for idx, n in enumerate(ns):
x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
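# so for a=2.8 the mean is finite but the variance is not, and for
# a=4.8 the skewness is finite while the kurtosis, which needs the
# 4th moment (a > 5), is not.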
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvgauss(object):
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
[(2, 0, 1), (np.random.rand(3)*10)])
def test_fit(self, rvs_mu, rvs_loc, rvs_scale):
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
# Analytical MLEs are calculated with formula when `floc` is fixed
mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)
data = data - rvs_loc
mu_temp = np.mean(data)
scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1)))
mu_mle = mu_temp/scale_mle
# `mu` and `scale` match analytical formula
assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)
assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)
assert_equal(loc, rvs_loc)
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
# fixed parameters are returned
mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,
fscale=rvs_scale + 1)
assert_equal(rvs_scale + 1, scale)
assert_equal(rvs_loc - 1, loc)
# shape can still be fixed with multiple names
shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]
shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]
shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]
assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04
@pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
[(2, 0, 1), (np.random.rand(3)*10)])
def test_fit_MLE_comp_optimizer(self, rvs_mu, rvs_loc, rvs_scale):
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
super_fit = super(type(stats.invgauss), stats.invgauss).fit
# fitting without `floc` uses superclass fit method
super_fitted = super_fit(data)
invgauss_fit = stats.invgauss.fit(data)
assert_equal(super_fitted, invgauss_fit)
# fitting with `fmu` fixed also uses the superclass fit method
super_fitted = super_fit(data, floc=0, fmu=2)
invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)
assert_equal(super_fitted, invgauss_fit)
# obtain log-likelihood objective function to compare results
args = [data, (stats.invgauss._fitstart(data), )]
func = stats.invgauss._reduce_func(args, {})[1]
# fixed `floc` uses analytical formula and provides better fit than
# super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc)
# a fixed `floc` that keeps all of the shifted data positive uses the
# analytical formulas and provides a better fit than the super method
assert np.all((data - (rvs_loc - 1)) > 0)
_assert_less_or_close_loglike(stats.invgauss, data, func,
floc=rvs_loc - 1)
# fixed `floc` to an arbitrary number, 0, still provides a better fit
# than the super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=0)
# fixed `fscale` to an arbitrary number still provides a better fit
# than the super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc,
fscale=np.random.rand(1)[0])
def test_fit_raise_errors(self):
assert_fit_warnings(stats.invgauss)
# FitDataError is raised when `floc` makes the shifted data invalid
# (non-positive)
with pytest.raises(FitDataError):
stats.invgauss.fit([1, 2, 3], floc=2)
class TestLaplace(object):
@pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2])
@pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10])
def test_fit(self, rvs_loc, rvs_scale):
# tests that various inputs follow expected behavior
# for a variety of `loc` and `scale`.
data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)
# MLE estimates are given by
loc_mle = np.median(data)
scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)
# standard outputs should match analytical MLE formulas
loc, scale = stats.laplace.fit(data)
assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)
assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
# fixed parameter should use analytical formula for other
loc, scale = stats.laplace.fit(data, floc=loc_mle)
assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
loc, scale = stats.laplace.fit(data, fscale=scale_mle)
assert_allclose(loc, loc_mle)
# test with non-mle fixed parameter
# create scale with non-median loc
loc = rvs_loc * 2
scale_mle = np.sum(np.abs(data - loc)) / len(data)
# fixed loc to non median, scale should match
# scale calculation with modified loc
loc, scale = stats.laplace.fit(data, floc=loc)
assert_equal(scale_mle, scale)
# fixed scale created with non median loc,
# loc output should still be the data median.
loc, scale = stats.laplace.fit(data, fscale=scale_mle)
assert_equal(loc_mle, loc)
# error raised when both `floc` and `fscale` are fixed
assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,
fscale=scale_mle)
# error is raised with non-finite values
assert_raises(RuntimeError, stats.laplace.fit, [np.nan])
assert_raises(RuntimeError, stats.laplace.fit, [np.inf])
@pytest.mark.parametrize("rvs_scale,rvs_loc", [(10, -5),
(5, 10),
(.2, .5)])
def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale):
data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)
# the log-likelihood function for laplace is given by
def ll(loc, scale, data):
return -1 * (- (len(data)) * np.log(2*scale) -
(1/scale)*np.sum(np.abs(data - loc)))
# test that the objective function result of the analytical MLEs is
# less than or equal to that of the numerically optimized estimate
loc, scale = stats.laplace.fit(data)
loc_opt, scale_opt = super(type(stats.laplace),
stats.laplace).fit(data)
ll_mle = ll(loc, scale, data)
ll_opt = ll(loc_opt, scale_opt, data)
assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,
atol=1e-15, rtol=1e-15)
def test_fit_simple_non_random_data(self):
data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])
# with `floc` fixed to 6, scale should be 4.
loc, scale = stats.laplace.fit(data, floc=6)
assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)
# with `fscale` fixed to 6, loc should be 4.
loc, scale = stats.laplace.fit(data, fscale=6)
assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)
class TestInvGamma(object):
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
# The invgamma roundtrip on 32-bit systems has relative accuracy
# ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF(object):
def test_endpoints(self):
# Compute the pdf at the left endpoint dist.a.
data = [[stats.f, (2, 1), 1.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
def test_stats_broadcast(self):
dfn = np.array([[3], [11]])
dfd = np.array([11, 12])
m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
m2 = [dfd / (dfd - 2)]*2
assert_allclose(m, m2)
v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
assert_allclose(v, v2)
s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
assert_allclose(s, s2)
k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
(dfd - 4) * (dfd - 2)**2)
k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
k2 = k2num / k2den
assert_allclose(k, k2)
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
def test_moments_t():
# regression test for #8786
assert_equal(stats.t.stats(df=1, moments='mvsk'),
(np.inf, np.nan, np.nan, np.nan))
assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
(0.0, 2.01/(2.01-2.0), np.nan, np.inf))
assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
class TestRvDiscrete(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
def test_expect1(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_expect2(self):
# rv_sample should override _expect. Bug report from
# https://stackoverflow.com/questions/63199792
y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]
py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
0.00019999999999997797, 0.0006000000000000449,
0.024499999999999966, 0.006400000000000072,
0.0043999999999999595, 0.019499999999999962,
0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
0.0, 0.0041000000000001036, 0.005999999999999894,
0.0042000000000000925, 0.0050000000000000044,
0.0041999999999999815, 0.0004999999999999449,
0.009199999999999986, 0.008200000000000096,
0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
0.0006000000000000449, 0.02510000000000001, 0.0,
0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
0.008199999999999985, 0.005600000000000049, 0.0]
rv = stats.rv_discrete(values=(y, py))
# check the mean
assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
assert_allclose(rv.expect(),
sum(v * w for v, w in zip(y, py)), atol=1e-14)
# also check the second moment
assert_allclose(rv.expect(lambda x: x**2),
sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
class TestSkewNorm(object):
def setup_method(self):
self.rng = check_random_state(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon(object):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm(object):
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform(object):
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm(object):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
# where it is listed as an exponentially modified gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
# Expected values for the PDF were computed with mpmath, with
# the following function, and with mpmath.mp.dps = 50.
#
# def exponnorm_stdpdf(x, K):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# t1 = mpmath.exp(1/(2*K**2) - x/K)
# erfcarg = -(x - 1/K)/mpmath.sqrt(2)
# t2 = mpmath.erfc(erfcarg)
# return t1 * t2 / (2*K)
#
@pytest.mark.parametrize('x, K, expected',
[(20, 0.01, 6.90010764753618e-88),
(1, 0.01, 0.24438994313247364),
(-1, 0.01, 0.23955149623472075),
(-20, 0.01, 4.6004708690125477e-88),
(10, 1, 7.48518298877006e-05),
(10, 10000, 9.990005048283775e-05)])
def test_std_pdf(self, x, K, expected):
assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=1e-12)
class TestGenExpon(object):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
        # CDF values should always lie within [0, 1]
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(object):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
class TestSkellam(object):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
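    # For reference, the R values above should also agree closely with the
    # standard Bessel-function form of the Skellam pmf,
    #     pmf(k; mu1, mu2) = exp(-(mu1 + mu2)) * (mu1/mu2)**(k/2)
    #                        * I_k(2*sqrt(mu1*mu2)).
    # An illustrative cross-check (not part of the comparison to R):
    #
    #     from scipy.special import iv
    #     k = numpy.arange(-10, 15)
    #     mu1, mu2 = 10, 5
    #     pmf = (np.exp(-(mu1 + mu2)) * (mu1/mu2)**(k/2.)
    #            * iv(k, 2*np.sqrt(mu1*mu2)))
    #     assert_allclose(stats.skellam.pmf(k, mu1, mu2), pmf, rtol=1e-10)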
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(object):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(object):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
plate="shrimp")
def test_fit_duplicated_fixed_parameter(self):
# At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
# More than one raises a ValueError.
x = [0.1, 0.5, 0.6]
assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
class TestBetaPrime(object):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(object):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
        # Regression test for Ticket #1326: corner case, avoid nan in the
        # 0*log(0) situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
class TestChi2(object):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
def test_ppf(self):
# Expected values computed with mpmath.
df = 4.8
x = stats.chi2.ppf(2e-47, df)
assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
x = stats.chi2.ppf(0.5, df)
assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
df = 13
x = stats.chi2.ppf(2e-77, df)
assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
x = stats.chi2.ppf(0.1, df)
assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
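    # A sketch of how such reference values can be checked with mpmath (the
    # ppf is verified in the forward direction through the regularized lower
    # incomplete gamma function; mpmath.mp.dps = 50 assumed):
    #
    #     import mpmath
    #     mpmath.mp.dps = 50
    #     def chi2_cdf(x, df):
    #         return mpmath.gammainc(df/2, 0, x/2, regularized=True)
    #     # chi2_cdf(1.098472479575179840604902808e-19, 4.8) ~= 2e-47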
class TestGumbelL(object):
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestGumbelR:
def test_sf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
# 1.9287498479639178e-22
assert_allclose(stats.gumbel_r.sf(50), 1.9287498479639178e-22,
rtol=1e-14)
def test_isf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
# 39.14394658089878
assert_allclose(stats.gumbel_r.isf(1e-17), 39.14394658089878,
rtol=1e-14)
class TestLevyStable(object):
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [-.05413, -.05413,
0., 0., 0., 0.,
.00533, .00533, .00533, .00533, .00533,
.03354, .03354, .03354, .03354, .03354,
.05309, .05309, .05309, .05309, .05309]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
# to 2 dps due to rounding error in McCulloch86
assert_almost_equal(loc1, 0.00233, 2)
# cover alpha=2 scenario
x2 = x + [.05309, .05309, .05309, .05309, .05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.slow
def test_pdf_nolan_samples(self):
""" Test pdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
fn = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-pdf-sample-data.npy'))
data = np.load(fn)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
# support numpy 1.8.2 for travis
npisin = np.isin if hasattr(np, "isin") else np.in1d
tests = [
# best selects
['best', None, 8, None],
            # quadrature is accurate for most alpha except 0.25; perhaps a
            # limitation of Nolan's stablec?
            # we reduce the size of x to speed up computation, as numerical
            # integration is slow.
['quadrature', None, 8,
lambda r: ((r['alpha'] > 0.25) &
(npisin(r['x'], [-10, -5, 0, 5, 10])))],
            # zolotarev is accurate except at alpha==1, beta != 0
['zolotarev', None, 8, lambda r: r['alpha'] != 1],
['zolotarev', None, 8,
lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
['zolotarev', None, 1,
lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
# fft accuracy reduces as alpha decreases, fails at low values of
# alpha and x=0
['fft', 0, 4, lambda r: r['alpha'] > 1],
['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
# not useful here
['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)],
]
for ix, (default_method, fft_min_points,
decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = (data[filter_func(data)] if filter_func is not None else
data)
with suppress_warnings() as sup:
sup.record(RuntimeWarning,
"Density calculation unstable for alpha=1 "
"and beta!=0.*")
sup.record(RuntimeWarning,
"Density calculations experimental for FFT "
"method.*")
p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'],
subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
padiff = np.abs(p-subdata['p'])
failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |
np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places,
("pdf test %s failed with method '%s'\n%s"
% (ix, default_method, failures)),
verbose=False)
@pytest.mark.slow
def test_cdf_nolan_samples(self):
""" Test cdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
fn = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-cdf-sample-data.npy'))
data = np.load(fn)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
tests = [
            # zolotarev is accurate for all values
['zolotarev', None, 8, None],
            # fft accuracy is poor, and very poor for alpha < 1
['fft', 0, 2, lambda r: r['alpha'] > 1],
]
for ix, (default_method, fft_min_points, decimal_places,
filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = (data[filter_func(data)] if filter_func is not None else
data)
with suppress_warnings() as sup:
sup.record(RuntimeWarning, 'FFT method is considered ' +
'experimental for cumulative distribution ' +
'function evaluations.*')
p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'],
subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
padiff = np.abs(p - subdata['p'])
failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |
np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places,
("cdf test %s failed with method '%s'\n%s"
% (ix, default_method, failures)),
verbose=False)
def test_pdf_alpha_equals_one_beta_non_zero(self):
"""
sample points extracted from Tables and Graphs of Stable Probability
Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array([0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4])
density = np.array([.3183, .3096, .2925, .2622,
.1591, .1587, .1599, .1635,
.0637, .0729, .0812, .0955,
.0318, .0390, .0458, .0586,
.0187, .0236, .0285, .0384])
betas = np.array([0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1])
tests = [
['quadrature', None, 4],
['zolotarev', None, 1],
]
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=RuntimeWarning,
message="Density calculation unstable.*")
for default_method, fft_min_points, decimal_places in tests:
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(pdf, density, decimal_places,
default_method)
def test_stats(self):
param_sets = [
[(1.48, -.22, 0, 1), (0, np.inf, np.NaN, np.NaN)],
[(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
for args, exp_stats in param_sets:
calc_stats = stats.levy_stable.stats(args[0], args[1],
loc=args[2], scale=args[3],
moments='mvsk')
assert_almost_equal(calc_stats, exp_stats)
class TestArrayArgument(object): # test for ticket:992
def setup_method(self):
np.random.seed(1234)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(object):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
assert_("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
assert_("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(object):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
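    # For reference: after normalization [0, 1, 2] becomes [0, 1/3, 2/3] and
    # the 0*log(0) term is dropped, so the value above is
    #     H = (1/3)*log(3) + (2/3)*log(3/2) = log(3) - (2/3)*log(2)
    #       ~= 0.6365141682948128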
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def test_entropy_base_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=1),
[0.63651417, 0.63651417, 0.66156324])
def test_entropy_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
[0.231049, 0.231049, 0.127706])
def test_entropy_raises_value_error(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.1, 0.2], [0.6, 0.3]]
assert_raises(ValueError, stats.entropy, pk, qk)
def test_base_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=0),
stats.entropy(pk))
def test_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
stats.entropy(pk, qk))
def test_base_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
assert_array_almost_equal(stats.entropy(pk.T).T,
stats.entropy(pk, axis=1))
def test_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
stats.entropy(pk, qk, axis=1))
def TestArgsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf', 'ksone', 'kstwo']
def setup_method(self):
np.random.seed(1234)
    # skip these because they are deprecated, or take only loc and scale
    # arguments
fitSkipNonFinite = ['expon', 'norm', 'uniform']
@pytest.mark.parametrize('dist,args', distcont)
def test_fit_w_non_finite_data_values(self, dist, args):
"""gh-10300"""
if dist in self.fitSkipNonFinite:
pytest.skip("%s fit known to fail or deprecated" % dist)
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
distfunc = getattr(stats, dist)
assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, x.ptp())
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(object):
def setup_method(self):
np.random.seed(1234)
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c=c)
assert_equal(rv.dist._get_support(c), [0, np.inf])
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c) # this should NOT change genpareto.b
assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
rv = stats.genpareto(c=c)
a, b = rv.a, rv.b
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
        # smoke test the expect method of the frozen distribution;
        # only uses a gamma w/ loc and scale and a poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(object):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
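    # For reference, the constants above follow from the standard beta
    # moments with a=10, b=5, loc=5, scale=2:
    #     mean = loc + scale*a/(a + b) = 5 + 2*10/15 = 19/3
    #     var  = scale**2 * a*b/((a + b)**2 * (a + b + 1))
    #          = 4 * 50/(225*16) = 1/18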
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
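    # For reference, the closed form used above comes from summing a
    # geometric series: with pmf(k) = -p**k / (k*log(1 - p)) for k >= 1,
    #     E[k] = -1/log(1 - p) * sum_k p**k = -p / ((1 - p)*log(1 - p)),
    # which equals the p / (p - 1.) / np.log(1. - p) expression asserted.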
def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute the
        # first two moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(object):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
        # For negative values of c and for c=0, the results of rv.cdf(0)
        # below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(object):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
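    # For reference, the O(b**2) statement follows from the Rice density
    #     pdf(x, b) = x * exp(-(x**2 + b**2)/2) * I0(x*b):
    # expanding I0(x*b) = 1 + (x*b)**2/4 + O(b**4) and exp(-b**2/2) =
    # 1 - b**2/2 + O(b**4) gives pdf(x, b) = x*exp(-x**2/2)*(1 + O(b**2)).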
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(object):
def setup_method(self):
np.random.seed(1234)
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
def test_gh_pr_10949_argcheck(self):
assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh(object):
def setup_method(self):
np.random.seed(1234)
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
@pytest.mark.parametrize("rvs_loc,rvs_scale", [np.random.rand(2)])
def test_fit(self, rvs_loc, rvs_scale):
data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
def scale_mle(data, floc):
return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5
# when `floc` is provided, `scale` is found with an analytical formula
scale_expect = scale_mle(data, rvs_loc)
loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
assert_equal(loc, rvs_loc)
assert_equal(scale, scale_expect)
# when `fscale` is fixed, superclass fit is used to determine `loc`.
loc, scale = stats.rayleigh.fit(data, fscale=.6)
assert_equal(scale, .6)
# with both parameters free, one dimensional optimization is done
# over a new function that takes into account the dependent relation
# of `scale` to `loc`.
loc, scale = stats.rayleigh.fit(data)
# test that `scale` is defined by its relation to `loc`
assert_equal(scale, scale_mle(data, loc))
@pytest.mark.parametrize("rvs_loc,rvs_scale", [np.random.rand(2)])
def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
# test that the objective function result of the analytical MLEs is
# less than or equal to that of the numerically optimized estimate
data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
# obtain objective function with same method as `rv_continuous.fit`
args = [data, (stats.rayleigh._fitstart(data), )]
func = stats.rayleigh._reduce_func(args, {})[1]
_assert_less_or_close_loglike(stats.rayleigh, data, func)
def test_fit_warnings(self):
assert_fit_warnings(stats.rayleigh)
class TestExponWeib(object):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
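    # A sketch of how such reference values can be generated with mpmath,
    # using the exponweib density
    #     a*c*(1 - exp(-x**c))**(a - 1) * exp(-x**c) * x**(c - 1):
    #
    #     import mpmath
    #     mpmath.mp.dps = 50
    #     x, a, c = mpmath.mpf('0.1'), mpmath.mpf(1), mpmath.mpf(100)
    #     p = a*c*(1 - mpmath.exp(-x**c))**(a - 1)*mpmath.exp(-x**c)*x**(c - 1)
    #     # float(p) ~= 1e-97, float(mpmath.log(p)) ~= -223.35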
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestWeibull(object):
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(object):
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_rdist_beta(self):
# rdist is a special case of stats.beta
x = np.linspace(-0.99, 0.99, 10)
c = 2.7
assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
stats.rdist(c).pdf(x))
class TestTrapezoid(object):
def test_reduces_to_triang(self):
modes = [0, 0.3, 0.5, 1]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapezoid.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapezoid.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapezoid.pdf(x, 0, 1), stats.uniform.pdf(x))
assert_almost_equal(stats.trapezoid.cdf(x, 0, 1), stats.uniform.cdf(x))
def test_cases(self):
# edge cases
assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8),
1.11111111111111111)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0),
1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapezoid.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapezoid.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapezoid.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapezoid.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapezoid.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapezoid.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapezoid.cdf(1.0, 0.2, 0.8), 1.0)
def test_moments_and_entropy(self):
# issue #11795: improve precision of trapezoid stats
# Apply formulas from Wikipedia for the following parameters:
a, b, c, d = -3, -1, 2, 3 # => 1/3, 5/6, -3, 6
p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a
h = 2 / (d+c-b-a)
def moment(n):
return (h * ((d**(n+2) - c**(n+2)) / (d-c)
- (b**(n+2) - a**(n+2)) / (b-a)) /
(n+1) / (n+2))
mean = moment(1)
var = moment(2) - mean**2
entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a))
assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale),
mean, decimal=13)
assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale),
var, decimal=13)
assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale),
entropy, decimal=13)
        # Check boundary cases where scipy's shape parameter d is 0 or 1.
assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13)
assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13)
assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13)
def test_trapezoid_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapezoid.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapezoid.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
# Check that the stats() method supports vector arguments.
v = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk"))
cc, dd = np.broadcast_arrays(c, d)
res = np.empty((cc.size, 4)) # 4 stats returned per value
ind = np.arange(cc.size)
for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()):
res[i] = stats.trapezoid.stats(c1, d1, moments="mvsk")
assert_allclose(v, res.T.reshape(v.shape), atol=1e-15)
def test_trapz(self):
# Basic test for alias
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
class TestTriang(object):
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke(object):
def test_moments(self):
k, s = 4.642, 0.597
# n-th moment exists only if n < s
assert_equal(stats.mielke(k, s).moment(1), np.inf)
assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
def test_burr_equivalence(self):
x = np.linspace(0.01, 100, 50)
k, s = 2.45, 5.32
assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr(object):
def test_endpoints_7491(self):
# gh-7491
# Compute the pdf at the left endpoint dst.a.
data = [
[stats.fisk, (1,), 1],
[stats.burr, (0.5, 2), 1],
[stats.burr, (1, 1), 1],
[stats.burr, (2, 0.5), 1],
[stats.burr12, (1, 0.5), 0.5],
[stats.burr12, (1, 1), 1.0],
[stats.burr12, (1, 2), 2.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
correct = [np.log(_correct_) for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_burr_stats_9544(self):
# gh-9544. Test from gh-9978
c, d = 5.0, 3
mean, variance = stats.burr(c, d).stats()
# mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
# var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 -
# (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
assert_allclose(mean, mean_hc)
assert_allclose(variance, variance_hc)
def test_burr_nan_mean_var_9544(self):
# gh-9544. Test from gh-9978
c, d = 0.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isnan(mean))
assert_(np.isnan(variance))
c, d = 1.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isfinite(mean))
assert_(np.isnan(variance))
c, d = 0.5, 3
e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
assert_(np.isnan(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 1.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 2.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 3.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isnan(e4))
c, d = 4.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isfinite(e4))
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjust to avoid nan with 0*log(0)
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
with np.errstate(divide='ignore'):
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
with np.errstate(divide='ignore'):
params = np.array(stats.lognorm.fit(x, floc=0.))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Some tests for the variance and kurtosis of the Tukey Lambda distr.
    # See test_tukeylambda_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
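# A standalone restatement of the closed-form powerlaw moments derived in the
# docstring of test_powerlaw_stats above (illustrative helper added for clarity;
# the name _powerlaw_exact_mvsk is not part of the original test suite).
def _powerlaw_exact_mvsk(a):
    """Return (mean, var, skewness, excess kurtosis) of stats.powerlaw(a)."""
    mean = a / (a + 1.0)
    var = a / ((a + 2.0) * (a + 1.0)**2)
    skew = -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a)
    kurt = 6.0 * (a**3 - a**2 - 6.0*a + 2.0) / (a * (a + 3.0) * (a + 4.0))
    return mean, var, skew, kurt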
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The maximum number of subdivisions .50. has been "
"achieved.")
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
stats.ksone.fit(d)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
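    # (Note added for clarity: the check below uses complex-step differentiation;
    #  for an analytic f, Im(f(x + i*h)) / h approximates f'(x) to O(h**2) without
    #  subtractive cancellation, so a tiny step such as h = 1e-10 is safe.)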
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_levy_sf():
# Large values, far into the tail of the distribution.
x = np.array([1e15, 1e25, 1e35, 1e50])
# Expected values were calculated with mpmath.
expected = np.array([2.5231325220201597e-08,
2.52313252202016e-13,
2.52313252202016e-18,
7.978845608028653e-26])
y = stats.levy.sf(x)
assert_allclose(y, expected, rtol=1e-14)
def test_levy_l_sf():
# Test levy_l.sf for small arguments.
x = np.array([-0.016, -0.01, -0.005, -0.0015])
# Expected values were calculated with mpmath.
expected = np.array([2.6644463892359302e-15,
1.523970604832107e-23,
2.0884875837625492e-45,
5.302850374626878e-147])
y = stats.levy_l.sf(x)
assert_allclose(y, expected, rtol=1e-13)
def test_levy_l_isf():
# Test roundtrip sf(isf(p)), including a small input value.
p = np.array([3.0e-15, 0.25, 0.99])
x = stats.levy_l.isf(p)
q = stats.levy_l.sf(x)
assert_allclose(q, p, rtol=5e-14)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
np.random.seed(1234)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    # ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
# Verify logpdf has extended precision when pdf underflows to 0
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
class TestSubclassingExplicitShapes(object):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
    def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(object):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
stats.genextreme.cdf(.5, 0)
stats.genextreme.pdf(.5, 0)
stats.genextreme.ppf(.5, 0)
stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
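    # (Comment added for clarity; the closed form is inferred from the checks
    #  below: with scipy's shape convention, genextreme.entropy(c, scale=s)
    #  equals (1 - c)*euler_gamma + log(s) + 1.)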
euler_gamma = 0.5772156649015329
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
"""
All values are calculated using the independent implementation of the
ROOT framework (see https://root.cern.ch/).
Corresponding ROOT code is given in the comments.
"""
X = np.linspace(-5.0, 5.0, 21)[:-1]
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
0.013226, 0.00334407, 0.000658486, 0.000100982,
1.20606e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
0.0530497, 0.0172227, 0.00435458, 0.000857469,
0.000131497, 1.57051e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
0.0265249])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
"""
All values are calculated using the pdf formula and the integrate function
of Mathematica
"""
    # The last two (beta, m) pairs test the special case m == beta**2
beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
# The distribution should be correctly normalised
expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
calculated_0th_moment = stats.crystalball._munp(0, beta, m)
assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
# calculated using wolframalpha.com
# e.g. for beta = 2 and m = 3 we calculate the norm like this:
# integrate exp(-x^2/2) from -2 to infinity +
# integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
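    # A hedged sketch of how norm[0] (beta=2, m=3) can be reproduced numerically,
    # kept as a comment so the test itself is unchanged:
    # >>> from scipy import integrate
    # >>> core = integrate.quad(lambda t: np.exp(-t**2 / 2), -2, np.inf)[0]
    # >>> tail = integrate.quad(
    # ...     lambda t: (3/2)**3 * np.exp(-2**2 / 2) * (3/2 - 2 - t)**-3,
    # ...     -np.inf, -2)[0]
    # >>> core + tail  # approximately 2.5511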
a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
expected_1th_moment = a / norm
calculated_1th_moment = stats.crystalball._munp(1, beta, m)
assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
expected_2th_moment = a / norm
calculated_2th_moment = stats.crystalball._munp(2, beta, m)
assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
expected_3th_moment = a / norm
calculated_3th_moment = stats.crystalball._munp(3, beta, m)
assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
expected_4th_moment = a / norm
calculated_4th_moment = stats.crystalball._munp(4, beta, m)
assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
expected_5th_moment = a / norm
calculated_5th_moment = stats.crystalball._munp(5, beta, m)
assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
@pytest.mark.parametrize(
'df1,df2,x',
[(2, 2, [-0.5, 0.2, 1.0, 2.3]),
(4, 11, [-0.5, 0.2, 1.0, 2.3]),
(7, 17, [1, 2, 3, 4, 5])]
)
def test_ncf_edge_case(df1, df2, x):
# Test for edge case described in gh-11660.
# Non-central Fisher distribution when nc = 0
# should be the same as Fisher distribution.
nc = 0
expected_cdf = stats.f.cdf(x, df1, df2)
calculated_cdf = stats.ncf.cdf(x, df1, df2, nc)
assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14)
    # Once ncf_gen._skip_pdf is used instead of the generic pdf,
    # this additional test will be useful.
expected_pdf = stats.f.pdf(x, df1, df2)
calculated_pdf = stats.ncf.pdf(x, df1, df2, nc)
assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6)
def test_ncf_variance():
# Regression test for gh-10658 (incorrect variance formula for ncf).
# The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
# example, Wolfram Alpha with the expression
# Variance[NoncentralFRatioDistribution[2, 6, 4]]
# or with the implementation of the noncentral F distribution in the C++
# library Boost.
v = stats.ncf.var(2, 6, 4)
assert_allclose(v, 42.75, rtol=1e-14)
class TestHistogram(object):
def setup_method(self):
np.random.seed(1234)
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
# Test explicitly the corner cases:
        # As stated above, the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram put the 9
# into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
# 9 is outside our defined bins [8,9) hence the pdf is already 0
# for a continuous distribution this is fine, because a single value
# does not have a finite probability!
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
# First three and last two values in cdf_value are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
        # Test that cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm(1.0, 2.5).moment(n), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
# This test makes sure the alias of "loguniform" is log-uniform
rv = stats.loguniform(10 ** -3, 10 ** 0)
rvs = rv.rvs(size=10000, random_state=42)
vals, _ = np.histogram(np.log10(rvs), bins=10)
assert 900 <= vals.min() <= vals.max() <= 1100
assert np.abs(np.median(vals) - 1000) <= 10
class TestArgus(object):
def test_argus_rvs_large_chi(self):
# test that the algorithm can handle large values of chi
x = stats.argus.rvs(50, size=500, random_state=325)
assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
def test_argus_rvs_ratio_uniforms(self):
        # test that the ratio of uniforms algorithm works for chi > 2.611
x = stats.argus.rvs(3.5, size=1500, random_state=1535)
assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)
assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('chi, expected_mean',
[(1, 0.6187026683551835),
(10, 0.984805536783744),
(40, 0.9990617659702923),
(60, 0.9995831885165300),
(99, 0.9998469348663028)])
def test_mean(self, chi, expected_mean):
m = stats.argus.mean(chi, scale=1)
assert_allclose(m, expected_mean, rtol=1e-13)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('chi, expected_var, rtol',
[(1, 0.05215651254197807, 1e-13),
(10, 0.00015805472008165595, 1e-11),
(40, 5.877763210262901e-07, 1e-8),
(60, 1.1590179389611416e-07, 1e-8),
(99, 1.5623277006064666e-08, 1e-8)])
def test_var(self, chi, expected_var, rtol):
v = stats.argus.var(chi, scale=1)
assert_allclose(v, expected_var, rtol=rtol)
def test_rvs_no_size_warning():
class rvs_no_size_gen(stats.rv_continuous):
def _rvs(self):
return 1
rvs_no_size = rvs_no_size_gen(name='rvs_no_size')
with assert_warns(np.VisibleDeprecationWarning):
rvs_no_size.rvs()
| 39.357343 | 79 | 0.570109 |
4a1a009e7eb7d13e012fabdb2c91ed36ee9439a4
| 1,896 |
py
|
Python
|
Assignment 1/grid_search_opencl.py
|
Panyw97/LargeScaleComputing_S20
|
b3da9750d48fe1917f62f99dbbecd31766e89d1a
|
[
"MIT"
] | null | null | null |
Assignment 1/grid_search_opencl.py
|
Panyw97/LargeScaleComputing_S20
|
b3da9750d48fe1917f62f99dbbecd31766e89d1a
|
[
"MIT"
] | null | null | null |
Assignment 1/grid_search_opencl.py
|
Panyw97/LargeScaleComputing_S20
|
b3da9750d48fe1917f62f99dbbecd31766e89d1a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pyopencl as cl
import time
import pyopencl.clrandom as clrand
import pyopencl.array as cl_array
def grid_search(n_runs):
# Set up OpenCL context and command queue
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
t0 = time.time()
T = int(4160) # Set the number of periods for each simulation
rand_gen = clrand.PhiloxGenerator(ctx)
ran = rand_gen.normal(queue, (n_runs * T), np.float32, mu=0, sigma=1)
rho_l = np.linspace(-0.95, 0.95, 200).astype(np.float32)
opt = []
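    # (Descriptive comment added for clarity.) For each simulated run the kernel
    # below builds the AR(1) series h_0 = eps_0 + 3 and
    # h_t = rho * h_{t-1} + 3 * (1 - rho) + eps_t, then records the first period
    # at which h_t <= 0 (or 4159 if the series stays positive for all T periods).
    # For every rho on the grid, the mean of that stopping time over all runs is
    # collected in `opt`, and the rho maximizing it is reported as optimal.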
scan_sim = cl.Program(ctx, """
__kernel void grid_search(__global float *ary_a, __global float *ary_b,
float rho, __global float *result)
{
int idx = get_global_id(0);
for (int i=0; i<4160; i++)
{
if (i == 0){
ary_b[idx * 4160 + i] = ary_a[idx * 4160 + i] + 3;
}
else {
ary_b[idx * 4160 + i] = rho * ary_b[idx * 4160 + i - 1] + 3 * (1 - rho) + ary_a[idx * 4160 + i];
}
if (ary_b[idx * 4160 + i] <= 0 || i == 4159) {
result[idx] = i;
break;
}
}
}
""").build()
result = cl_array.to_device(queue, np.empty(n_runs).astype(np.float32))
ary_b = cl_array.to_device(queue, np.empty(n_runs * T).astype(np.float32))
for r in rho_l:
scan_sim.grid_search(queue, (n_runs,), None, ran.data, ary_b.data, np.float32(r), result.data)
opt.append(result.get().mean())
time_elapsed = time.time() - t0
opt_rho = rho_l[np.argmax(opt)]
print("The optimal rho is", opt_rho)
print("The period is", max(opt))
print("Computation Time:", time_elapsed)
return
def main():
grid_search(n_runs = 1000)
if __name__ == '__main__':
main()
| 30.580645 | 111 | 0.547996 |
4a1a010b41acc48d6f633fc193f8bb93526ef561
| 1,117 |
py
|
Python
|
dongtai/models/project_report.py
|
Maskhe/dongtai-core
|
1521e1377e997b262a7fe678d64e49fd5065110c
|
[
"Apache-2.0"
] | 5 |
2021-09-06T09:30:38.000Z
|
2022-03-22T02:11:49.000Z
|
dongtai/models/project_report.py
|
Maskhe/dongtai-core
|
1521e1377e997b262a7fe678d64e49fd5065110c
|
[
"Apache-2.0"
] | 4 |
2021-11-29T08:35:27.000Z
|
2021-12-18T06:08:31.000Z
|
dongtai/models/project_report.py
|
Maskhe/dongtai-core
|
1521e1377e997b262a7fe678d64e49fd5065110c
|
[
"Apache-2.0"
] | 13 |
2021-09-01T07:24:29.000Z
|
2022-03-18T08:12:50.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:luzhongyang
# datetime:2021/10/29 下午5:29
# software: PyCharm
# project: dongtai-models
from django.db import models
from django.utils.translation import gettext_lazy as _
from dongtai.models import User
from dongtai.models.project import IastProject
from dongtai.models.server import IastServer
from dongtai.utils.settings import get_managed
class ProjectReport(models.Model):
user = models.ForeignKey(User, models.DO_NOTHING)
project = models.ForeignKey(IastProject, models.DO_NOTHING, blank=True, null=True)
vul_id = models.IntegerField(blank=True, null=True, default=0)
type = models.CharField(max_length=10, blank=True, null=True)
language = models.CharField(max_length=10, blank=True, null=True)
status = models.IntegerField(default=0, null=False)
path = models.CharField(default='', max_length=255, blank=True, null=True)
file = models.BinaryField(blank=True, null=True)
create_time = models.IntegerField(default=0, null=False)
class Meta:
managed = get_managed()
db_table = 'iast_project_report'
| 37.233333 | 86 | 0.749329 |
4a1a011aef0e7aa99d8a01a6688c7c20fa7b2ae9
| 5,400 |
py
|
Python
|
hashkernel/files/ignore_file.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
hashkernel/files/ignore_file.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
hashkernel/files/ignore_file.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
import codecs
import fnmatch
import logging
import os
from collections import defaultdict
from functools import total_ordering
from pathlib import Path
from typing import List, Sequence, Union
from hashkernel import reraise_with_msg
from hashkernel.files import aio_read_text, ensure_path, read_text
log = logging.getLogger(__name__)
@total_ordering
class PathMatch:
"""
>>> abc_txt = PathMatch('a/b/c','*.txt')
>>> ab_log = PathMatch('a/b','*.log')
>>> repr(ab_log) == repr(PathMatch('a/b','*.log'))
True
>>> abc_txt.match('a/b/c/d.txt')
True
>>> ab_log.match('a/b/c/d.log')
True
>>> ab_log == abc_txt
False
>>> PathMatch('a/b/','c/*.txt').match('a/b/c/d.txt')
True
>>> PathMatch('a/b/','c/*.txt').match('a/b/c2/d.txt')
False
>>> PathMatch('a/b/','c/*/').match('a/b/c/d')
True
>>> PathMatch('a/b/','c/*/').match('q/b/c/d')
False
>>> sorted([abc_txt, ab_log, abc_txt]) == [ab_log, abc_txt, abc_txt]
True
"""
def __init__(self, cur_dir, pattern):
self.root = ensure_path(cur_dir)
self.pattern = pattern
def match(self, path):
path = ensure_path(path)
if self.root in path.parents:
rel_path = path.relative_to(self.root)
return rel_path.match(self.pattern)
return False
def __key__(self):
return (self.pattern, self.root)
def __repr__(self):
return f"PathMatch({str(self.root)!r}, {self.pattern!r})"
def __lt__(self, other):
return self.__key__() < other.__key__()
def __eq__(self, other):
return self.__key__() == other.__key__()
def __hash__(self):
return hash(self.__key__())
def is_included(self, other):
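        # (Comment added for clarity.) True when `other` is already covered by
        # `self`: same glob pattern and self.root is an ancestor of other.root,
        # which lets PathMatchSet.add() treat the narrower match as redundant.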
return self.pattern == other.pattern and self.root in other.root.parents
class PathMatchSet:
"""
>>> pms = PathMatchSet()
>>> pms.match('a/b/c/d.txt')
False
>>> pms.add(PathMatch('a/b/c', '*.txt'))
True
>>> pms.match('a/b/c/d.log')
False
>>> pms.add(PathMatch('a/b', '*.log'))
True
>>> pms.add(PathMatch('a/b/c', '*.txt'))
False
>>> pms.add(PathMatch('a/b/c/q/f', '*.txt'))
False
>>> pms.add(PathMatch('r/b/c/q/f', '*.txt'))
True
>>> pms.match('a/b/c/d.log')
True
>>> pms.match('a/b/c/d.txt')
True
>>>
"""
def __init__(self):
self.match_by_pattern = defaultdict(set)
self.all_matches = set()
def add(self, path_match):
if path_match not in self.all_matches:
for c in self.match_by_pattern[path_match.pattern]:
if c.is_included(path_match):
return False
self.match_by_pattern[path_match.pattern].add(path_match)
self.all_matches.add(path_match)
return True
return False
def match(self, path):
path = ensure_path(path)
return any(pm.match(path) for pm in self.all_matches)
class IgnoreRuleSet:
root: Path
ignore_files: PathMatchSet
spec_to_parse: PathMatchSet
ignore_symlinks: bool = True
def __init__(self, path: Path):
self.root = path
self.ignore_files = PathMatchSet()
self.spec_to_parse = PathMatchSet()
def update_ignore_files(self, *args: Union[str, PathMatch]):
added = 0
for pm in args:
if isinstance(pm, PathMatch):
assert self.root == pm.root or self.root in pm.root.parents
if self.ignore_files.add(pm):
added += 1
else:
if self.ignore_files.add(PathMatch(self.root, pm)):
added += 1
return added
def update_spec_to_parse(self, *args: str):
added = 0
for pm in args:
if self.spec_to_parse.add(PathMatch(self.root, pm)):
added += 1
return added
def parse_specs(self, listdir: List[Path]) -> int:
""" Returns number of specs parsed """
specs_parsed = 0
for p in listdir:
if self.spec_to_parse.match(p):
read_text(p, self.parse_spec)
specs_parsed += 1
return specs_parsed
def parse_spec(self, path: Path, text: str):
dir = path.parent
for l in text.split("\n"):
l = l.strip()
if l != "" and l[0] != "#":
self.ignore_files.add(PathMatch(dir, l))
def path_filter(self, path: Path):
return not self.ignore_files.match(path)
class IgnoreFilePolicy:
def __init__(self, ignore_files, spec_to_parse=()):
self.ignore_files = ignore_files
self.spec_to_parse = spec_to_parse
def apply(self, path: Path) -> IgnoreRuleSet:
rule_set = IgnoreRuleSet(path)
if self.ignore_files:
rule_set.update_ignore_files(*self.ignore_files)
if self.spec_to_parse:
rule_set.update_spec_to_parse(*self.spec_to_parse)
return rule_set
INCLUSIVE_POLICY = IgnoreFilePolicy(ignore_files=(), spec_to_parse=())
DEFAULT_IGNORE_POLICY = IgnoreFilePolicy(
ignore_files=(
".svn",
".hash_tree",
".git",
".DS_Store",
".vol",
".hotfiles.btree",
".ssh",
".hs_*",
".backup*",
".Spotlight*",
"._*",
".Trash*",
),
spec_to_parse=(".gitignore", ".ignore"),
)
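# Example usage (a hedged sketch with a hypothetical project path; not part of
# the module itself):
# rules = DEFAULT_IGNORE_POLICY.apply(Path("/some/project"))
# rules.parse_specs(list(Path("/some/project").iterdir()))  # reads .gitignore/.ignore
# rules.path_filter(Path("/some/project/.git"))        # -> False (ignored)
# rules.path_filter(Path("/some/project/src/main.py")) # -> True  (kept)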
| 27.135678 | 80 | 0.575926 |
4a1a02c90ee3cd5374fd3272e20d186ae785c618
| 1,844 |
py
|
Python
|
setup.py
|
bjmarfito/autoRIFT
|
1d54a2cca349a6f2386121a8f5466b5b0eb10e1f
|
[
"Apache-2.0"
] | 70 |
2021-08-29T17:32:12.000Z
|
2022-03-13T02:09:31.000Z
|
setup.py
|
bjmarfito/autoRIFT
|
1d54a2cca349a6f2386121a8f5466b5b0eb10e1f
|
[
"Apache-2.0"
] | 19 |
2021-10-04T18:53:03.000Z
|
2022-03-29T07:20:46.000Z
|
setup.py
|
bjmarfito/autoRIFT
|
1d54a2cca349a6f2386121a8f5466b5b0eb10e1f
|
[
"Apache-2.0"
] | 18 |
2019-09-07T13:13:06.000Z
|
2021-08-07T07:46:41.000Z
|
#!/usr/bin/env python3
import numpy as np
import os
from distutils.core import setup
from distutils.extension import Extension
##Figure out opencv paths
try:
import cv2
except ImportError:
raise Exception('OpenCV does not appear to be installed. Install before proceeding ... ')
##Figure out paths for headers and libraries
bldInfo = cv2.getBuildInformation().splitlines()
for line in bldInfo:
if 'Install to:' in line:
path = line.split()[-1]
break
print('Open CV path: ', path)
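# (Comment added for clarity.) The install prefix parsed above is reused below to
# locate OpenCV's headers (include/opencv4) and shared libraries (lib) for the
# autoriftcore extension; the geogrid extension additionally links GDAL and OpenMP.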
extensions = [
Extension(
name="autoRIFT/autoriftcore",
sources= ['geo_autoRIFT/autoRIFT/bindings/autoriftcoremodule.cpp'],
include_dirs=[np.get_include()] +
['geo_autoRIFT/autoRIFT/include',
os.path.join(path, 'include/opencv4/')],
library_dirs = [os.path.join(path, 'lib')],
libraries=['opencv_core', 'opencv_highgui', 'opencv_imgproc'],
extra_compile_args=['-std=c++11'],
language="c++"
),
Extension(
name="geogrid/geogridOptical",
sources= ['geo_autoRIFT/geogrid/bindings/geogridOpticalmodule.cpp','geo_autoRIFT/geogrid/src/geogridOptical.cpp'],
include_dirs=[np.get_include()] +
['geo_autoRIFT/geogrid/include',
os.path.join(path, 'include')],
library_dirs = [os.path.join(path, 'lib')],
libraries=['gomp','gdal'],
extra_compile_args=['-std=c++11'],
language="c++"
)
]
setup (name = 'geo_autoRIFT',
version = '1.3.0',
description = 'This is the autoRIFT python package',
package_dir={'autoRIFT': 'geo_autoRIFT/autoRIFT','geogrid': 'geo_autoRIFT/geogrid'},
packages=['autoRIFT','geogrid'],
# scripts=['geo_autoRIFT/geogrid/GeogridOptical.py'],
ext_modules = extensions)
| 31.254237 | 122 | 0.623644 |
4a1a02dc8d855b2e33e30ea744cd92868d9a6ed3
| 1,525 |
py
|
Python
|
plotly/validators/surface/contours/x/_project.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12 |
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/surface/contours/x/_project.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27 |
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/surface/contours/x/_project.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6 |
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ProjectValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='project',
parent_name='surface.contours.x',
**kwargs
):
super(ProjectValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Project'),
data_docs=kwargs.pop(
'data_docs', """
x
Determines whether or not these contour lines
are projected on the x plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
y
Determines whether or not these contour lines
are projected on the y plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
z
Determines whether or not these contour lines
are projected on the z plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
"""
),
**kwargs
)
| 38.125 | 71 | 0.56459 |
4a1a0621db3aeafecac8827de08efb34cc3c9de3
| 2,246 |
py
|
Python
|
src/reqompyler/reqompyler.py
|
zurutech/reqompyler
|
433e92b1771bf049e0ce7d338def83250ed1acf3
|
[
"Apache-2.0"
] | 4 |
2020-02-13T12:01:39.000Z
|
2020-03-18T16:41:14.000Z
|
src/reqompyler/reqompyler.py
|
zurutech/reqompyler
|
433e92b1771bf049e0ce7d338def83250ed1acf3
|
[
"Apache-2.0"
] | 87 |
2020-02-20T11:19:38.000Z
|
2021-07-13T00:47:27.000Z
|
src/reqompyler/reqompyler.py
|
zurutech/reqompyler
|
433e92b1771bf049e0ce7d338def83250ed1acf3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Zuru Tech HK Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Use `pip-tools <https://github.com/jazzband/pip-tools>`_ to compile the requirements files."""
import subprocess
from pathlib import Path
from typing import List, Optional
__ALL__ = ["reqcompyle"]
def reqcompyle(
in_folder: Path,
out_folder: Path,
tld: Optional[Path],
ignore: Optional[List[Path]],
) -> None:
"""
Compile requirements files using `pip-tools <https://github.com/jazzband/pip-tools>`_.
Args:
in_folder (:obj:`pathlib.Path`): Path to the folder with your requirements.in files.
out_folder (:obj:`pathlib.Path`): Path to the folder were the pinned requirements
file will be saved.
tld (:obj:`pathlib.Path`): Top level directory of your package, if passed, copies pinned
the ``dev.txt`` as ``requirements.txt`` to this location.
ignore (:obj:`list` of [:obj:`pathlib.Path`]): Array of requirements files
(without extension) to ignore.
"""
if not (out_folder.exists() and out_folder.is_dir()):
Path(out_folder).mkdir(parents=True)
for req_file in in_folder.iterdir():
if ignore and req_file.stem in ignore:
continue
subprocess.run(
[
"pip-compile",
"--pre",
"--annotate",
f"--output-file={out_folder.joinpath(req_file.name)}",
f"{in_folder.joinpath(req_file.name)}",
],
capture_output=True,
)
if tld:
subprocess.run(
["cp", out_folder.joinpath("dev.txt"), tld.joinpath("requirements.txt")]
)
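# Example invocation (a hedged sketch; the folder layout and import form are
# hypothetical and depend on how the package is installed):
#
#     from pathlib import Path
#     from reqompyler.reqompyler import reqcompyle
#
#     reqcompyle(
#         in_folder=Path("requirements/in"),    # folder with the *.in files (incl. dev.in)
#         out_folder=Path("requirements/txt"),  # pinned *.txt files are written here
#         tld=Path("."),                        # copies the pinned dev.txt to ./requirements.txt
#         ignore=None,                          # or a list of requirement-file stems to skip
#     )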
| 35.09375 | 97 | 0.640249 |
4a1a06278a6c2a2a0734b4041d818c4492d9f498
| 425 |
py
|
Python
|
file_clean/file_process/gettop1000.py
|
majiashu/data-processing
|
772dc341657c416cef7046473ed3030efa200e33
|
[
"MIT"
] | 1 |
2020-07-31T15:13:02.000Z
|
2020-07-31T15:13:02.000Z
|
file_clean/file_process/gettop1000.py
|
majiashu/data-processing
|
772dc341657c416cef7046473ed3030efa200e33
|
[
"MIT"
] | null | null | null |
file_clean/file_process/gettop1000.py
|
majiashu/data-processing
|
772dc341657c416cef7046473ed3030efa200e33
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
gettop1000.py
Created on 2018/9/13 16:28
@author: 马家树(majstx@163.com)
"""
import openpyxl
import time
import pandas as pd
start = time.time()
# wb = openpyxl.load_workbook('C:/Users/meridian/Desktop/九江市第一人民医院/九江市第一人民医院_原始.xlsx')
data = pd.read_excel('C:/Users/meridian/Desktop/九江市第一人民医院/九江市第一人民医院_原始.xlsx', sheet_name=0)
end = time.time()
use_time = end - start
print("读取该文件耗时{0}".format())
| 21.25 | 91 | 0.72 |
4a1a0664f6ef3849d44a336155fc094ca9f20431
| 5,540 |
py
|
Python
|
RoboticsLanguage/Base/Tools/ErrorHandling.py
|
robotcaresystems/roboticslanguage
|
3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed
|
[
"Apache-2.0"
] | 64 |
2018-05-15T14:36:44.000Z
|
2022-03-09T05:00:31.000Z
|
RoboticsLanguage/Base/Tools/ErrorHandling.py
|
robotcaresystems/roboticslanguage
|
3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed
|
[
"Apache-2.0"
] | 9 |
2018-04-17T21:12:27.000Z
|
2019-11-08T20:53:32.000Z
|
RoboticsLanguage/Base/Tools/ErrorHandling.py
|
robotcaresystems/roboticslanguage
|
3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed
|
[
"Apache-2.0"
] | 10 |
2018-03-27T12:09:12.000Z
|
2021-02-16T08:07:26.000Z
|
#
# This is the Robotics Language compiler
#
# ErrorHandling.py: Implements Error Handling functions
#
# Created on: June 22, 2017
# Author: Gabriel A. D. Lopes
# Licence: Apache 2.0
# Copyright: 2014-2017 Robot Care Systems BV, The Hague, The Netherlands. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import sys
import re
from RoboticsLanguage.Base import Utilities
class ReturnException(Exception):
pass
def createErrorMessage(parameters, error_type, reason, line='', filename='', line_number=0, column_number=0):
  # the optional snippet of code
  line_text = '\n' + line.strip('\n') + '\n' + (' ' * column_number + '^') + '\n' if line != '' else ''
  # the optional filename
  file_text = (tryMessageInLanguage(parameters,'error-in-file').format(filename)) if filename != '' else ''
  # the optional line number
line_number_text = (tryMessageInLanguage(parameters,'error-at-line').format(line_number)) if line_number > 0 else ''
# the optional column number
column_number_text = (tryMessageInLanguage(parameters,
'error-at-column').format(column_number)) if column_number > 0 else ''
return tryMessageInLanguage(parameters,'error-sentence').format(
line_text, error_type, file_text, line_number_text, column_number_text, reason)
def tryMessageInLanguage(parameters, key):
try:
return tryInLanguage(parameters['messages'][key], parameters['globals']['compilerLanguage'])
except:
return default_error_message(parameters)
def tryInLanguage(text, language):
if language in text.keys():
return text[language]
else:
# revert to english
return text['en']
def default_error_message(parameters):
'''Default error message for all languages'''
return tryInLanguage('default_error_message', parameters['globals']['compilerLanguage'])
def fileLineNumberToLine(filename, line_number):
'''given a file name and a line number, returns the text line'''
with open(filename) as file:
line = [next(file) for x in xrange(line_number)][-1]
return line
def textLineNumberToLine(text, line_number):
'''given a text string and a line number, returns the text line'''
return text.split('\n')[line_number - 1]
def positionToLineColumn(position, text):
'''given a position (byte counter) and text, returns the line, line number and column number'''
lines = str(text).split('\n')
counter = 0
line_number = 1
column_number = 0
for line in lines:
new_counter = counter + len(line)
if new_counter > position:
column_number = position - counter
break
else:
counter += len(line) + 1
line_number += 1
return line_number, column_number, line
@contextmanager
def tryToProceed():
try:
yield
except Exception as e:
if type(e).__name__ == 'ReturnException':
pass
else:
raise
def handler(parameters, key='default', **options):
# get the logger level if defined. If not, default to error
level = options['level'] if 'level' in options.keys() else 'error'
try:
# create a message
message = parameters['errorHandlingFunctions'][key](parameters, **options)
except:
message = default_error_message(parameters)
# show the message
Utilities.logging.log(level, message)
# log the messages
Utilities.logErrors(message, key, parameters)
# apply actions
if 'action' in options.keys() and not parameters['developer']['ignoreErrors']:
if options['action'] == 'stop':
# stop the RoL script
sys.exit(1)
elif options['action'] == 'return':
# this will return the parent function
raise ReturnException
@contextmanager
def exception(e, parameters, key='default', **options):
try:
yield
except Exception as e:
# get the logger level if defined. If not, default to error
level = options['level'] if 'level' in options.keys() else 'error'
try:
# try the desired exception
exception_emmiter = re.search("<.*'([^']*)'>", str(type(e))).group(1)
# create a message
message = parameters['errorExceptionFunctions'][exception_emmiter][key](e,parameters, **options)
except:
try:
# try the default exception for the emmiter class
exception_emmiter = '.'.join(exception_emmiter.split('.')[:-1])
message = parameters['errorExceptionFunctions'][exception_emmiter]['default'](e,parameters, **options)
except:
# return the default error message
message = default_error_message(parameters)
# show the message
Utilities.logging.log(level, message)
# log the messages
Utilities.logErrors(message, key, parameters, exception=e)
# apply actions
if 'action' in options.keys() and not parameters['developer']['ignoreErrors']:
if options['action'] == 'stop':
# stop the RoL script
sys.exit(1)
elif options['action'] == 'return':
# this will return the parent function
raise ReturnException
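# --- Editor's illustrative sketch (not part of the original module) ---
# How positionToLineColumn maps a byte offset to (line number, column, line);
# the sample text and offset below are invented for illustration only:
#   positionToLineColumn(18, "first line\nsecond line\nthird line")
#   -> (2, 7, "second line")   # offset 18 is the 'l' of "line" on line 2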
| 32.023121 | 118 | 0.695668 |
4a1a067bf63d2e2ffe937c0201866ac723fc10d5
| 334 |
py
|
Python
|
algorithms/1441. Build an Array With Stack Operations.py
|
vuzway9132/leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1 |
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/1441. Build an Array With Stack Operations.py
|
vuzway9132/leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/1441. Build an Array With Stack Operations.py
|
vuzway9132/leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
j, ans = 0, []
for i in range(1, n+1):
if j >= len(target):
break
else:
if target[j] == i:
ans.append('Push')
j += 1
elif target[j] > i:
ans.extend(['Push', 'Pop'])
return ans
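# --- Editor's illustrative check (not part of the original submission) ---
# Example 1 from the LeetCode 1441 problem statement: target=[1, 3], n=3.
# 1 is pushed and kept, 2 is pushed then popped, 3 is pushed and kept.
if __name__ == '__main__':
    print(Solution().buildArray([1, 3], 3))  # -> ['Push', 'Push', 'Pop', 'Push']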
| 23.857143 | 63 | 0.467066 |
4a1a086cf15b3615fd9c0a87ec54498096f104a5
| 789 |
py
|
Python
|
attacks/multi_keys/same_n_huge_e.py
|
cyborgflashtime/RsaCtfTool
|
de39fe22d2fc3615a60373d1b9a5b4201ed0452c
|
[
"Beerware"
] | 2 |
2020-11-22T19:03:01.000Z
|
2021-03-17T11:32:06.000Z
|
attacks/multi_keys/same_n_huge_e.py
|
OlcaytoKorcan/RsaCtfTool
|
13c74d8a8707d76c0dc646b8760619ee8700f7ee
|
[
"Beerware"
] | null | null | null |
attacks/multi_keys/same_n_huge_e.py
|
OlcaytoKorcan/RsaCtfTool
|
13c74d8a8707d76c0dc646b8760619ee8700f7ee
|
[
"Beerware"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import tempfile
from Crypto.PublicKey import RSA
from lib.keys_wrapper import PublicKey
def attack(attack_rsa_obj, publickey, cipher=[]):
""" Same n huge e attack
"""
if not isinstance(publickey, list):
return (None, None)
if len(set([_.n for _ in publickey])) == 1:
new_e = 1
for k in publickey:
new_e = new_e * k.e
tmpfile = tempfile.NamedTemporaryFile()
with open(tmpfile.name, "wb") as tmpfd:
tmpfd.write(RSA.construct((publickey[0].n, new_e)).publickey().exportKey())
result = attack_rsa_obj.attack_single_key(tmpfile.name)
if result:
return (attack_rsa_obj.priv_key, None)
return (None, None)
| 29.222222 | 87 | 0.619772 |
4a1a08b8c8fcb7697bb412fec29020f1bb739dc7
| 909 |
py
|
Python
|
examples/makecircles/make_circles_distmean.py
|
jaspreet321/kepler-mapper
|
a145a31665806713819fe08623275b04d973274a
|
[
"MIT"
] | null | null | null |
examples/makecircles/make_circles_distmean.py
|
jaspreet321/kepler-mapper
|
a145a31665806713819fe08623275b04d973274a
|
[
"MIT"
] | null | null | null |
examples/makecircles/make_circles_distmean.py
|
jaspreet321/kepler-mapper
|
a145a31665806713819fe08623275b04d973274a
|
[
"MIT"
] | null | null | null |
import kmapper as km
# Make very noisy circles
import sklearn
from sklearn import datasets
data, labels = datasets.make_circles(n_samples=5000, noise=0.05, factor=0.3)
# Initialize
mapper = km.KeplerMapper(verbose=2)
# Fit to and transform the data
projected_data = mapper.fit_transform(data, projection="dist_mean")
# Create dictionary called 'simplicial_complex' with nodes, edges and meta-information
simplicial_complex = mapper.map(projected_data, X=data,
clusterer=sklearn.cluster.DBSCAN(eps=0.1, min_samples=5),
cover=km.Cover(perc_overlap=0.2))
# Visualize it
mapper.visualize(simplicial_complex, path_html="keplermapper-makecircles-distmean.html",
custom_meta={"Data:": "datasets.make_circles(n_samples=5000, noise=0.05, factor=0.3)"},
custom_tooltips=labels,
color_values=labels)
| 37.875 | 104 | 0.69637 |
4a1a095c9f74c4ac6d840b887babcacea7ced928
| 1,883 |
py
|
Python
|
convolutional_attention/visualization/attfeature_viz.py
|
s1530129650/convolutional-attention
|
8839da8146962879bb419a61253e7cf1b684fb22
|
[
"BSD-3-Clause"
] | 128 |
2016-05-10T01:38:27.000Z
|
2022-02-04T07:14:12.000Z
|
convolutional_attention/visualization/attfeature_viz.py
|
s1530129650/convolutional-attention
|
8839da8146962879bb419a61253e7cf1b684fb22
|
[
"BSD-3-Clause"
] | 6 |
2016-07-19T09:27:47.000Z
|
2021-07-08T21:22:32.000Z
|
convolutional_attention/visualization/attfeature_viz.py
|
s1530129650/convolutional-attention
|
8839da8146962879bb419a61253e7cf1b684fb22
|
[
"BSD-3-Clause"
] | 36 |
2016-05-11T08:57:26.000Z
|
2021-07-07T02:37:07.000Z
|
import cPickle
import numpy as np
from scipy.spatial.distance import pdist, squareform
import sys
if __name__ == "__main__":
if len(sys.argv) !=2:
print "Usage <inputPkl>"
sys.exit(-1)
with open(sys.argv[1], 'rb') as f:
code_att_feats = cPickle.load(f)
# Construct matrix
feat_pos = []
feature_vecs = []
cnt = 0
for i, sentence_data in enumerate(code_att_feats):
if not sentence_data[0].startswith("is"):
continue
elif cnt > 200:
break # Just use the first X sentences for now
cnt +=1
sentence_features = sentence_data[2].T
for j in xrange(1, sentence_features.shape[0]-1): # Ignore START/END
feat_pos.append((i, j))
feature_vecs.append(sentence_features[j])
feature_vecs = np.array(feature_vecs)
print feature_vecs.shape
distances = squareform(pdist(feature_vecs, 'cosine'))
def highlight_location(code_tokens, position, context_size=6):
return "..." + " ".join(code_tokens[max(position-context_size, 0):position]) + " ***" + code_tokens[position] \
+ "*** " + " ".join(code_tokens[position+1:position+context_size+1]) + "..."
for i in xrange(distances.shape[0]):
code_id, tok_id = feat_pos[i]
print "Neighbors of " + highlight_location(code_att_feats[code_id][1], tok_id) + " in " + code_att_feats[code_id][0]
nearest_neighbors = np.argsort(distances[i])[1:] # Ignore self
for j in xrange(4):
neigh_id, neigh_tok_id = feat_pos[nearest_neighbors[j]]
print str(j+1) + ". " + highlight_location(code_att_feats[neigh_id][1], neigh_tok_id) + \
" (distance " + str(distances[i][nearest_neighbors[j]]) + ")" + " in " + code_att_feats[neigh_id][0]
print "---------------------------------------"
print ""
| 38.428571 | 124 | 0.60223 |
4a1a0967b80365e27ba3f73a403b17358f266ca2
| 9,170 |
py
|
Python
|
utils.py
|
abeja-inc/platform-template-image-segmentation
|
ce04da51931d7a50cf245a5d8ae8bb6dc3f65236
|
[
"MIT"
] | null | null | null |
utils.py
|
abeja-inc/platform-template-image-segmentation
|
ce04da51931d7a50cf245a5d8ae8bb6dc3f65236
|
[
"MIT"
] | 14 |
2019-09-09T08:26:31.000Z
|
2019-12-09T07:18:26.000Z
|
utils.py
|
abeja-inc/platform-template-image-segmentation
|
ce04da51931d7a50cf245a5d8ae8bb6dc3f65236
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from collections import defaultdict, deque
import datetime
import random
import time
import torch
import torch.distributed as dist
from PIL import Image
import numpy as np
import errno
import os
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class ConfusionMatrix(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.mat = None
def update(self, a, b):
n = self.num_classes
if self.mat is None:
self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
with torch.no_grad():
k = (a >= 0) & (a < n)
inds = n * a[k].to(torch.int64) + b[k]
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
def reset(self):
self.mat.zero_()
def compute(self):
h = self.mat.float()
acc_global = torch.diag(h).sum() / h.sum()
acc = torch.diag(h) / h.sum(1)
iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
return acc_global, acc, iu
def reduce_from_all_processes(self):
if not torch.distributed.is_available():
return
if not torch.distributed.is_initialized():
return
torch.distributed.barrier()
torch.distributed.all_reduce(self.mat)
def __str__(self):
acc_global, acc, iu = self.compute()
return (
'global correct: {:.1f}\n'
'average row correct: {}\n'
'IoU: {}\n'
'mean IoU: {:.1f}').format(
acc_global.item() * 100,
['{:.1f}'.format(i) for i in (acc * 100).tolist()],
['{:.1f}'.format(i) for i in (iu * 100).tolist()],
iu.mean().item() * 100)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {}'.format(header, total_time_str))
def worker_init_fn(worker_id):
random.seed(worker_id)
def hex2rgb(hex: str):
return tuple(int(hex.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))
def create_colormap(labels: list) -> list:
colormap = [(0, 0, 0)]
for label in labels:
colormap.append(hex2rgb(label['color']))
return colormap
def create_palette(labels: dict):
palimg = Image.new('P', (16, 16))
palette = [0, 0, 0]
for label in labels:
palette.extend(list(hex2rgb(label['color'])))
palimg.putpalette(palette)
return palimg
def cat_list(images, fill_value=0):
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
batch_shape = (len(images),) + max_size
batched_imgs = images[0].new(*batch_shape).fill_(fill_value)
for img, pad_img in zip(images, batched_imgs):
pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img)
return batched_imgs
def collate_fn(batch):
images, targets = list(zip(*batch))
batched_imgs = cat_list(images, fill_value=0)
batched_targets = cat_list(targets, fill_value=255)
return batched_imgs, batched_targets
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if not args.DISTRIBUTED:
print('Not using distributed mode')
return
torch.cuda.set_device(args.GPU)
print('| distributed init (rank {}): {}'.format(
args.RANK, args.DIST_URL), flush=True)
torch.distributed.init_process_group(backend=args.DIST_BACKEND, init_method=args.DIST_URL,
world_size=args.WORLD_SIZE, rank=args.RANK)
setup_for_distributed(args.RANK == 0)
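# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal use of ConfusionMatrix on invented 3-class predictions, just to show
# the update()/compute()/__str__ flow defined above.
if __name__ == '__main__':
    _cm = ConfusionMatrix(num_classes=3)
    _target = torch.tensor([0, 1, 2, 2, 1])   # toy ground-truth labels
    _pred = torch.tensor([0, 1, 2, 1, 1])     # toy predictions
    _cm.update(_target.flatten(), _pred.flatten())
    print(_cm)  # global accuracy, per-class accuracy and IoU for the toy labels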
| 29.018987 | 94 | 0.561396 |
4a1a0a785a58fed2e40b031de48c30f551554d7e
| 6,073 |
py
|
Python
|
tests/test_optimization.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 309 |
2020-02-07T23:09:27.000Z
|
2022-03-31T08:01:53.000Z
|
tests/test_optimization.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 93 |
2020-02-22T05:56:28.000Z
|
2022-03-27T08:43:38.000Z
|
tests/test_optimization.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 148 |
2020-02-14T22:16:11.000Z
|
2022-03-22T17:08:04.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
lrs = []
for _ in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
lrs = []
for step in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, "schedule.bin")
torch.save(scheduler.state_dict(), file_name)
state_dict = torch.load(file_name)
scheduler.load_state_dict(state_dict)
return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_adam_w(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = torch.nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
for _ in range(100):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
def test_adafactor(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = torch.nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = Adafactor(
params=[w],
lr=1e-2,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
relative_step=False,
scale_parameter=False,
warmup_init=False,
)
for _ in range(1000):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
m = torch.nn.Linear(50, 50) if is_torch_available() else None
optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
num_steps = 10
def assertListAlmostEqual(self, list1, list2, tol, msg=None):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol, msg=msg)
def test_schedulers(self):
common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
}
for scheduler_func, data in scheds.items():
kwargs, expected_learning_rates = data
scheduler = scheduler_func(self.optimizer, **kwargs)
self.assertEqual(len([scheduler.get_lr()[0]]), 1)
lrs_1 = unwrap_schedule(scheduler, self.num_steps)
self.assertListAlmostEqual(
lrs_1,
expected_learning_rates,
tol=1e-2,
msg=f"failed for {scheduler_func} in normal scheduler",
)
scheduler = scheduler_func(self.optimizer, **kwargs)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
| 36.584337 | 101 | 0.602174 |
4a1a0aceaf345d9f91658422f4ce9864fa2bfd70
| 34,812 |
py
|
Python
|
lithops/executors.py
|
Cohen-J-Omer/lithops
|
12bf3babbce6e9eb70a5e16cdd40093552a2ecfc
|
[
"Apache-2.0"
] | null | null | null |
lithops/executors.py
|
Cohen-J-Omer/lithops
|
12bf3babbce6e9eb70a5e16cdd40093552a2ecfc
|
[
"Apache-2.0"
] | 1 |
2021-09-21T01:16:43.000Z
|
2021-09-21T01:16:43.000Z
|
lithops/executors.py
|
LaudateCorpus1/lithops
|
f03f45eb2795b31c2299b6873797add3e97ddf41
|
[
"Apache-2.0"
] | 1 |
2021-05-25T07:28:05.000Z
|
2021-05-25T07:28:05.000Z
|
#
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import copy
import logging
import atexit
import pickle
import tempfile
import numpy as np
import subprocess as sp
from typing import Optional, List, Union, Tuple, Dict, Any
from collections.abc import Callable
from datetime import datetime
from lithops import constants
from lithops.future import ResponseFuture
from lithops.invokers import create_invoker
from lithops.storage import InternalStorage
from lithops.wait import wait, ALL_COMPLETED, THREADPOOL_SIZE, WAIT_DUR_SEC
from lithops.job import create_map_job, create_reduce_job
from lithops.config import default_config, \
extract_localhost_config, extract_standalone_config, \
extract_serverless_config, get_log_info, extract_storage_config
from lithops.constants import LOCALHOST, CLEANER_DIR, \
SERVERLESS, STANDALONE
from lithops.utils import is_notebook, setup_lithops_logger, \
is_lithops_worker, create_executor_id, create_futures_list
from lithops.localhost.localhost import LocalhostHandler
from lithops.standalone.standalone import StandaloneHandler
from lithops.serverless.serverless import ServerlessHandler
from lithops.storage.utils import create_job_key, CloudObject
from lithops.monitor import JobMonitor
from lithops.utils import FuturesList
logger = logging.getLogger(__name__)
class FunctionExecutor:
"""
Executor abstract class that contains the common logic for the Localhost, Serverless and Standalone executors
:param mode: Execution mode. One of: localhost, serverless or standalone
:param config: Settings passed in here will override those in lithops_config
:param backend: Compute backend to run the functions
:param storage: Storage backend to store Lithops data
:param runtime: Name of the runtime to run the functions
:param runtime_memory: Memory (in MB) to use to run the functions
:param monitoring: Monitoring system implementation. One of: storage, rabbitmq
:param workers: Max number of parallel workers
:param worker_processes: Worker granularity, number of concurrent/parallel processes in each worker
:param remote_invoker: Spawn a function that will perform the actual job invocation (True/False)
:param log_level: Log level printing (INFO, DEBUG, ...). Set it to None to hide all logs. If this is param is set, all logging params in config are disabled
"""
def __init__(self,
mode: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
backend: Optional[str] = None,
storage: Optional[str] = None,
runtime: Optional[str] = None,
runtime_memory: Optional[int] = None,
monitoring: Optional[str] = None,
workers: Optional[int] = None,
worker_processes: Optional[int] = None,
remote_invoker: Optional[bool] = None,
log_level: Optional[str] = False):
self.is_lithops_worker = is_lithops_worker()
self.executor_id = create_executor_id()
self.futures = []
self.cleaned_jobs = set()
self.total_jobs = 0
self.last_call = None
# setup lithops logging
if not self.is_lithops_worker:
# if is lithops worker, logging has been set up in entry_point.py
if log_level:
setup_lithops_logger(log_level)
elif log_level is False and logger.getEffectiveLevel() == logging.WARNING:
# Set default logging from config
setup_lithops_logger(*get_log_info(config))
# overwrite user-provided parameters
config_ow = {'lithops': {}}
if runtime is not None:
config_ow['runtime'] = runtime
if runtime_memory is not None:
config_ow['runtime_memory'] = int(runtime_memory)
if remote_invoker is not None:
config_ow['remote_invoker'] = remote_invoker
if mode is not None:
config_ow['lithops']['mode'] = mode
if backend is not None:
config_ow['lithops']['backend'] = backend
if storage is not None:
config_ow['lithops']['storage'] = storage
if workers is not None:
config_ow['lithops']['workers'] = workers
if monitoring is not None:
config_ow['lithops']['monitoring'] = monitoring
if worker_processes is not None:
config_ow['lithops']['worker_processes'] = worker_processes
self.config = default_config(copy.deepcopy(config), config_ow)
self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
if self.data_cleaner and not self.is_lithops_worker:
spawn_cleaner = int(self.executor_id.split('-')[1]) == 0
atexit.register(self.clean, spawn_cleaner=spawn_cleaner,
clean_cloudobjects=False)
storage_config = extract_storage_config(self.config)
self.internal_storage = InternalStorage(storage_config)
self.storage = self.internal_storage.storage
self.backend = self.config['lithops']['backend']
self.mode = self.config['lithops']['mode']
if self.mode == LOCALHOST:
localhost_config = extract_localhost_config(self.config)
self.compute_handler = LocalhostHandler(localhost_config)
elif self.mode == SERVERLESS:
serverless_config = extract_serverless_config(self.config)
self.compute_handler = ServerlessHandler(serverless_config, self.internal_storage)
elif self.mode == STANDALONE:
standalone_config = extract_standalone_config(self.config)
self.compute_handler = StandaloneHandler(standalone_config)
# Create the monitoring system
monitoring_backend = self.config['lithops']['monitoring'].lower()
self.job_monitor = JobMonitor(
executor_id=self.executor_id,
internal_storage=self.internal_storage,
backend=monitoring_backend,
config=self.config.get(monitoring_backend)
)
# Create the invoker
self.invoker = create_invoker(
config=self.config,
executor_id=self.executor_id,
internal_storage=self.internal_storage,
compute_handler=self.compute_handler,
job_monitor=self.job_monitor
)
logger.debug(f'Function executor for {self.backend} created with ID: {self.executor_id}')
self.log_path = None
def __enter__(self):
""" Context manager method """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" Context manager method """
self.job_monitor.stop()
self.invoker.stop()
self.compute_handler.clear()
def _create_job_id(self, call_type):
job_id = str(self.total_jobs).zfill(3)
self.total_jobs += 1
return '{}{}'.format(call_type, job_id)
def call_async(self,
func: Callable,
data: Union[List[Any], Tuple[Any, ...], Dict[str, Any]],
extra_env: Optional[Dict] = None,
runtime_memory: Optional[int] = None,
timeout: Optional[int] = None,
include_modules: Optional[List] = [],
exclude_modules: Optional[List] = []) -> ResponseFuture:
"""
For running one function execution asynchronously.
:param func: The function to map over the data.
:param data: Input data. Arguments can be passed as a list or tuple, or as a dictionary for keyword arguments.
:param extra_env: Additional env variables for function environment.
:param runtime_memory: Memory to use to run the function.
:param timeout: Time that the function has to complete its execution before raising a timeout.
:param include_modules: Explicitly pickle these dependencies.
:param exclude_modules: Explicitly keep these modules from pickled dependencies.
:return: Response future.
"""
job_id = self._create_job_id('A')
self.last_call = 'call_async'
runtime_meta = self.invoker.select_runtime(job_id, runtime_memory)
job = create_map_job(config=self.config,
internal_storage=self.internal_storage,
executor_id=self.executor_id,
job_id=job_id,
map_function=func,
iterdata=[data],
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=extra_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=timeout)
futures = self.invoker.run_job(job)
self.futures.extend(futures)
return futures[0]
def map(self,
map_function: Callable,
map_iterdata: List[Union[List[Any], Tuple[Any, ...], Dict[str, Any]]],
chunksize: Optional[int] = None,
extra_args: Optional[Union[List[Any], Tuple[Any, ...], Dict[str, Any]]] = None,
extra_env: Optional[Dict[str, str]] = None,
runtime_memory: Optional[int] = None,
obj_chunk_size: Optional[int] = None,
obj_chunk_number: Optional[int] = None,
timeout: Optional[int] = None,
include_modules: Optional[List[str]] = [],
exclude_modules: Optional[List[str]] = []) -> FuturesList:
"""
Spawn multiple function activations based on the items of an input list.
:param map_function: The function to map over the data
:param map_iterdata: An iterable of input data (e.g python list).
:param chunksize: Split map_iteradata in chunks of this size. Lithops spawns 1 worker per resulting chunk
:param extra_args: Additional arguments to pass to each map_function activation
:param extra_env: Additional environment variables for function environment
:param runtime_memory: Memory (in MB) to use to run the functions
:param obj_chunk_size: Used for data processing. Chunk size to split each object in bytes. Must be >= 1MiB. 'None' for processing the whole file in one function activation
:param obj_chunk_number: Used for data processing. Number of chunks to split each object. 'None' for processing the whole file in one function activation. chunk_n has prevalence over chunk_size if both parameters are set
:param timeout: Max time per function activation (seconds)
:param include_modules: Explicitly pickle these dependencies. All required dependencies are pickled if default empty list. No one dependency is pickled if it is explicitly set to None
:param exclude_modules: Explicitly keep these modules from pickled dependencies. It is not taken into account if you set include_modules.
:return: A list with size `len(map_iterdata)` of futures for each job (Futures are also internally stored by Lithops).
"""
job_id = self._create_job_id('M')
self.last_call = 'map'
runtime_meta = self.invoker.select_runtime(job_id, runtime_memory)
job = create_map_job(config=self.config,
internal_storage=self.internal_storage,
executor_id=self.executor_id,
job_id=job_id,
map_function=map_function,
iterdata=map_iterdata,
chunksize=chunksize,
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=extra_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=timeout,
extra_args=extra_args,
obj_chunk_size=obj_chunk_size,
obj_chunk_number=obj_chunk_number)
futures = self.invoker.run_job(job)
self.futures.extend(futures)
if isinstance(map_iterdata, FuturesList):
for fut in map_iterdata:
fut._produce_output = False
return create_futures_list(futures, self)
def map_reduce(self,
map_function: Callable,
map_iterdata: List[Union[List[Any], Tuple[Any, ...], Dict[str, Any]]],
reduce_function: Callable,
chunksize: Optional[int] = None,
extra_args: Optional[Union[List[Any], Tuple[Any, ...], Dict[str, Any]]] = None,
extra_env: Optional[Dict[str, str]] = None,
map_runtime_memory: Optional[int] = None,
reduce_runtime_memory: Optional[int] = None,
obj_chunk_size: Optional[int] = None,
obj_chunk_number: Optional[int] = None,
timeout: Optional[int] = None,
reducer_one_per_object: Optional[bool] = False,
reducer_wait_local: Optional[bool] = False,
include_modules: Optional[List[str]] = [],
exclude_modules: Optional[List[str]] = []) -> FuturesList:
"""
Map the map_function over the data and apply the reduce_function across all futures.
:param map_function: The function to map over the data
:param map_iterdata: An iterable of input data
:param reduce_function: The function to reduce over the futures
:param chunksize: Split map_iteradata in chunks of this size. Lithops spawns 1 worker per resulting chunk. Default 1
:param extra_args: Additional arguments to pass to function activation. Default None
:param extra_env: Additional environment variables for action environment. Default None
:param map_runtime_memory: Memory to use to run the map function. Default None (loaded from config)
:param reduce_runtime_memory: Memory to use to run the reduce function. Default None (loaded from config)
:param obj_chunk_size: the size of the data chunks to split each object. 'None' for processing the whole file in one function activation
:param obj_chunk_number: Number of chunks to split each object. 'None' for processing the whole file in one function activation
:param timeout: Time that the functions have to complete their execution before raising a timeout
:param reducer_one_per_object: Set one reducer per object after running the partitioner
:param reducer_wait_local: Wait for results locally
:param include_modules: Explicitly pickle these dependencies.
:param exclude_modules: Explicitly keep these modules from pickled dependencies.
:return: A list with size `len(map_iterdata)` of futures.
"""
self.last_call = 'map_reduce'
map_job_id = self._create_job_id('M')
runtime_meta = self.invoker.select_runtime(map_job_id, map_runtime_memory)
map_job = create_map_job(config=self.config,
internal_storage=self.internal_storage,
executor_id=self.executor_id,
job_id=map_job_id,
map_function=map_function,
iterdata=map_iterdata,
chunksize=chunksize,
runtime_meta=runtime_meta,
runtime_memory=map_runtime_memory,
extra_args=extra_args,
extra_env=extra_env,
obj_chunk_size=obj_chunk_size,
obj_chunk_number=obj_chunk_number,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=timeout)
map_futures = self.invoker.run_job(map_job)
self.futures.extend(map_futures)
if isinstance(map_iterdata, FuturesList):
for fut in map_iterdata:
fut._produce_output = False
if reducer_wait_local:
self.wait(map_futures)
reduce_job_id = map_job_id.replace('M', 'R')
runtime_meta = self.invoker.select_runtime(reduce_job_id, reduce_runtime_memory)
reduce_job = create_reduce_job(config=self.config,
internal_storage=self.internal_storage,
executor_id=self.executor_id,
reduce_job_id=reduce_job_id,
reduce_function=reduce_function,
map_job=map_job,
map_futures=map_futures,
runtime_meta=runtime_meta,
runtime_memory=reduce_runtime_memory,
reducer_one_per_object=reducer_one_per_object,
extra_env=extra_env,
include_modules=include_modules,
exclude_modules=exclude_modules)
reduce_futures = self.invoker.run_job(reduce_job)
self.futures.extend(reduce_futures)
for f in map_futures:
f._produce_output = False
return create_futures_list(map_futures + reduce_futures, self)
def wait(self,
fs: Optional[Union[ResponseFuture, FuturesList, List[ResponseFuture]]] = None,
throw_except: Optional[bool] = True,
return_when: Optional[Any] = ALL_COMPLETED,
download_results: Optional[bool] = False,
timeout: Optional[int] = None,
threadpool_size: Optional[int] = THREADPOOL_SIZE,
wait_dur_sec: Optional[int] = WAIT_DUR_SEC) -> Tuple[FuturesList, FuturesList]:
"""
Wait for the Future instances (possibly created by different Executor instances)
given by fs to complete. Returns a named 2-tuple of sets. The first set, named done,
contains the futures that completed (finished or cancelled futures) before the wait
completed. The second set, named not_done, contains the futures that did not complete
(pending or running futures). timeout can be used to control the maximum number of
seconds to wait before returning.
:param fs: Futures list. Default None
:param throw_except: Re-raise exception if call raised. Default True
:param return_when: One of `ALL_COMPLETED`, `ANY_COMPLETED`, `ALWAYS`
:param download_results: Download results. Default false (Only get statuses)
:param timeout: Timeout of waiting for results
:param threadpool_size: Number of threads to use. Default 64
:param wait_dur_sec: Time interval between each check
:return: `(fs_done, fs_notdone)` where `fs_done` is a list of futures that have completed and `fs_notdone` is a list of futures that have not completed.
"""
futures = fs or self.futures
if type(futures) != list and type(futures) != FuturesList:
futures = [futures]
# Start waiting for results
try:
wait(fs=futures,
internal_storage=self.internal_storage,
job_monitor=self.job_monitor,
download_results=download_results,
throw_except=throw_except,
return_when=return_when,
timeout=timeout,
threadpool_size=threadpool_size,
wait_dur_sec=wait_dur_sec)
except (KeyboardInterrupt, Exception) as e:
self.invoker.stop()
self.job_monitor.stop()
if not fs and is_notebook():
del self.futures[len(self.futures) - len(futures):]
if self.data_cleaner and not self.is_lithops_worker:
self.clean(clean_cloudobjects=False, force=True)
raise e
finally:
present_jobs = {f.job_key for f in futures}
if self.data_cleaner and not self.is_lithops_worker:
self.compute_handler.clear(present_jobs)
self.clean(clean_cloudobjects=False)
if download_results:
fs_done = [f for f in futures if f.done]
fs_notdone = [f for f in futures if not f.done]
else:
fs_done = [f for f in futures if f.success or f.done]
fs_notdone = [f for f in futures if not f.success and not f.done]
return create_futures_list(fs_done, self), create_futures_list(fs_notdone, self)
def get_result(self,
fs: Optional[Union[ResponseFuture, FuturesList, List[ResponseFuture]]] = None,
throw_except: Optional[bool] = True,
timeout: Optional[int] = None,
threadpool_size: Optional[int] = THREADPOOL_SIZE,
wait_dur_sec: Optional[int] = WAIT_DUR_SEC):
"""
For getting the results from all function activations
:param fs: Futures list. Default None
:param throw_except: Reraise exception if call raised. Default True.
:param timeout: Timeout for waiting for results.
:param threadpool_size: Number of threads to use. Default 128
:param wait_dur_sec: Time interval between each check.
:return: The result of the future/s
"""
fs_done, _ = self.wait(fs=fs, throw_except=throw_except,
timeout=timeout, download_results=True,
threadpool_size=threadpool_size,
wait_dur_sec=wait_dur_sec)
result = []
fs_done = [f for f in fs_done if not f.futures and f._produce_output]
for f in fs_done:
if fs:
# Process futures provided by the user
result.append(f.result(throw_except=throw_except,
internal_storage=self.internal_storage))
elif not fs and not f._read:
# Process internally stored futures
result.append(f.result(throw_except=throw_except,
internal_storage=self.internal_storage))
f._read = True
logger.debug(f'ExecutorID {self.executor_id} - Finished getting results')
if len(result) == 1 and self.last_call != 'map':
return result[0]
return result
def plot(self,
fs: Optional[Union[ResponseFuture, List[ResponseFuture]]] = None,
dst: Optional[str] = None):
"""
Creates timeline and histogram of the current execution in dst_dir.
:param fs: list of futures.
:param dst: destination path to save .png plots.
"""
ftrs = self.futures if not fs else fs
if type(ftrs) != list:
ftrs = [ftrs]
ftrs_to_plot = [f for f in ftrs if (f.success or f.done) and not f.error]
if not ftrs_to_plot:
logger.debug(f'ExecutorID {self.executor_id} - No futures ready to plot')
return
logging.getLogger('matplotlib').setLevel(logging.WARNING)
from lithops.plots import create_timeline, create_histogram
logger.info(f'ExecutorID {self.executor_id} - Creating execution plots')
create_timeline(ftrs_to_plot, dst)
create_histogram(ftrs_to_plot, dst)
def clean(self,
fs: Optional[Union[ResponseFuture, List[ResponseFuture]]] = None,
cs: Optional[List[CloudObject]] = None,
clean_cloudobjects: Optional[bool] = True,
spawn_cleaner: Optional[bool] = True,
force: Optional[bool] = False):
"""
Deletes all the temp files from storage. These files include the function,
the data serialization and the function invocation results. It can also clean
cloudobjects.
:param fs: List of futures to clean
:param cs: List of cloudobjects to clean
:param clean_cloudobjects: Delete all cloudobjects created with this executor
:param spawn_cleaner: Spawn cleaner background process
:param force: Clean all future objects even if they have not been completed
"""
os.makedirs(CLEANER_DIR, exist_ok=True)
def save_data_to_clean(data):
with tempfile.NamedTemporaryFile(dir=CLEANER_DIR, delete=False) as temp:
pickle.dump(data, temp)
if cs:
data = {
'cos_to_clean': list(cs),
'storage_config': self.internal_storage.get_storage_config()
}
save_data_to_clean(data)
if not fs:
return
futures = fs or self.futures
futures = [futures] if type(futures) != list else futures
present_jobs = {create_job_key(f.executor_id, f.job_id) for f in futures
if (f.executor_id.count('-') == 1 and f.done) or force}
jobs_to_clean = present_jobs - self.cleaned_jobs
if jobs_to_clean:
logger.info(f'ExecutorID {self.executor_id} - Cleaning temporary data')
data = {
'jobs_to_clean': jobs_to_clean,
'clean_cloudobjects': clean_cloudobjects,
'storage_config': self.internal_storage.get_storage_config()
}
save_data_to_clean(data)
self.cleaned_jobs.update(jobs_to_clean)
if (jobs_to_clean or cs) and spawn_cleaner:
cmdstr = [sys.executable, '-m', 'lithops.scripts.cleaner']
sp.Popen(' '.join(cmdstr), shell=True)
def job_summary(self,
cloud_objects_n: Optional[int] = 0):
"""
Logs information of a job executed by the calling function executor.
currently supports: code_engine, ibm_vpc and ibm_cf.
:param cloud_objects_n: number of cloud object used in COS, declared by user.
"""
import pandas as pd
def init():
headers = ['Job_ID', 'Function', 'Invocations', 'Memory(MB)', 'AvgRuntime', 'Cost', 'CloudObjects']
pd.DataFrame([], columns=headers).to_csv(self.log_path, index=False)
def append(content):
""" appends job information to log file."""
pd.DataFrame(content).to_csv(self.log_path, mode='a', header=False, index=False)
def append_summary():
""" add a summary row to the log file"""
df = pd.read_csv(self.log_path)
total_average = sum(df.AvgRuntime * df.Invocations) / df.Invocations.sum()
total_row = pd.DataFrame([['Summary', ' ', df.Invocations.sum(), df['Memory(MB)'].sum(),
round(total_average, 10), df.Cost.sum(), cloud_objects_n]])
total_row.to_csv(self.log_path, mode='a', header=False, index=False)
def get_object_num():
"""returns cloud objects used up to this point, using this function executor. """
df = pd.read_csv(self.log_path)
return float(df.iloc[-1].iloc[-1])
# Avoid logging info unless chosen computational backend is supported.
if hasattr(self.compute_handler.backend, 'calc_cost'):
if self.log_path: # retrieve cloud_objects_n from last log file
cloud_objects_n += get_object_num()
else:
self.log_path = os.path.join(constants.LOGS_DIR, datetime.now().strftime("%Y-%m-%d_%H:%M:%S.csv"))
# override current logfile
init()
futures = self.futures
if type(futures) != list:
futures = [futures]
memory = []
runtimes = []
curr_job_id = futures[0].job_id
job_func = futures[0].function_name # each job is conducted on a single function
for future in futures:
if curr_job_id != future.job_id:
cost = self.compute_handler.backend.calc_cost(runtimes, memory)
append([[curr_job_id, job_func, len(runtimes), sum(memory),
np.round(np.average(runtimes), 10), cost, ' ']])
# updating next iteration's variables:
curr_job_id = future.job_id
job_func = future.function_name
memory.clear()
runtimes.clear()
memory.append(future.runtime_memory)
runtimes.append(future.stats['worker_exec_time'])
# appends last Job-ID
cost = self.compute_handler.backend.calc_cost(runtimes, memory)
append([[curr_job_id, job_func, len(runtimes), sum(memory),
np.round(np.average(runtimes), 10), cost, ' ']])
# append summary row to end of the dataframe
append_summary()
else: # calc_cost() doesn't exist for chosen computational backend.
logger.warning("Could not log job: {} backend isn't supported by this function."
.format(self.compute_handler.backend.name))
return
logger.info("View log file logs at {}".format(self.log_path))
class LocalhostExecutor(FunctionExecutor):
"""
Initialize a LocalhostExecutor class.
:param config: Settings passed in here will override those in config file.
:param runtime: Runtime name to use.
:param storage: Name of the storage backend to use.
:param worker_processes: Worker granularity, number of concurrent/parallel processes in each worker
:param monitoring: monitoring system.
:param log_level: log level to use during the execution.
"""
def __init__(self,
config: Optional[Dict[str, Any]] = None,
runtime: Optional[int] = None,
storage: Optional[str] = None,
worker_processes: Optional[int] = None,
monitoring: Optional[str] = None,
log_level: Optional[str] = False):
super().__init__(backend=LOCALHOST,
config=config,
runtime=runtime,
storage=storage or LOCALHOST,
log_level=log_level,
monitoring=monitoring,
worker_processes=worker_processes)
class ServerlessExecutor(FunctionExecutor):
"""
Initialize a ServerlessExecutor class.
:param config: Settings passed in here will override those in config file
:param runtime: Runtime name to use
:param runtime_memory: memory to use in the runtime
:param backend: Name of the serverless compute backend to use
:param storage: Name of the storage backend to use
:param workers: Max number of concurrent workers
:param worker_processes: Worker granularity, number of concurrent/parallel processes in each worker
:param monitoring: monitoring system
:param remote_invoker: Spawn a function that will perform the actual job invocation (True/False)
:param log_level: log level to use during the execution
"""
def __init__(self,
config: Optional[Dict[str, Any]] = None,
runtime: Optional[str] = None,
runtime_memory: Optional[int] = None,
backend: Optional[str] = None,
storage: Optional[str] = None,
workers: Optional[int] = None,
worker_processes: Optional[int] = None,
monitoring: Optional[str] = None,
remote_invoker: Optional[bool] = None,
log_level: Optional[str] = False):
backend = backend or constants.SERVERLESS_BACKEND_DEFAULT
super().__init__(config=config,
runtime=runtime,
runtime_memory=runtime_memory,
backend=backend,
storage=storage,
workers=workers,
worker_processes=worker_processes,
monitoring=monitoring,
log_level=log_level,
remote_invoker=remote_invoker)
class StandaloneExecutor(FunctionExecutor):
"""
Initialize a StandaloneExecutor class.
:param config: Settings passed in here will override those in config file
:param runtime: Runtime name to use
:param backend: Name of the standalone compute backend to use
:param storage: Name of the storage backend to use
:param workers: Max number of concurrent workers
:param worker_processes: Worker granularity, number of concurrent/parallel processes in each worker
:param monitoring: monitoring system
:param log_level: log level to use during the execution
"""
def __init__(self,
config: Optional[Dict[str, Any]] = None,
runtime: Optional[str] = None,
backend: Optional[str] = None,
storage: Optional[str] = None,
workers: Optional[int] = None,
worker_processes: Optional[int] = None,
monitoring: Optional[str] = None,
log_level: Optional[str] = False):
backend = backend or constants.STANDALONE_BACKEND_DEFAULT
super().__init__(config=config,
runtime=runtime,
backend=backend,
storage=storage,
workers=workers,
worker_processes=worker_processes,
monitoring=monitoring,
log_level=log_level)
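# --- Editor's illustrative sketch (not part of the original module) ---
# Typical map/get_result flow with the executors defined above; it assumes a
# working Lithops configuration (or the localhost defaults) is available.
if __name__ == '__main__':
    def _double(x):
        return x * 2
    fexec = FunctionExecutor()
    futures = fexec.map(_double, [1, 2, 3, 4])
    print(fexec.get_result(futures))  # -> [2, 4, 6, 8]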
| 46.231076 | 228 | 0.61249 |
4a1a0c06544cb46960ff25a38e02bed778081b1f
| 1,923 |
py
|
Python
|
io_util.py
|
rneher/nextstain_base
|
f3c3478cd476a94d656a9bde7a27bfc42884491e
|
[
"MIT"
] | 2 |
2016-07-15T19:22:08.000Z
|
2020-03-25T06:59:54.000Z
|
io_util.py
|
rneher/nextstain_base
|
f3c3478cd476a94d656a9bde7a27bfc42884491e
|
[
"MIT"
] | null | null | null |
io_util.py
|
rneher/nextstain_base
|
f3c3478cd476a94d656a9bde7a27bfc42884491e
|
[
"MIT"
] | 3 |
2016-04-22T20:28:36.000Z
|
2020-03-25T06:59:55.000Z
|
from __future__ import division, print_function
def myopen(fname, mode='r'):
if fname[-2:] == 'gz':
from gzip import open as gopen
return gopen(fname, mode)
else:
return open(fname, mode)
def make_dir(dname):
import os
if not os.path.isdir(dname):
try:
os.makedirs(dname)
except OSError as e:
print("Cannot create run_dir",e)
def remove_dir(dname):
import os, shutil
if os.path.isdir(dname):
import shutil
shutil.rmtree(dname)
def write_json(data, file_name, indent=1):
import json
try:
handle = open(file_name, 'w')
except IOError:
pass
else:
json.dump(data, handle, indent=indent)
handle.close()
def tree_to_json(node, extra_attr = []):
tree_json = {}
str_attr = ['country','region','clade','strain', 'date', 'muts']
num_attr = ['xvalue', 'yvalue', 'tvalue', 'num_date']
if hasattr(node, 'name'):
tree_json['strain'] = node.name
for prop in str_attr:
if hasattr(node, prop):
tree_json[prop] = node.__getattribute__(prop)
for prop in num_attr:
if hasattr(node, prop):
try:
tree_json[prop] = round(node.__getattribute__(prop),5)
except:
print("cannot round:", node.__getattribute__(prop), "assigned as is")
tree_json[prop] = node.__getattribute__(prop)
for prop in extra_attr:
if len(prop)==2 and callable(prop[1]):
if hasattr(node, prop[0]):
tree_json[prop] = prop[1](node.__getattribute__(prop[0]))
else:
if hasattr(node, prop):
tree_json[prop] = node.__getattribute__(prop)
if node.clades:
tree_json["children"] = []
for ch in node.clades:
tree_json["children"].append(tree_to_json(ch, extra_attr))
return tree_json
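# --- Editor's illustrative sketch (not part of the original module) ---
# tree_to_json expects Bio.Phylo-style clade objects (attributes such as .name
# and .clades); a typical call, assuming a tree read with Bio.Phylo and a
# hypothetical file name, would be:
#   from Bio import Phylo
#   tree = Phylo.read("my_tree.newick", "newick")
#   write_json(tree_to_json(tree.root, extra_attr=['branch_length']), "tree.json")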
| 29.584615 | 85 | 0.582423 |
4a1a0c191158248c4c44739dcbf3769656117742
| 6,485 |
py
|
Python
|
src/saml2/ecp.py
|
richtera/pysaml2
|
cd446e3d08f8adb0b3541442c3f7a32c16c8385f
|
[
"BSD-2-Clause"
] | null | null | null |
src/saml2/ecp.py
|
richtera/pysaml2
|
cd446e3d08f8adb0b3541442c3f7a32c16c8385f
|
[
"BSD-2-Clause"
] | null | null | null |
src/saml2/ecp.py
|
richtera/pysaml2
|
cd446e3d08f8adb0b3541442c3f7a32c16c8385f
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains classes used in the SAML ECP profile
"""
import logging
from saml2.client_base import ACTOR
from saml2.ecp_client import SERVICE
from saml2 import element_to_extension_element
from saml2 import samlp
from saml2 import soap
from saml2 import BINDING_SOAP, BINDING_PAOS
from saml2.profile import paos
from saml2.profile import ecp
#from saml2.client import Saml2Client
from saml2.server import Server
from saml2.schema import soapenv
from saml2.response import authn_response
logger = logging.getLogger(__name__)
def ecp_capable(headers):
if "application/vnd.paos+xml" in headers["Accept"]:
if "PAOS" in headers:
if 'ver="%s";"%s"' % (paos.NAMESPACE,
SERVICE) in headers["PAOS"]:
return True
return False
#noinspection PyUnusedLocal
def ecp_auth_request(cls, entityid=None, relay_state="", sign=False):
""" Makes an authentication request.
:param entityid: The entity ID of the IdP to send the request to
:param relay_state: To where the user should be returned after
successful log in.
:param sign: Whether the request should be signed or not.
:return: AuthnRequest response
"""
eelist = []
# ----------------------------------------
# <paos:Request>
# ----------------------------------------
my_url = cls.service_url(BINDING_PAOS)
# must_understand and actor according to the standard
#
paos_request = paos.Request(must_understand="1", actor=ACTOR,
response_consumer_url=my_url,
service=SERVICE)
eelist.append(element_to_extension_element(paos_request))
# ----------------------------------------
# <ecp:Request>
# ----------------------------------------
# idp = samlp.IDPEntry(
# provider_id = "https://idp.example.org/entity",
# name = "Example identity provider",
# loc = "https://idp.example.org/saml2/sso",
# )
#
# idp_list = samlp.IDPList(idp_entry= [idp])
#
# ecp_request = ecp.Request(
# actor = ACTOR, must_understand = "1",
# provider_name = "Example Service Provider",
# issuer=saml.Issuer(text="https://sp.example.org/entity"),
# idp_list = idp_list)
#
# eelist.append(element_to_extension_element(ecp_request))
# ----------------------------------------
# <ecp:RelayState>
# ----------------------------------------
relay_state = ecp.RelayState(actor=ACTOR, must_understand="1",
text=relay_state)
eelist.append(element_to_extension_element(relay_state))
header = soapenv.Header()
header.extension_elements = eelist
# ----------------------------------------
# <samlp:AuthnRequest>
# ----------------------------------------
logger.info("entityid: %s, binding: %s" % (entityid, BINDING_SOAP))
location = cls._sso_location(entityid, binding=BINDING_SOAP)
req_id, authn_req = cls.create_authn_request(
location, binding=BINDING_PAOS, service_url_binding=BINDING_PAOS)
body = soapenv.Body()
body.extension_elements = [element_to_extension_element(authn_req)]
# ----------------------------------------
# The SOAP envelope
# ----------------------------------------
soap_envelope = soapenv.Envelope(header=header, body=body)
return req_id, "%s" % soap_envelope
def handle_ecp_authn_response(cls, soap_message, outstanding=None):
rdict = soap.class_instances_from_soap_enveloped_saml_thingies(
soap_message, [paos, ecp, samlp])
_relay_state = None
for item in rdict["header"]:
if item.c_tag == "RelayState" and item.c_namespace == ecp.NAMESPACE:
_relay_state = item
response = authn_response(cls.config, cls.service_url(), outstanding,
allow_unsolicited=True)
response.loads("%s" % rdict["body"], False, soap_message)
response.verify()
cls.users.add_information_about_person(response.session_info())
return response, _relay_state
def ecp_response(target_url, response):
# ----------------------------------------
# <ecp:Response
# ----------------------------------------
ecp_response = ecp.Response(assertion_consumer_service_url=target_url)
header = soapenv.Header()
header.extension_elements = [element_to_extension_element(ecp_response)]
# ----------------------------------------
# <samlp:Response
# ----------------------------------------
body = soapenv.Body()
body.extension_elements = [element_to_extension_element(response)]
soap_envelope = soapenv.Envelope(header=header, body=body)
return "%s" % soap_envelope
class ECPServer(Server):
""" This deals with what the IdP has to do
TODO: Still tentative
"""
def __init__(self, config_file="", config=None, cache=None):
Server.__init__(self, config_file, config, cache)
def parse_ecp_authn_query(self):
pass
def ecp_response(self):
# ----------------------------------------
# <ecp:Response
# ----------------------------------------
target_url = ""
ecp_response = ecp.Response(assertion_consumer_service_url=target_url)
        header = soapenv.Header()
header.extension_elements = [element_to_extension_element(ecp_response)]
# ----------------------------------------
# <samlp:Response
# ----------------------------------------
response = samlp.Response()
body = soapenv.Body()
body.extension_elements = [element_to_extension_element(response)]
soap_envelope = soapenv.Envelope(header=header, body=body)
return "%s" % soap_envelope
| 31.328502 | 80 | 0.588435 |
4a1a0d1547f927d206336139f5efc46b3b346359
| 5,184 |
py
|
Python
|
wx_jump_py3.py
|
williamfzc/wx_jump
|
499800c00ff8fd4dbc02cb0e2948703d03f85d62
|
[
"MIT"
] | 70 |
2017-12-30T12:13:18.000Z
|
2020-01-16T07:41:33.000Z
|
wx_jump_py3.py
|
williamfzc/wx_jump
|
499800c00ff8fd4dbc02cb0e2948703d03f85d62
|
[
"MIT"
] | 3 |
2017-12-31T02:16:51.000Z
|
2018-01-16T08:58:57.000Z
|
wx_jump_py3.py
|
williamfzc/wx_jump
|
499800c00ff8fd4dbc02cb0e2948703d03f85d62
|
[
"MIT"
] | 26 |
2017-12-30T14:45:45.000Z
|
2022-03-07T11:21:36.000Z
|
from PIL import Image, ImageFilter, ImageDraw
import os
import numpy as np
import time
import random
# This value was tuned on a 1080x1920 screen and may need fine-tuning.
# How to calibrate:
# First change the DEVICE_SCREEN parameter below to your phone's resolution.
# Use the adb command 'adb shell input swipe 580 1600 580 1600 XXX' to try the first jump and note the exact XXX that lands it.
# DISTANCE_ARG is XXX divided by the distance computed for that first jump.
# For example, my first run computed a distance of 562.5 and the recorded XXX was 720,
# so DISTANCE_ARG would be 720 / 562.5 = 1.28.
# This has not been tried on many devices yet; the calibration will be wrapped up later, but for now this is roughly how to tune it.
DISTANCE_ARG = 1.3925
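# A minimal calibration sketch (the helper name below is illustrative and not
# part of the original script): DISTANCE_ARG is simply the measured press
# duration divided by the jump distance computed for that same jump,
# e.g. 720 / 562.5 = 1.28.
def _estimate_distance_arg(press_time_ms, measured_distance):
    """Return the press-time-to-distance ratio to use as DISTANCE_ARG."""
    return press_time_ms / measured_distance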
# Device screen resolution
DEVICE_SCREEN = (1080, 1920)
# Wait time after each jump; if your current record is low, keep this at 2 so the "record beaten" overlay does not interfere
WAIT_TIME = 2
# ----------------------------------------------------------
# Path of the temporary screenshot file
TEMP_FILE_PATH = 'temp.png'
# Distance from the center of the piece's base to the piece's edge
CHESS_WIDTH = int(DEVICE_SCREEN[0] * 0.032407)
# Height of the ignored region at the top of the screen
IGNORE_HEIGHT = (int(DEVICE_SCREEN[1] / 4), int(DEVICE_SCREEN[1] / 2))
# RGB value of the game piece; it may differ between devices and need fine-tuning
SELF_RGB = (62, 56, 79)
def get_pic(_pic_path: 'temporary screenshot path'):
    """ Grab a screenshot from the device via adb """
os.system('adb shell screencap -p /sdcard/wx.png')
os.system('adb pull /sdcard/wx.png {}'.format(_pic_path))
def calculate_time(dis: 'distance'):
    """ Convert a jump distance into a press duration in milliseconds """
_result = int(dis * DISTANCE_ARG)
return _result if _result > 200 else 200
def get_distance(point1, point2):
    """ Euclidean distance between two points """
draw = ImageDraw.Draw(Image.open('temp.png'))
draw.arc((point2[0], point2[1], point2[0] + 20, point2[1] + 20), 0, 360, fill=150)
return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5
def get_self_position(_img_path: 'temporary screenshot path'):
    """ Locate the base point of the player piece in the screenshot """
_img = Image.open(_img_path)
point_list = list()
for x in range(DEVICE_SCREEN[0]):
for y in range(DEVICE_SCREEN[1]):
each_point = _img.getpixel((x, y))
if rgb_compare(SELF_RGB, each_point):
point_list.append((x, y))
return point_list[-1][0]-CHESS_WIDTH, point_list[-1][1]
def rgb_compare(a, b):
    """ Compare two RGB values within a small tolerance """
for i in range(3):
if abs(a[i] - b[i]) >= 5:
return False
else:
return True
def get_des_position(_img_path: 'temporary screenshot path', _self_point: 'start point coordinates'):
    """ Locate the target landing point """
_img = Image.open(_img_path)
    # Run edge detection twice
_img = _img.filter(ImageFilter.FIND_EDGES)
_img = _img.filter(ImageFilter.FIND_EDGES)
    # Binarize the image (two-value / black and white)
_img = _img.convert('1')
_img.save('temp1.png')
    # Exclude interference from the top of the screen
_img = np.array(_img)[IGNORE_HEIGHT[0]:]
    # Scan the image row by row
for index, each in enumerate(_img):
old_line = _img[index-1]
        # A difference between adjacent rows means the top of the target was found
if (each[1:-1] - old_line[1:-1]).any():
# black line
if any(map(lambda x: list(x).count(True) > int(len(each)/2), (each, old_line))):
continue
else:
des_x = _get_des_x(each, old_line)
des_y = _get_des_y(index, des_x, _img)
                if abs(des_x - _self_point[0]) < CHESS_WIDTH * 2:
continue
else:
break
else:
        raise ValueError('No target edge found.')
return des_x, des_y
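# Summary of the detection pipeline above: edge-detect the screenshot twice,
# binarize it, drop the top IGNORE_HEIGHT region, then scan row by row until
# two adjacent rows differ, which marks the top edge of the next platform.
# The helpers _get_des_x and _get_des_y below refine that hit into the landing
# point's x and y coordinates.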
def _get_des_x(line1, line2):
    """ Get the target x coordinate by finding where two adjacent rows differ """
for i, a in enumerate(zip(line1[1:-1], line2[1:-1])):
if a[0] != a[1]:
return i + 1
else:
raise ValueError('Nothing different.')
def _get_des_y(_cur_row: 'row of the target top edge', _des_x: 'target x coordinate', _img: 'image matrix'):
    """ Scan downwards from the target top edge; when the right edge stops advancing, the boundary has been reached """
_rows = _img[_cur_row:]
_des_x += list(_rows[0][_des_x::]).index(False)
for row_num, each_row in enumerate(_rows[1:]):
_next = list(_rows[row_num+1][_des_x:]).index(True) if True in list(_rows[row_num+1][_des_x:]) else 0
if _next > 15:
_next = list(_rows[row_num+2][_des_x:]).index(True)
if _next > 15:
return row_num + IGNORE_HEIGHT[0] + _cur_row + 1
else:
_des_x += _next
elif _next == 0:
_des_x += 1
else:
_des_x += _next
if _des_x >= DEVICE_SCREEN[0]:
return row_num + IGNORE_HEIGHT[0] + _cur_row + 1
else:
raise ValueError('NO DES POINT FOUND.')
def print_log(_self_point, _des_point, _distance, _t):
    """ Print the computed values for easier debugging """
print('self location: {}, {}'.format(_self_point[0], _self_point[1]))
print('des location: {}, {}'.format(_des_point[0], _des_point[1]))
print('x distance: {}'.format(_distance))
print('press time: {}'.format(_t))
def apply_to_adb(_t: 'press duration'):
    """ Drive the phone through adb """
r_x, r_y = random.uniform(DEVICE_SCREEN[0]/2, DEVICE_SCREEN[0]/2 + 100), \
random.uniform(DEVICE_SCREEN[1]/6, DEVICE_SCREEN[1]/6 - 100)
os.system('adb shell input swipe {} {} {} {} {}'.format(r_x, r_y, r_x, r_y, _t))
time.sleep(WAIT_TIME + random.random())
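# Note on the adb call above: because the swipe's start and end coordinates are
# identical, `input swipe x y x y t` acts as a long press of t milliseconds at
# (x, y), which is exactly what triggers a jump of the desired length.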
if __name__ == '__main__':
while True:
# get screen pic
get_pic(TEMP_FILE_PATH)
# get self location
self_point = get_self_position(TEMP_FILE_PATH)
# get des location
des_point = get_des_position(TEMP_FILE_PATH, self_point)
# get distance
distance = get_distance(self_point, des_point)
# cal press time
t = calculate_time(distance)
# print log
print_log(self_point, des_point, distance, t)
# DO
apply_to_adb(t)
| 29.454545 | 109 | 0.5951 |
4a1a0d33593aaa096566f54f51a4995ff0c2ec70
| 24,182 |
py
|
Python
|
python/taichi/lang/__init__.py
|
zhiyaluo/taichi
|
3c48f4f3ba43737761a5492c4aba298abff9f0dc
|
[
"MIT"
] | null | null | null |
python/taichi/lang/__init__.py
|
zhiyaluo/taichi
|
3c48f4f3ba43737761a5492c4aba298abff9f0dc
|
[
"MIT"
] | null | null | null |
python/taichi/lang/__init__.py
|
zhiyaluo/taichi
|
3c48f4f3ba43737761a5492c4aba298abff9f0dc
|
[
"MIT"
] | null | null | null |
from .impl import *
from .util import deprecated
from .matrix import Matrix, Vector
from .transformer import TaichiSyntaxError
from .ndrange import ndrange, GroupedNDRange
from copy import deepcopy as _deepcopy
import functools
import os
core = taichi_lang_core
runtime = get_runtime()
i = indices(0)
j = indices(1)
k = indices(2)
l = indices(3)
ij = indices(0, 1)
ji = indices(1, 0)
jk = indices(1, 2)
kj = indices(2, 1)
ik = indices(0, 2)
ki = indices(2, 0)
ijk = indices(0, 1, 2)
ijkl = indices(0, 1, 2, 3)
outer_product = deprecated('ti.outer_product(a, b)',
'a.outer_product(b)')(Matrix.outer_product)
cross = deprecated('ti.cross(a, b)', 'a.cross(b)')(Matrix.cross)
dot = deprecated('ti.dot(a, b)', 'a.dot(b)')(Matrix.dot)
normalized = deprecated('ti.normalized(a)',
'a.normalized()')(Matrix.normalized)
cfg = default_cfg()
current_cfg = current_cfg()
x86_64 = core.x64
x64 = core.x64
arm64 = core.arm64
cuda = core.cuda
metal = core.metal
opengl = core.opengl
cc = core.cc
gpu = [cuda, metal, opengl]
cpu = core.host_arch()
kernel_profiler_print = lambda: get_runtime().prog.kernel_profiler_print()
kernel_profiler_clear = lambda: get_runtime().prog.kernel_profiler_clear()
kernel_profiler_total_time = lambda: get_runtime(
).prog.kernel_profiler_total_time()
# Unstable API
type_factory_ = core.get_type_factory_instance()
def memory_profiler_print():
get_runtime().materialize()
get_runtime().prog.print_memory_profiler_info()
extension = core.Extension
is_extension_supported = core.is_extension_supported
def reset():
from .impl import reset as impl_reset
impl_reset()
global runtime
runtime = get_runtime()
class _EnvironmentConfigurator:
def __init__(self, kwargs, cfg):
self.cfg = cfg
self.kwargs = kwargs
self.keys = []
def add(self, key, cast=None):
cast = cast or self.bool_int
self.keys.append(key)
# TI_ASYNC= : no effect
# TI_ASYNC=0 : False
# TI_ASYNC=1 : True
name = 'TI_' + key.upper()
value = os.environ.get(name, '')
if len(value):
self[key] = cast(value)
if key in self.kwargs:
core.warn(
f'ti.init argument "{key}" overridden by environment variable {name}={value}'
)
del self.kwargs[key] # mark as recognized
elif key in self.kwargs:
self[key] = self.kwargs[key]
del self.kwargs[key] # mark as recognized
def __getitem__(self, key):
return getattr(self.cfg, key)
def __setitem__(self, key, value):
setattr(self.cfg, key, value)
@staticmethod
def bool_int(x):
return bool(int(x))
class _SpecialConfig:
# like CompileConfig in C++, this is the configurations that belong to other submodules
def __init__(self):
self.print_preprocessed = False
self.log_level = 'info'
self.gdb_trigger = False
self.excepthook = False
def init(arch=None,
default_fp=None,
default_ip=None,
_test_mode=False,
**kwargs):
import taichi as ti
    # Make a deepcopy in case these args reference items from ti.cfg, which are
# actually references. If no copy is made and the args are indeed references,
# ti.reset() could override the args to their default values.
default_fp = _deepcopy(default_fp)
default_ip = _deepcopy(default_ip)
kwargs = _deepcopy(kwargs)
ti.reset()
spec_cfg = _SpecialConfig()
env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)
# configure default_fp/ip:
# TODO: move these stuff to _SpecialConfig too:
env_default_fp = os.environ.get("TI_DEFAULT_FP")
if env_default_fp:
if default_fp is not None:
core.warn(
f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
)
if env_default_fp == '32':
default_fp = f32
elif env_default_fp == '64':
default_fp = f64
elif env_default_fp is not None:
raise ValueError(
f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')
env_default_ip = os.environ.get("TI_DEFAULT_IP")
if env_default_ip:
if default_ip is not None:
core.warn(
f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
)
if env_default_ip == '32':
default_ip = i32
elif env_default_ip == '64':
default_ip = i64
elif env_default_ip is not None:
raise ValueError(
f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')
if default_fp is not None:
ti.get_runtime().set_default_fp(default_fp)
if default_ip is not None:
ti.get_runtime().set_default_ip(default_ip)
# submodule configurations (spec_cfg):
env_spec.add('print_preprocessed')
env_spec.add('log_level', str)
env_spec.add('gdb_trigger')
env_spec.add('excepthook')
# compiler configurations (ti.cfg):
for key in dir(ti.cfg):
if key in ['arch', 'default_fp', 'default_ip']:
continue
cast = type(getattr(ti.cfg, key))
if cast is bool:
cast = None
env_comp.add(key, cast)
unexpected_keys = kwargs.keys()
if len(unexpected_keys):
raise KeyError(
f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
)
# dispatch configurations that are not in ti.cfg:
if not _test_mode:
ti.set_gdb_trigger(spec_cfg.gdb_trigger)
ti.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
ti.set_logging_level(spec_cfg.log_level.lower())
if spec_cfg.excepthook:
# TODO(#1405): add a way to restore old excepthook
ti.enable_excepthook()
# select arch (backend):
env_arch = os.environ.get('TI_ARCH')
if env_arch is not None:
        ti.info(f'Following TI_ARCH setting: arch={env_arch}')
arch = ti.core.arch_from_name(env_arch)
ti.cfg.arch = adaptive_arch_select(arch)
print(f'[Taichi] Starting on arch={ti.core.arch_name(ti.cfg.arch)}')
if _test_mode:
return spec_cfg
# create a new program:
ti.get_runtime().create_program()
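# A hedged usage sketch (the arch and flag values below are illustrative only):
#
#   import taichi as ti
#   ti.init(arch=ti.gpu, default_fp=ti.f32, debug=True)
#
# Environment variables such as TI_ARCH or TI_DEFAULT_FP take precedence over
# the corresponding keyword arguments, as handled by _EnvironmentConfigurator
# and the default_fp/default_ip branches above.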
def no_activate(*args):
for v in args:
taichi_lang_core.no_activate(v.snode.ptr)
def cache_shared(*args):
for a in args:
for v in a.get_field_members():
taichi_lang_core.insert_snode_access_flag(
taichi_lang_core.SNodeAccessFlag.block_local, v.ptr)
def cache_read_only(*args):
for a in args:
for v in a.get_field_members():
taichi_lang_core.insert_snode_access_flag(
taichi_lang_core.SNodeAccessFlag.read_only, v.ptr)
def assume_in_range(val, base, low, high):
return taichi_lang_core.expr_assume_in_range(
Expr(val).ptr,
Expr(base).ptr, low, high)
def loop_unique(val):
return taichi_lang_core.expr_loop_unique(Expr(val).ptr)
parallelize = core.parallelize
serialize = lambda: parallelize(1)
vectorize = core.vectorize
block_dim = core.block_dim
inversed = deprecated('ti.inversed(a)', 'a.inverse()')(Matrix.inversed)
transposed = deprecated('ti.transposed(a)', 'a.transpose()')(Matrix.transposed)
def polar_decompose(A, dt=None):
if dt is None:
dt = get_runtime().default_fp
from .linalg import polar_decompose
return polar_decompose(A, dt)
def svd(A, dt=None):
if dt is None:
dt = get_runtime().default_fp
from .linalg import svd
return svd(A, dt)
determinant = deprecated('ti.determinant(a)',
'a.determinant()')(Matrix.determinant)
tr = deprecated('ti.tr(a)', 'a.trace()')(Matrix.trace)
def Tape(loss, clear_gradients=True):
get_runtime().materialize()
if len(loss.shape) != 0:
raise RuntimeError(
'The loss of `Tape` must be a 0-D field, i.e. scalar')
if not loss.snode.ptr.has_grad():
raise RuntimeError(
'Gradients of loss are not allocated, please use ti.field(..., needs_grad=True)'
' for all fields that are required by autodiff.')
if clear_gradients:
clear_all_gradients()
from .meta import clear_loss
clear_loss(loss)
return runtime.get_tape(loss)
def clear_all_gradients():
get_runtime().materialize()
import taichi as ti
def visit(node):
places = []
for i in range(node.ptr.get_num_ch()):
ch = node.ptr.get_ch(i)
if not ch.is_place():
visit(SNode(ch))
else:
if not ch.is_primal():
places.append(ch.get_expr())
places = tuple(places)
if places:
from .meta import clear_gradients
clear_gradients(places)
visit(ti.root)
lang_core = core
def benchmark(func, repeat=300, args=()):
import taichi as ti
import time
def run_benchmark():
compile_time = time.time()
func(*args) # compile the kernel first
ti.sync()
compile_time = time.time() - compile_time
ti.stat_write('compilation_time', compile_time)
codegen_stat = ti.core.stat()
for line in codegen_stat.split('\n'):
try:
a, b = line.strip().split(':')
except:
continue
a = a.strip()
b = int(float(b))
if a == 'codegen_kernel_statements':
ti.stat_write('compiled_inst', b)
if a == 'codegen_offloaded_tasks':
ti.stat_write('compiled_tasks', b)
elif a == 'launched_tasks':
ti.stat_write('launched_tasks', b)
# Use 3 initial iterations to warm up
# instruction/data caches. Discussion:
# https://github.com/taichi-dev/taichi/pull/1002#discussion_r426312136
for i in range(3):
func(*args)
ti.sync()
ti.kernel_profiler_clear()
t = time.time()
for n in range(repeat):
func(*args)
ti.sync()
elapsed = time.time() - t
avg = elapsed / repeat
ti.stat_write('wall_clk_t', avg)
device_time = ti.kernel_profiler_total_time()
ti.stat_write('exec_t', device_time)
run_benchmark()
def benchmark_plot(fn=None,
cases=None,
columns=None,
archs=None,
title=None,
bars='sync_vs_async',
bar_width=0.4,
bar_distance=0,
left_margin=0):
import taichi as ti
import yaml
import matplotlib.pyplot as plt
if fn is None:
fn = os.path.join(ti.core.get_repo_dir(), 'benchmarks', 'output',
'benchmark.yml')
with open(fn, 'r') as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
if bars != 'sync_vs_async': # need baseline
baseline_dir = os.path.join(ti.core.get_repo_dir(), 'benchmarks',
'baseline')
baseline_file = f'{baseline_dir}/benchmark.yml'
with open(baseline_file, 'r') as f:
baseline_data = yaml.load(f, Loader=yaml.SafeLoader)
if cases is None:
cases = list(data.keys())
assert len(cases) >= 1
if len(cases) == 1:
cases = [cases[0], cases[0]]
ti.warning(
'Function benchmark_plot does not support plotting with only one case for now. Duplicating the item to move on.'
)
if columns is None:
columns = list(data[cases[0]].keys())
normalize_to_lowest = lambda x: True
figure, subfigures = plt.subplots(len(cases), len(columns))
if title is None:
title = 'Taichi Performance Benchmarks (Higher means more)'
figure.suptitle(title, fontweight="bold")
for col_id in range(len(columns)):
subfigures[0][col_id].set_title(columns[col_id])
for case_id in range(len(cases)):
case = cases[case_id]
subfigures[case_id][0].annotate(
case,
xy=(0, 0.5),
xytext=(-subfigures[case_id][0].yaxis.labelpad - 5, 0),
xycoords=subfigures[case_id][0].yaxis.label,
textcoords='offset points',
size='large',
ha='right',
va='center')
for col_id in range(len(columns)):
col = columns[col_id]
if archs is None:
current_archs = data[case][col].keys()
else:
current_archs = archs & data[case][col].keys()
if bars == 'sync_vs_async':
y_left = [
data[case][col][arch]['sync'] for arch in current_archs
]
label_left = 'sync'
y_right = [
data[case][col][arch]['async'] for arch in current_archs
]
label_right = 'async'
elif bars == 'sync_regression':
y_left = [
baseline_data[case][col][arch]['sync']
for arch in current_archs
]
label_left = 'before'
y_right = [
data[case][col][arch]['sync'] for arch in current_archs
]
label_right = 'after'
elif bars == 'async_regression':
y_left = [
baseline_data[case][col][arch]['async']
for arch in current_archs
]
label_left = 'before'
y_right = [
data[case][col][arch]['async'] for arch in current_archs
]
label_right = 'after'
else:
raise RuntimeError('Unknown bars type')
if normalize_to_lowest(col):
for i in range(len(current_archs)):
maximum = max(y_left[i], y_right[i])
y_left[i] = y_left[i] / maximum if y_left[i] != 0 else 1
y_right[i] = y_right[i] / maximum if y_right[i] != 0 else 1
ax = subfigures[case_id][col_id]
bar_left = ax.bar(x=[
i - bar_width / 2 - bar_distance / 2
for i in range(len(current_archs))
],
height=y_left,
width=bar_width,
label=label_left,
color=(0.3, 0.7, 0.9, 1.0))
bar_right = ax.bar(x=[
i + bar_width / 2 + bar_distance / 2
for i in range(len(current_archs))
],
height=y_right,
width=bar_width,
label=label_right,
color=(0.8, 0.2, 0.3, 1.0))
ax.set_xticks(range(len(current_archs)))
ax.set_xticklabels(current_archs)
figure.legend((bar_left, bar_right), (label_left, label_right),
loc='lower center')
figure.subplots_adjust(left=left_margin)
fig = plt.gcf()
fig.set_size_inches(13, 8)
plt.show()
def stat_write(key, value):
import taichi as ti
import yaml
case_name = os.environ.get('TI_CURRENT_BENCHMARK')
if case_name is None:
return
if case_name.startswith('benchmark_'):
case_name = case_name[10:]
arch_name = core.arch_name(ti.cfg.arch)
async_mode = 'async' if ti.cfg.async_mode else 'sync'
output_dir = os.environ.get('TI_BENCHMARK_OUTPUT_DIR', '.')
filename = f'{output_dir}/benchmark.yml'
try:
with open(filename, 'r') as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
except FileNotFoundError:
data = {}
data.setdefault(case_name, {})
data[case_name].setdefault(key, {})
data[case_name][key].setdefault(arch_name, {})
data[case_name][key][arch_name][async_mode] = value
with open(filename, 'w') as f:
yaml.dump(data, f, Dumper=yaml.SafeDumper)
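# The YAML written above is nested as case -> metric -> arch -> sync/async.
# An illustrative (made-up) fragment of benchmark.yml:
#
#   fill_dense:
#     wall_clk_t:
#       x64:
#         sync: 0.012
#         async: 0.011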
def is_arch_supported(arch):
arch_table = {
cuda: core.with_cuda,
metal: core.with_metal,
opengl: core.with_opengl,
cc: core.with_cc,
cpu: lambda: True
}
with_arch = arch_table.get(arch, lambda: False)
try:
return with_arch()
except Exception as e:
arch = core.arch_name(arch)
core.warn(
            f"{e.__class__.__name__}: '{e}' occurred when detecting "
            f"{arch}; consider adding `export TI_WITH_{arch.upper()}=0` "
            f"to your environment variables to suppress this warning message.")
return False
def supported_archs():
archs = [cpu, cuda, metal, opengl, cc]
wanted_archs = os.environ.get('TI_WANTED_ARCHS', '')
want_exclude = wanted_archs.startswith('^')
if want_exclude:
wanted_archs = wanted_archs[1:]
wanted_archs = wanted_archs.split(',')
# Note, ''.split(',') gives you [''], which is not an empty array.
wanted_archs = list(filter(lambda x: x != '', wanted_archs))
if len(wanted_archs):
archs, old_archs = [], archs
for arch in old_archs:
if want_exclude == (core.arch_name(arch) not in wanted_archs):
archs.append(arch)
archs, old_archs = [], archs
for arch in old_archs:
if is_arch_supported(arch):
archs.append(arch)
return archs
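# A hedged usage sketch (the arch names are illustrative):
#   TI_WANTED_ARCHS="cuda,opengl"  -> only cuda/opengl are considered (if supported)
#   TI_WANTED_ARCHS="^opengl,cc"   -> every supported arch except opengl and cc
#   TI_WANTED_ARCHS=""             -> no filtering; all supported archs are returned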
def adaptive_arch_select(arch):
if arch is None:
return cpu
import taichi as ti
if not isinstance(arch, (list, tuple)):
arch = [arch]
for a in arch:
if is_arch_supported(a):
return a
ti.warn(f'Arch={arch} is not supported, falling back to CPU')
return cpu
class _ArchCheckers(object):
def __init__(self):
self._checkers = []
def register(self, c):
self._checkers.append(c)
def __call__(self, arch):
assert isinstance(arch, core.Arch)
return all([c(arch) for c in self._checkers])
_tests_arch_checkers_argname = '_tests_arch_checkers'
def _get_or_make_arch_checkers(kwargs):
k = _tests_arch_checkers_argname
if k not in kwargs:
kwargs[k] = _ArchCheckers()
return kwargs[k]
# test with all archs
def all_archs_with(**kwargs):
kwargs = _deepcopy(kwargs)
def decorator(test):
# @pytest.mark.parametrize decorator only knows about regular function args,
# without *args or **kwargs. By decorating with @functools.wraps, the
# signature of |test| is preserved, so that @ti.all_archs can be used after
# the parametrization decorator.
#
# Full discussion: https://github.com/pytest-dev/pytest/issues/6810
@functools.wraps(test)
def wrapped(*test_args, **test_kwargs):
import taichi as ti
can_run_on = test_kwargs.pop(_tests_arch_checkers_argname,
_ArchCheckers())
# Filter away archs that don't support 64-bit data.
fp = kwargs.get('default_fp', ti.f32)
ip = kwargs.get('default_ip', ti.i32)
if fp == ti.f64 or ip == ti.i64:
can_run_on.register(lambda arch: is_extension_supported(
arch, extension.data64))
for arch in ti.supported_archs():
if can_run_on(arch):
print('Running test on arch={}'.format(arch))
ti.init(arch=arch, **kwargs)
test(*test_args, **test_kwargs)
else:
print('Skipped test on arch={}'.format(arch))
return wrapped
return decorator
# test with all archs
def all_archs(test):
return all_archs_with()(test)
# Exclude the given archs when running the tests
#
# Example usage:
#
# @ti.archs_excluding(ti.cuda, ti.metal)
# def test_xx():
# ...
#
# @ti.archs_excluding(ti.cuda, default_fp=ti.f64)
# def test_yy():
# ...
def archs_excluding(*excluded_archs, **kwargs):
# |kwargs| will be passed to all_archs_with(**kwargs)
assert all([isinstance(a, core.Arch) for a in excluded_archs])
excluded_archs = set(excluded_archs)
def decorator(test):
@functools.wraps(test)
def wrapped(*test_args, **test_kwargs):
def checker(arch):
return arch not in excluded_archs
_get_or_make_arch_checkers(test_kwargs).register(checker)
return all_archs_with(**kwargs)(test)(*test_args, **test_kwargs)
return wrapped
return decorator
# Specifies the extension features the archs are required to support in order
# to run the test.
#
# Example usage:
#
# @ti.require(ti.extension.data64)
# @ti.all_archs_with(default_fp=ti.f64)
# def test_xx():
# ...
def require(*exts):
# Because this decorator injects an arch checker, its usage must be followed
# with all_archs_with(), either directly or indirectly.
assert all([isinstance(e, core.Extension) for e in exts])
def decorator(test):
@functools.wraps(test)
def wrapped(*test_args, **test_kwargs):
def checker(arch):
return all([is_extension_supported(arch, e) for e in exts])
_get_or_make_arch_checkers(test_kwargs).register(checker)
test(*test_args, **test_kwargs)
return wrapped
return decorator
def archs_support_sparse(test, **kwargs):
wrapped = all_archs_with(**kwargs)(test)
return require(extension.sparse)(wrapped)
def torch_test(func):
import taichi as ti
if ti.has_pytorch():
        # OpenGL somehow crashes the torch test for no apparent reason, unfortunately
return ti.archs_excluding(ti.opengl)(func)
else:
return lambda: None
# test with host arch only
def host_arch_only(func):
import taichi as ti
@functools.wraps(func)
def test(*args, **kwargs):
archs = [ti.core.host_arch()]
for arch in archs:
ti.init(arch=arch)
func(*args, **kwargs)
return test
def archs_with(archs, **init_kwargs):
"""
Run the test on the given archs with the given init args.
Args:
archs: a list of Taichi archs
init_kwargs: kwargs passed to ti.init()
"""
import taichi as ti
def decorator(test):
@functools.wraps(test)
def wrapped(*test_args, **test_kwargs):
for arch in archs:
                ti.init(arch=arch, **init_kwargs)
test(*test_args, **test_kwargs)
return wrapped
return decorator
def must_throw(ex):
def decorator(func):
def func__(*args, **kwargs):
finishes = False
try:
host_arch_only(func)(*args, **kwargs)
finishes = True
except ex:
# throws. test passed
pass
except Exception as err_actual:
assert False, 'Exception {} instead of {} thrown'.format(
str(type(err_actual)), str(ex))
if finishes:
assert False, 'Test successfully finished instead of throwing {}'.format(
str(ex))
return func__
return decorator
def complex_kernel(func):
def decorated(*args, **kwargs):
get_runtime().inside_complex_kernel = True
if get_runtime().target_tape:
get_runtime().target_tape.insert(decorated, args)
try:
func(*args, **kwargs)
finally:
get_runtime().inside_complex_kernel = False
decorated.grad = None
return decorated
def complex_kernel_grad(primal):
def decorator(func):
def decorated(*args, **kwargs):
func(*args, **kwargs)
primal.grad = decorated
return decorated
return decorator
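# A hedged usage sketch (the function names below are illustrative):
#
#   @ti.complex_kernel
#   def forward():
#       ...  # calls one or more ti.kernel functions
#
#   @ti.complex_kernel_grad(forward)
#   def forward_grad():
#       ...  # hand-written gradient for the whole forward block
#
# Registering the gradient this way lets forward() be recorded by ti.Tape().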
def sync():
get_runtime().sync()
__all__ = [s for s in dir() if not s.startswith('_')]
| 30.532828 | 124 | 0.592465 |
4a1a0d72d4350b7d5615dd47e5cfb3d8c081b388
| 15,801 |
py
|
Python
|
tests/storage/test_mongo_adapter.py
|
macdaliot/ChatterBot
|
8eaab0865158aeb81a2deb4ca9d48d4ae8b483a3
|
[
"BSD-3-Clause"
] | 1 |
2020-08-13T02:46:51.000Z
|
2020-08-13T02:46:51.000Z
|
tests/storage/test_mongo_adapter.py
|
macdaliot/ChatterBot
|
8eaab0865158aeb81a2deb4ca9d48d4ae8b483a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/storage/test_mongo_adapter.py
|
macdaliot/ChatterBot
|
8eaab0865158aeb81a2deb4ca9d48d4ae8b483a3
|
[
"BSD-3-Clause"
] | 1 |
2019-04-20T09:54:28.000Z
|
2019-04-20T09:54:28.000Z
|
from unittest import TestCase
from chatterbot.storage import MongoDatabaseAdapter
from chatterbot.conversation import Statement
class MongoAdapterTestCase(TestCase):
@classmethod
def setUpClass(cls):
"""
Instantiate the adapter before any tests in the test case run.
"""
from pymongo.errors import ServerSelectionTimeoutError
from pymongo import MongoClient
cls.has_mongo_connection = False
try:
client = MongoClient(
serverSelectionTimeoutMS=0.1
)
client.server_info()
cls.adapter = MongoDatabaseAdapter(
database_uri='mongodb://localhost:27017/chatterbot_test_database'
)
cls.has_mongo_connection = True
except ServerSelectionTimeoutError:
pass
def setUp(self):
"""
Skip these tests if a mongo client is not running.
"""
if not self.has_mongo_connection:
self.skipTest('Unable to connect to mongo database.')
def tearDown(self):
"""
Remove the test database.
"""
self.adapter.drop()
class MongoDatabaseAdapterTestCase(MongoAdapterTestCase):
def test_count_returns_zero(self):
"""
The count method should return a value of 0
when nothing has been saved to the database.
"""
self.assertEqual(self.adapter.count(), 0)
def test_count_returns_value(self):
"""
The count method should return a value of 1
when one item has been saved to the database.
"""
self.adapter.create(text="Test statement")
self.assertEqual(self.adapter.count(), 1)
def test_filter_text_statement_not_found(self):
"""
        Test that no results are returned by the filter method
        when a matching statement is not found.
"""
        results = list(self.adapter.filter(text='Non-existent'))
self.assertEqual(len(results), 0)
def test_filter_text_statement_found(self):
"""
Test that a matching statement is returned
when it exists in the database.
"""
self.adapter.create(text='New statement')
results = list(self.adapter.filter(text='New statement'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'New statement')
def test_update_adds_new_statement(self):
self.adapter.create(text='New statement')
results = list(self.adapter.filter(text='New statement'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'New statement')
def test_update_modifies_existing_statement(self):
statement = Statement(text="New statement")
self.adapter.update(statement)
# Check the initial values
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, None)
# Update the statement value
statement.in_response_to = "New response"
self.adapter.update(statement)
# Check that the values have changed
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, "New response")
def test_get_random_returns_statement(self):
text = "New statement"
self.adapter.create(text=text)
random_statement = self.adapter.get_random()
self.assertEqual(random_statement.text, text)
def test_mongo_to_object(self):
self.adapter.create(text='Hello', in_response_to='Hi')
statement_data = self.adapter.statements.find_one({'text': 'Hello'})
obj = self.adapter.mongo_to_object(statement_data)
self.assertEqual(type(obj), Statement)
self.assertEqual(obj.text, 'Hello')
self.assertEqual(obj.in_response_to, 'Hi')
self.assertEqual(obj.id, statement_data['_id'])
def test_remove(self):
text = "Sometimes you have to run before you can walk."
self.adapter.create(text=text)
self.adapter.remove(text)
results = list(self.adapter.filter(text=text))
self.assertEqual(results, [])
def test_remove_response(self):
text = "Sometimes you have to run before you can walk."
self.adapter.create(text='', in_response_to=text)
self.adapter.remove(text)
results = list(self.adapter.filter(text=text))
self.assertEqual(results, [])
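# A short usage sketch of the adapter API exercised by these tests (the URI is
# the same test database configured in setUpClass above):
#
#   adapter = MongoDatabaseAdapter(
#       database_uri='mongodb://localhost:27017/chatterbot_test_database')
#   adapter.create(text='Hello', in_response_to='Hi')
#   matches = list(adapter.filter(text='Hello'))
#   adapter.remove('Hello')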
class MongoAdapterFilterTestCase(MongoAdapterTestCase):
def test_filter_text_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(text='Howdy'))
self.assertEqual(len(results), 0)
def test_filter_in_response_to_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(in_response_to='Maybe'))
self.assertEqual(len(results), 0)
def test_filter_equal_results(self):
statement1 = Statement(
text="Testing...",
in_response_to=[]
)
statement2 = Statement(
text="Testing one, two, three.",
in_response_to=[]
)
self.adapter.update(statement1)
self.adapter.update(statement2)
results = list(self.adapter.filter(in_response_to=[]))
self.assertEqual(len(results), 2)
self.assertIn(statement1, results)
self.assertIn(statement2, results)
def test_filter_no_parameters(self):
"""
If no parameters are passed to the filter,
then all statements should be returned.
"""
self.adapter.create(text="Testing...")
self.adapter.create(text="Testing one, two, three.")
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
def test_filter_in_response_to(self):
self.adapter.create(text="A", in_response_to="Yes")
self.adapter.create(text="B", in_response_to="No")
results = list(self.adapter.filter(
in_response_to="Yes"
))
# Get the first response
response = results[0]
self.assertEqual(len(results), 1)
self.assertEqual(response.in_response_to, "Yes")
def test_filter_by_tag(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = list(self.adapter.filter(tags=["greeting"]))
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hello!", results_text_list)
self.assertIn("Hi everyone!", results_text_list)
def test_filter_by_tags(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = list(self.adapter.filter(
tags=["exclamation", "fact"]
))
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hi everyone!", results_text_list)
self.assertIn("The air contains Oxygen.", results_text_list)
def test_exclude_text(self):
self.adapter.create(text='Hello!')
self.adapter.create(text='Hi everyone!')
results = list(self.adapter.filter(
exclude_text=[
'Hello!'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_exclude_text_words(self):
self.adapter.create(text='This is a good example.')
self.adapter.create(text='This is a bad example.')
self.adapter.create(text='This is a worse example.')
results = list(self.adapter.filter(
exclude_text_words=[
'bad', 'worse'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'This is a good example.')
def test_persona_not_startswith(self):
self.adapter.create(text='Hello!', persona='bot:tester')
self.adapter.create(text='Hi everyone!', persona='user:person')
results = list(self.adapter.filter(
persona_not_startswith='bot:'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='everyone'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains_multiple_matches(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='hello everyone'
))
self.assertEqual(len(results), 2)
class MongoOrderingTestCase(MongoAdapterTestCase):
"""
Test cases for the ordering of sets of statements.
"""
def test_order_by_text(self):
statement_a = Statement(text='A is the first letter of the alphabet.')
statement_b = Statement(text='B is the second letter of the alphabet.')
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['text']))
self.assertEqual(len(results), 2)
self.assertEqual(results[0], statement_a)
self.assertEqual(results[1], statement_b)
def test_order_by_created_at(self):
from datetime import datetime, timedelta
today = datetime.now()
yesterday = datetime.now() - timedelta(days=1)
statement_a = Statement(
text='A is the first letter of the alphabet.',
created_at=today
)
statement_b = Statement(
text='B is the second letter of the alphabet.',
created_at=yesterday
)
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['created_at']))
self.assertEqual(len(results), 2)
self.assertEqual(results[0], statement_a)
self.assertEqual(results[1], statement_b)
class StorageAdapterCreateTestCase(MongoAdapterTestCase):
"""
Tests for the create function of the storage adapter.
"""
def test_create_text(self):
self.adapter.create(text='testing')
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'testing')
def test_create_search_text(self):
self.adapter.create(
text='testing',
search_text='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_text, 'test')
def test_create_search_in_response_to(self):
self.adapter.create(
text='testing',
search_in_response_to='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_in_response_to, 'test')
def test_create_tags(self):
self.adapter.create(text='testing', tags=['a', 'b'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertIn('a', results[0].get_tags())
self.assertIn('b', results[0].get_tags())
def test_create_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create(text='testing', tags=['ab', 'ab'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
def test_create_many_text(self):
self.adapter.create_many([
Statement(text='A'),
Statement(text='B')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].text, 'A')
self.assertEqual(results[1].text, 'B')
def test_create_many_search_text(self):
self.adapter.create_many([
Statement(text='A', search_text='a'),
Statement(text='B', search_text='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_text, 'a')
self.assertEqual(results[1].search_text, 'b')
def test_create_many_search_in_response_to(self):
self.adapter.create_many([
Statement(text='A', search_in_response_to='a'),
Statement(text='B', search_in_response_to='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_in_response_to, 'a')
self.assertEqual(results[1].search_in_response_to, 'b')
def test_create_many_tags(self):
self.adapter.create_many([
Statement(text='A', tags=['first', 'letter']),
Statement(text='B', tags=['second', 'letter'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertIn('letter', results[0].get_tags())
self.assertIn('letter', results[1].get_tags())
self.assertIn('first', results[0].get_tags())
self.assertIn('second', results[1].get_tags())
def test_create_many_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create_many([
Statement(text='testing', tags=['ab', 'ab'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
class StorageAdapterUpdateTestCase(MongoAdapterTestCase):
"""
Tests for the update function of the storage adapter.
"""
def test_update_adds_tags(self):
statement = self.adapter.create(text='Testing')
statement.add_tags('a', 'b')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertIn('a', statements[0].get_tags())
self.assertIn('b', statements[0].get_tags())
def test_update_duplicate_tags(self):
"""
The storage adapter should not update a statement with tags
that are duplicates.
"""
statement = self.adapter.create(text='Testing', tags=['ab'])
statement.add_tags('ab')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertEqual(len(statements[0].get_tags()), 1)
self.assertEqual(statements[0].get_tags(), ['ab'])
| 32.05071 | 82 | 0.627049 |
4a1a0dcd4c3868560e1f17e6aff49dd21d299386
| 42,290 |
py
|
Python
|
lib/sqlalchemy/sql/operators.py
|
edelooff/sqlalchemy
|
97d2a2091ed4caee1e19168d0db39e4d94a6d12f
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/operators.py
|
edelooff/sqlalchemy
|
97d2a2091ed4caee1e19168d0db39e4d94a6d12f
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/operators.py
|
edelooff/sqlalchemy
|
97d2a2091ed4caee1e19168d0db39e4d94a6d12f
|
[
"MIT"
] | 1 |
2019-08-27T09:47:08.000Z
|
2019-08-27T09:47:08.000Z
|
# sql/operators.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from operator import add
from operator import and_
from operator import contains
from operator import eq
from operator import ge
from operator import getitem
from operator import gt
from operator import inv
from operator import le
from operator import lshift
from operator import lt
from operator import mod
from operator import mul
from operator import ne
from operator import neg
from operator import or_
from operator import rshift
from operator import sub
from operator import truediv
from .. import util
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods
:meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually is used via its most common subclass
:class:`.ColumnOperators`.
"""
__slots__ = ()
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`~.expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator has the highest precedence.
The operands should be enclosed in parenthesis if they contain
further sub expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`~.expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator has the highest precedence.
The operands should be enclosed in parenthesis if they contain
further sub expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`~.expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(
self, opstring, precedence=0, is_comparison=False, return_type=None
):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
        :param opstring: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher or equal to all operators, and -100
will be lower than or equal to all operators.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaluates to a boolean
true/false value, like ``==``, ``>``, etc. This flag should be set
so that ORM relationships can establish that the operator is a
comparison operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the
:paramref:`.Operators.op.is_comparison` flag.
:param return_type: a :class:`.TypeEngine` class or object that will
force the return type of an expression produced by this operator
to be of that type. By default, operators that specify
:paramref:`.Operators.op.is_comparison` will resolve to
:class:`.Boolean`, and those that do not will be of the same
type as the left-hand operand.
.. versionadded:: 1.2.0b3 - added the
:paramref:`.Operators.op.return_type` argument.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison, return_type)
def against(other):
return operator(self, other)
return against
def bool_op(self, opstring, precedence=0):
"""Return a custom boolean operator.
This method is shorthand for calling
:meth:`.Operators.op` and passing the
:paramref:`.Operators.op.is_comparison`
flag with True.
.. versionadded:: 1.2.0b3
.. seealso::
:meth:`.Operators.op`
"""
return self.op(opstring, precedence=precedence, is_comparison=True)
def operate(self, op, *other, **kwargs):
r"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.Operators.op` or :meth:`.Operators.bool_op` methods
are used to create a custom operator callable. The class can also be
used directly when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
.. seealso::
:meth:`.Operators.op`
:meth:`.Operators.bool_op`
"""
__name__ = "custom_op"
def __init__(
self,
opstring,
precedence=0,
is_comparison=False,
return_type=None,
natural_self_precedent=False,
eager_grouping=False,
):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
self.natural_self_precedent = natural_self_precedent
self.eager_grouping = eager_grouping
self.return_type = (
return_type._to_instance(return_type) if return_type else None
)
def __eq__(self, other):
return isinstance(other, custom_op) and other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
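# Typical ways custom_op instances are produced in practice (the column name is
# illustrative; see the Operators.op / Operators.bool_op docstrings above):
#
#   somecolumn.op('&')(0xff)           # generic infix operator, here a bitwise AND
#   somecolumn.bool_op('@>')(other)    # comparison-style operator returning a boolean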
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.expression.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
.. seealso::
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
__slots__ = ()
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def is_distinct_from(self, other):
"""Implement the ``IS DISTINCT FROM`` operator.
Renders "a IS DISTINCT FROM b" on most platforms;
        on some, such as SQLite, it may render "a IS NOT b".
.. versionadded:: 1.1
"""
return self.operate(is_distinct_from, other)
def isnot_distinct_from(self, other):
"""Implement the ``IS NOT DISTINCT FROM`` operator.
Renders "a IS NOT DISTINCT FROM b" on most platforms;
        on some, such as SQLite, it may render "a IS b".
.. versionadded:: 1.1
"""
return self.operate(isnot_distinct_from, other)
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __contains__(self, other):
return self.operate(contains, other)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as PostgreSQL ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
r"""Implement the ``like`` operator.
In a column context, produces the expression::
a LIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
In a column context, produces an expression either of the form::
lower(a) LIKE lower(other)
Or on backends that support the ILIKE operator::
a ILIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``column IN <other>``.
The given parameter ``other`` may be:
* A list of literal values, e.g.::
stmt.where(column.in_([1, 2, 3]))
In this calling form, the list of items is converted to a set of
bound parameters the same length as the list given::
WHERE COL IN (?, ?, ?)
* An empty list, e.g.::
stmt.where(column.in_([]))
In this calling form, the expression renders a "false" expression,
e.g.::
WHERE 1 != 1
This "false" expression has historically had different behaviors
in older SQLAlchemy versions, see
:paramref:`.create_engine.empty_in_strategy` for behavioral options.
.. versionchanged:: 1.2 simplified the behavior of "empty in"
expressions
* A bound parameter, e.g. :func:`.bindparam`, may be used if it
includes the :paramref:`.bindparam.expanding` flag::
stmt.where(column.in_(bindparam('value', expanding=True)))
In this calling form, the expression renders a special non-SQL
placeholder expression that looks like::
WHERE COL IN ([EXPANDING_value])
This placeholder expression is intercepted at statement execution
time to be converted into the variable number of bound parameter
form illustrated earlier. If the statement were executed as::
connection.execute(stmt, {"value": [1, 2, 3]})
The database would be passed a bound parameter for each value::
WHERE COL IN (?, ?, ?)
.. versionadded:: 1.2 added "expanding" bound parameters
If an empty list is passed, a special "empty list" expression,
which is specific to the database in use, is rendered. On
SQLite this would be::
WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
.. versionadded:: 1.3 "expanding" bound parameters now support
empty lists
* a :func:`.select` construct, which is usually a correlated
scalar select::
stmt.where(
column.in_(
select([othertable.c.y]).
where(table.c.x == othertable.c.x)
)
)
In this calling form, :meth:`.ColumnOperators.in_` renders as given::
WHERE COL IN (SELECT othertable.y
FROM othertable WHERE othertable.x = table.x)
:param other: a list of literals, a :func:`.select` construct,
or a :func:`.bindparam` construct that includes the
:paramref:`.bindparam.expanding` flag set to True.
"""
return self.operate(in_op, other)
def notin_(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``.
In the case that ``other`` is an empty sequence, the compiler
produces an "empty not in" expression. This defaults to the
expression "1 = 1" to produce true in all cases. The
:paramref:`.create_engine.empty_in_strategy` may be used to
alter this behavior.
.. versionchanged:: 1.2 The :meth:`.ColumnOperators.in_` and
:meth:`.ColumnOperators.notin_` operators
now produce a "static" expression for an empty IN sequence
by default.
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(notin_op, other)
def notlike(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
def notilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.isnot`
"""
return self.operate(is_, other)
def isnot(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(isnot, other)
def startswith(self, other, **kwargs):
r"""Implement the ``startswith`` operator.
Produces a LIKE expression that tests against a match for the start
of a string value::
column LIKE <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.startswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.startswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.startswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.startswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.startswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.startswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.startswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.startswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.startswith.autoescape`::
somecolumn.startswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
r"""Implement the 'endswith' operator.
Produces a LIKE expression that tests against a match for the end
of a string value::
column LIKE '%' || <other>
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.endswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.endswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.endswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.endswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.endswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.endswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.endswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.endswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.endswith.autoescape`::
somecolumn.endswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
r"""Implement the 'contains' operator.
Produces a LIKE expression that tests against a match for the middle
of a string value::
column LIKE '%' || <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.contains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.contains.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.contains.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.contains.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.contains("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.contains.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.contains.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.contains("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.contains.autoescape`::
somecolumn.contains("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.like`
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements a database-specific 'match' operator.
:meth:`~.ColumnOperators.match` attempts to resolve to
a MATCH-like function or operator provided by the backend.
Examples include:
* PostgreSQL - renders ``x @@ to_tsquery(y)``
* MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)``
* Oracle - renders ``CONTAINS(x, y)``
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "MATCH". This is compatible with SQLite, for
example.
"""
return self.operate(match_op, other, **kwargs)
def desc(self):
"""Produce a :func:`~.expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`~.expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nullsfirst(self):
"""Produce a :func:`~.expression.nullsfirst` clause against the
parent object."""
return self.operate(nullsfirst_op)
def nullslast(self):
"""Produce a :func:`~.expression.nullslast` clause against the
parent object."""
return self.operate(nullslast_op)
def collate(self, collation):
"""Produce a :func:`~.expression.collate` clause against
the parent object, given the collation string.
.. seealso::
:func:`~.expression.collate`
"""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def __rmod__(self, other):
"""Implement the ``%`` operator in reverse.
See :meth:`.ColumnOperators.__mod__`.
"""
return self.reverse_operate(mod, other)
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`~.expression.between` clause against
the parent object, given the lower and upper range.
"""
return self.operate(between_op, cleft, cright, symmetric=symmetric)
def distinct(self):
"""Produce a :func:`~.expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def any_(self):
"""Produce a :func:`~.expression.any_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
object, or for some backends an column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ANY (somearray)'
expr = 5 == mytable.c.somearray.any_()
# mysql '5 = ANY (SELECT value FROM table)'
expr = 5 == select([table.c.value]).scalar_subquery().any_()
.. seealso::
:func:`~.expression.any_` - standalone version
:func:`~.expression.all_` - ALL operator
.. versionadded:: 1.1
"""
return self.operate(any_op)
def all_(self):
"""Produce a :func:`~.expression.all_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
object, or for some backends an column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ALL (somearray)'
expr = 5 == mytable.c.somearray.all_()
# mysql '5 = ALL (SELECT value FROM table)'
expr = 5 == select([table.c.value]).scalar_subquery().all_()
.. seealso::
:func:`~.expression.all_` - standalone version
:func:`~.expression.any_` - ANY operator
.. versionadded:: 1.1
"""
return self.operate(all_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
_commutative = {eq, ne, add, mul}
_comparison = {eq, ne, lt, gt, ge, le}
def commutative_op(fn):
_commutative.add(fn)
return fn
def comparison_op(fn):
_comparison.add(fn)
return fn
def from_():
raise NotImplementedError()
@comparison_op
def function_as_comparison_op():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def istrue(a):
raise NotImplementedError()
def isfalse(a):
raise NotImplementedError()
@comparison_op
def is_distinct_from(a, b):
return a.is_distinct_from(b)
@comparison_op
def isnot_distinct_from(a, b):
return a.isnot_distinct_from(b)
@comparison_op
def is_(a, b):
return a.is_(b)
@comparison_op
def isnot(a, b):
return a.isnot(b)
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
@comparison_op
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
@comparison_op
def notlike_op(a, b, escape=None):
return a.notlike(b, escape=escape)
@comparison_op
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
@comparison_op
def notilike_op(a, b, escape=None):
return a.notilike(b, escape=escape)
@comparison_op
def between_op(a, b, c, symmetric=False):
return a.between(b, c, symmetric=symmetric)
@comparison_op
def notbetween_op(a, b, c, symmetric=False):
return a.notbetween(b, c, symmetric=symmetric)
@comparison_op
def in_op(a, b):
return a.in_(b)
@comparison_op
def notin_op(a, b):
return a.notin_(b)
def distinct_op(a):
return a.distinct()
def any_op(a):
return a.any_()
def all_op(a):
return a.all_()
def _escaped_like_impl(fn, other, escape, autoescape):
if autoescape:
if autoescape is not True:
util.warn(
"The autoescape parameter is now a simple boolean True/False"
)
if escape is None:
escape = "/"
if not isinstance(other, util.compat.string_types):
raise TypeError("String value expected when autoescape=True")
if escape not in ("%", "_"):
other = other.replace(escape, escape + escape)
other = other.replace("%", escape + "%").replace("_", escape + "_")
return fn(other, escape=escape)
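# Illustration added for clarity (not part of the original source), following
# the docstring examples above: with autoescape=True and no explicit escape,
#   _escaped_like_impl(somecolumn.startswith, "foo%bar", None, True)
# defaults the escape character to "/", rewrites the literal to "foo/%bar",
# and the resulting expression renders as:
#   somecolumn LIKE :param || '%' ESCAPE '/'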
@comparison_op
def startswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def notstartswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def endswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def notendswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def contains_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def notcontains_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def match_op(a, b, **kw):
return a.match(b, **kw)
@comparison_op
def notmatch_op(a, b, **kw):
return a.notmatch(b, **kw)
def comma_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_in_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_notin_op(a, b):
raise NotImplementedError()
def filter_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nullsfirst_op(a):
return a.nullsfirst()
def nullslast_op(a):
return a.nullslast()
def json_getitem_op(a, b):
raise NotImplementedError()
def json_path_getitem_op(a, b):
raise NotImplementedError()
def is_comparison(op):
return op in _comparison or isinstance(op, custom_op) and op.is_comparison
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op, nullsfirst_op, nullslast_op)
def is_natural_self_precedent(op):
return (
op in _natural_self_precedent
or isinstance(op, custom_op)
and op.natural_self_precedent
)
_booleans = (inv, istrue, isfalse, and_, or_)
def is_boolean(op):
return is_comparison(op) or op in _booleans
_mirror = {gt: lt, ge: le, lt: gt, le: ge}
def mirror(op):
"""rotate a comparison operator 180 degrees.
Note this is not the same as negation.
"""
return _mirror.get(op, op)
_associative = _commutative.union([concat_op, and_, or_]).difference([eq, ne])
def is_associative(op):
return op in _associative
_natural_self_precedent = _associative.union(
[getitem, json_getitem_op, json_path_getitem_op]
)
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol("_asbool", canonical=-10)
_smallest = util.symbol("_smallest", canonical=-100)
_largest = util.symbol("_largest", canonical=100)
_PRECEDENCE = {
from_: 15,
function_as_comparison_op: 15,
any_op: 15,
all_op: 15,
getitem: 15,
json_getitem_op: 15,
json_path_getitem_op: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
filter_op: 6,
match_op: 5,
notmatch_op: 5,
ilike_op: 5,
notilike_op: 5,
like_op: 5,
notlike_op: 5,
in_op: 5,
notin_op: 5,
is_: 5,
isnot: 5,
eq: 5,
ne: 5,
is_distinct_from: 5,
isnot_distinct_from: 5,
empty_in_op: 5,
empty_notin_op: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
notbetween_op: 5,
distinct_op: 5,
inv: 5,
istrue: 5,
isfalse: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest,
}
def is_precedent(operator, against):
if operator is against and is_natural_self_precedent(operator):
return False
else:
return _PRECEDENCE.get(
operator, getattr(operator, "precedence", _smallest)
) <= _PRECEDENCE.get(against, getattr(against, "precedence", _largest))
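# A few illustrative values (added; derived directly from the tables above):
#   is_comparison(like_op)   -> True   (decorated with @comparison_op)
#   is_commutative(add)      -> True   (add is in _commutative)
#   mirror(gt) is lt         -> True   (comparison rotated 180 degrees)
#   is_precedent(add, mul)   -> True   (7 <= 8, so "a + b" is self-grouped
#                                       when it appears under a "*")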
| 28.137059 | 79 | 0.616458 |
4a1a0ef30a4704b7be77d56ae5580839aa326a59
| 3,467 |
py
|
Python
|
formee/formTools/deploy.py
|
Arpan-206/formee
|
beffdaf9bcac0cde99e627c3602ee9ee4b01e300
|
[
"MIT"
] | 2 |
2022-03-28T06:51:28.000Z
|
2022-03-30T10:15:51.000Z
|
formee/formTools/deploy.py
|
Arpan-206/formee
|
beffdaf9bcac0cde99e627c3602ee9ee4b01e300
|
[
"MIT"
] | 1 |
2022-03-31T04:18:10.000Z
|
2022-03-31T04:21:13.000Z
|
formee/formTools/deploy.py
|
Arpan-206/formee
|
beffdaf9bcac0cde99e627c3602ee9ee4b01e300
|
[
"MIT"
] | 1 |
2022-03-29T17:01:08.000Z
|
2022-03-29T17:01:08.000Z
|
from typing import Any
from formee.auth.user_jwt import get_user_jwt
from formee.formTools.read import read_form
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from rich import print
transport = AIOHTTPTransport(url="https://hrbt-portal.hasura.app/v1/graphql",
headers={'Authorization': 'Bearer ' + get_user_jwt()})
# Create a GraphQL client using the defined transport
client = Client(transport=transport, fetch_schema_from_transport=True)
initial_form_creation_query = gql("""
mutation CreateForm($description: String!, $title: String!) {
insert_Form_one(object: {title: $title, description: $description}) {
id
description
title
}
}
""")
add_confirm_ques = gql("""
mutation AddConfirmQues($title: String!, $form: Int!) {
insert_ques_confirm(objects: {title: $title, form: $form}) {
returning {
form
id
title
}
}
}
""")
add_number_ques = gql("""
mutation AddNumberQues($title: String!, $form: Int!) {
insert_ques_number(objects: {title: $title, form: $form}) {
returning {
form
id
title
}
}
}
""")
add_options_ques = gql("""
mutation AddOptionsQues($form: Int!, $title: String!) {
insert_ques_options(objects: {form: $form, title: $title}) {
returning {
form
id
title
}
}
}
""")
add_text_ques = gql("""
mutation AddTextQues($form: Int!, $title: String!) {
insert_ques_text(objects: {form: $form, title: $title}) {
returning {
form
title
id
}
}
}
""")
add_choices = gql("""
mutation AddChoices($question: uuid!, $title: String!) {
insert_option(objects: {title: $title, question: $question}) {
returning {
id
question
title
}
}
}
""")
def deploy() -> Any:
"""
Returns:
Any: None if unsuccessful, else the form id
"""
form_data = read_form()
if form_data is None:
print("[red]No form found. Exiting.")
return
print(form_data)
retured_form_id = client.execute(initial_form_creation_query, variable_values={
'title': form_data['name'], 'description': form_data['description']})['insert_Form_one']['id']
for question in form_data['questions']:
if question['type'] == 'Text' or question['type'] == 'Short Text':
client.execute(add_text_ques, variable_values={
'form': retured_form_id, 'title': question['question']})
elif question['type'] == 'Options':
ques_id = client.execute(add_options_ques, variable_values={
'form': retured_form_id, 'title': question['question']})
for choice in question['options']:
client.execute(add_choices, variable_values={
'question': ques_id['insert_ques_options']['returning'][0]['id'], 'title': choice})
elif question['type'] == 'Number':
client.execute(add_number_ques, variable_values={
'form': retured_form_id, 'title': question['question']})
elif question['type'] == 'Confirm':
client.execute(add_confirm_ques, variable_values={
'form': retured_form_id, 'title': question['question']})
else:
print("[red]Invalid question type.")
return
print("[green]Form deployed successfully.")
print("[green]Form ID: " + str(retured_form_id))
return retured_form_id
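# Minimal invocation sketch (added; not part of the original module). It
# assumes a form definition has already been created locally so read_form()
# can find it, and that get_user_jwt() returned a valid token when the
# transport was built above.
if __name__ == "__main__":
    deploy()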
| 28.652893 | 114 | 0.616095 |
4a1a0efe9e639741906be01ecaeeb0a61ddee500
| 1,727 |
py
|
Python
|
seqpos/lib/python2.7/site-packages/mercurial/pushkey.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | 1 |
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
mercurial/pushkey.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
mercurial/pushkey.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# pushkey.py - dispatching for pushing and pulling keys
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from . import (
bookmarks,
encoding,
obsolete,
phases,
)
def _nslist(repo):
n = {}
for k in _namespaces:
n[k] = ""
if not obsolete.isenabled(repo, obsolete.exchangeopt):
n.pop('obsolete')
return n
_namespaces = {"namespaces": (lambda *x: False, _nslist),
"bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks),
"phases": (phases.pushphase, phases.listphases),
"obsolete": (obsolete.pushmarker, obsolete.listmarkers),
}
def register(namespace, pushkey, listkeys):
_namespaces[namespace] = (pushkey, listkeys)
def _get(namespace):
return _namespaces.get(namespace, (lambda *x: False, lambda *x: {}))
def push(repo, namespace, key, old, new):
'''should succeed iff value was old'''
pk = _get(namespace)[0]
return pk(repo, key, old, new)
def list(repo, namespace):
'''return a dict'''
lk = _get(namespace)[1]
return lk(repo)
encode = encoding.fromlocal
decode = encoding.tolocal
def encodekeys(keys):
"""encode the content of a pushkey namespace for exchange over the wire"""
return '\n'.join(['%s\t%s' % (encode(k), encode(v)) for k, v in keys])
def decodekeys(data):
"""decode the content of a pushkey namespace from exchange over the wire"""
result = {}
for l in data.splitlines():
k, v = l.split('\t')
result[decode(k)] = decode(v)
return result
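# Wire-format illustration (added): keys are serialized one per line as
# "<key>\t<value>", so for plain ASCII input, where the local-encoding round
# trip is the identity:
#   encodekeys([("bookmark-name", "abc123")]) == "bookmark-name\tabc123"
#   decodekeys("bookmark-name\tabc123") == {"bookmark-name": "abc123"}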
| 27.854839 | 79 | 0.649102 |
4a1a0f2d4f7edb467a22d112a1bea3040637e00c
| 353 |
py
|
Python
|
tool_requirements.py
|
satnam6502/opentitan
|
17fa1c5c51fbc554562d5e9db750610dac75909f
|
[
"Apache-2.0"
] | 1 |
2022-03-17T23:30:49.000Z
|
2022-03-17T23:30:49.000Z
|
tool_requirements.py
|
mmicko/opentitan
|
0394fabc87ae7a9f361c00ce4954c67499d4bf32
|
[
"Apache-2.0"
] | null | null | null |
tool_requirements.py
|
mmicko/opentitan
|
0394fabc87ae7a9f361c00ce4954c67499d4bf32
|
[
"Apache-2.0"
] | 1 |
2021-03-17T04:14:07.000Z
|
2021-03-17T04:14:07.000Z
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# Version requirements for various tools. Checked by tooling (e.g. fusesoc),
# and inserted into the documentation.
__TOOL_REQUIREMENTS__ = {
'verilator': '4.028',
'verible': '0.0-375-g7d55b5b',
}
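# Hypothetical consumer sketch (not part of this file): a check script could
# compare a detected tool version against these minima, for example:
#   from distutils.version import LooseVersion
#   LooseVersion("4.030") >= LooseVersion(__TOOL_REQUIREMENTS__["verilator"])  # True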
| 32.090909 | 76 | 0.733711 |
4a1a0faab38dcdbfb49ee7a2fb6d270e6311e08b
| 1,801 |
py
|
Python
|
src/data_preprocess.py
|
Oviyashri/Mushroom_Classification_pr
|
bc03cbb0351ef56e8fc903444c263ec248aefb95
|
[
"MIT"
] | null | null | null |
src/data_preprocess.py
|
Oviyashri/Mushroom_Classification_pr
|
bc03cbb0351ef56e8fc903444c263ec248aefb95
|
[
"MIT"
] | null | null | null |
src/data_preprocess.py
|
Oviyashri/Mushroom_Classification_pr
|
bc03cbb0351ef56e8fc903444c263ec248aefb95
|
[
"MIT"
] | null | null | null |
import pandas as pd
from re import split
import argparse
import yaml
from sklearn.preprocessing import LabelEncoder
from logger import App_Logger
file_object=open("Training_Logs/Loggings.txt", 'a+')
logger_object=App_Logger()
def read_params(config_path):
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
def encoding(config_path):
config = read_params(config_path)
train_data_path=config["encoder"]["train_path"]
test_data_path=config["encoder"]["test_path"]
source_train_data_path=config["split_data"]["train_path"]
source_test_data_path=config["split_data"]["test_path"]
train=pd.read_csv(source_train_data_path)
test=pd.read_csv(source_test_data_path)
try:
encoder=LabelEncoder()
for column in range(len(train.columns)):
train[train.columns[column]]= encoder.fit_transform(train[train.columns[column]])
train.to_csv(train_data_path,index=False)
logger_object.log(file_object,'Label encoding was successful for train data')
encoder=LabelEncoder()
for column in range(len(test.columns)):
test[test.columns[column]]= encoder.fit_transform(test[test.columns[column]])
test.to_csv(test_data_path,index=False)
logger_object.log(file_object,'Label encoding was successful for test data')
except Exception as e:
logger_object.log(file_object,'Exception occurred in encoding. Exception message: '+str(e))
logger_object.log(file_object,'Label encoding was unsuccessful')
raise Exception()
if __name__ =="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
data=encoding(config_path=parsed_args.config)
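# Illustrative params.yaml layout (added): the keys match the lookups above,
# the paths themselves are placeholders.
# split_data:
#   train_path: data/raw/train.csv
#   test_path: data/raw/test.csv
# encoder:
#   train_path: data/processed/train.csv
#   test_path: data/processed/test.csv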
| 37.520833 | 99 | 0.727929 |
4a1a10a324fd582de4b23efc4934a861877c06ce
| 4,194 |
py
|
Python
|
freelancefinder/jobs/admin.py
|
ScorpionResponse/freelancefinder
|
7882aa8ed42afe689e594a3e10c9fc6369f70bf5
|
[
"BSD-3-Clause"
] | 2 |
2017-03-18T04:28:58.000Z
|
2017-04-25T10:01:28.000Z
|
freelancefinder/jobs/admin.py
|
ScorpionResponse/freelancefinder
|
7882aa8ed42afe689e594a3e10c9fc6369f70bf5
|
[
"BSD-3-Clause"
] | 20 |
2017-03-18T04:29:33.000Z
|
2017-06-13T22:37:42.000Z
|
freelancefinder/jobs/admin.py
|
ScorpionResponse/freelancefinder
|
7882aa8ed42afe689e594a3e10c9fc6369f70bf5
|
[
"BSD-3-Clause"
] | 1 |
2019-01-14T19:02:06.000Z
|
2019-01-14T19:02:06.000Z
|
"""Admin site configuration for the jobs app."""
import logging
from django.contrib import admin
from .models import Post, Job, TagVariant, UserJob
logger = logging.getLogger(__name__)
def remove_tags(modeladmin, request, queryset):
"""Remove tags."""
logger.debug('MA: %s, request: %s', modeladmin, request)
for obj in queryset:
obj.tags.clear()
def set_is_removed(modeladmin, request, queryset):
"""Soft Delete objects."""
logger.debug('MA: %s, request: %s', modeladmin, request)
queryset.update(is_removed=True)
def set_as_garbage(modeladmin, request, queryset):
"""Set posts as garbage."""
logger.debug('MA: %s, request: %s', modeladmin, request)
queryset.update(garbage=True)
def set_as_freelance(modeladmin, request, queryset):
"""Set posts as freelance."""
logger.debug('MA: %s, request: %s', modeladmin, request)
queryset.update(is_freelance=True)
remove_tags.short_description = "Remove Tags"
set_is_removed.short_description = "Soft Delete"
set_as_garbage.short_description = 'Mark Garbage'
set_as_freelance.short_description = 'Mark Freelance'
class JobAdmin(admin.ModelAdmin):
"""The Job model admin has some special tag handling."""
model = Job
actions = [remove_tags]
# List fields
list_display = ('title', 'tag_list', 'created', 'modified')
search_fields = ('title',)
# Detail screen fields
fields = ('title', 'description', 'tags', 'created', 'modified', 'fingerprint')
readonly_fields = ('created', 'modified', 'fingerprint')
def get_queryset(self, request):
"""Prefetch the tags data to make this more efficient."""
return super(JobAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
"""Concatenate all tags for each job."""
logger.debug('Called Tag_list in admin: %s', self)
return u", ".join(o.name for o in obj.tags.all())
class UserJobAdmin(admin.ModelAdmin):
"""The UserJob model admin."""
model = UserJob
actions = [set_is_removed]
# List fields
list_display = ('job', 'user', 'is_removed', 'created', 'modified')
search_fields = ('job__title', 'user__username')
list_filter = ('user__username', 'is_removed')
# Detail screen fields
fields = ('job', 'user', 'is_removed', 'created', 'modified')
readonly_fields = ('created', 'modified')
def get_queryset(self, request):
"""Don't use the default manager."""
querys = self.model.all_objects.get_queryset()
ordering = self.get_ordering(request)
if ordering:
querys = querys.order_by(*ordering)
return querys
class PostAdmin(admin.ModelAdmin):
"""The Post model needs no special admin configuration."""
model = Post
actions = [remove_tags, set_as_garbage, set_as_freelance]
# List fields
list_display = ('title', 'source', 'subarea', 'tag_list', 'is_freelance', 'processed', 'garbage', 'created')
search_fields = ('title',)
list_filter = ('source__name', 'garbage', 'is_freelance')
# Detail screen fields
fields = ('title', 'url', 'source', 'subarea', 'description', 'unique', 'tags', 'is_freelance', 'processed', 'garbage', 'created', 'modified')
readonly_fields = ('created', 'modified')
def get_queryset(self, request):
"""Prefetch the tags data to make this more efficient."""
querys = self.model.all_objects.get_queryset()
ordering = self.get_ordering(request)
if ordering:
querys = querys.order_by(*ordering)
return querys.prefetch_related('tags')
def tag_list(self, obj): # pylint: disable=no-self-use
"""Concatenate all tags for each post."""
return u", ".join(o.name for o in obj.tags.all())
class TagVariantAdmin(admin.ModelAdmin):
"""The TagVariant admin lets the user put in new tags."""
model = TagVariant
list_display = ('variant', 'tag')
search_fields = ('variant', 'tag')
fields = ('variant', 'tag')
admin.site.register(Job, JobAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(TagVariant, TagVariantAdmin)
admin.site.register(UserJob, UserJobAdmin)
| 32.015267 | 146 | 0.668336 |
4a1a111ff0b3b315ecb33db2c135622903bb86e9
| 917 |
py
|
Python
|
ursina/models/procedural/circle.py
|
RedHenDev/ursina
|
56e66f066171d0e6a0fe4898ce4183f81160181d
|
[
"MIT"
] | 1,431 |
2019-04-19T07:15:08.000Z
|
2022-03-31T15:33:18.000Z
|
ursina/models/procedural/circle.py
|
RedHenDev/ursina
|
56e66f066171d0e6a0fe4898ce4183f81160181d
|
[
"MIT"
] | 239 |
2019-07-27T13:51:16.000Z
|
2022-03-27T16:44:30.000Z
|
ursina/models/procedural/circle.py
|
RedHenDev/ursina
|
56e66f066171d0e6a0fe4898ce4183f81160181d
|
[
"MIT"
] | 297 |
2019-07-27T12:21:49.000Z
|
2022-03-30T19:28:37.000Z
|
from ursina import *
class Circle(Mesh):
def __init__(self, resolution=16, radius=.5, rotate=True, mode='ngon', **kwargs):
origin = Entity()
point = Entity(parent=origin)
point.y = radius
self.vertices = list()
for i in range(resolution):
origin.rotation_z -= 360 / resolution
self.vertices.append(point.world_position)
if mode == 'line': # add the first point to make the circle whole
            self.vertices.append(self.vertices[0])
destroy(origin)
super().__init__(vertices=self.vertices, mode=mode, **kwargs)
if __name__ == '__main__':
app = Ursina()
e = Entity(model=Circle(8, mode='line', thickness=10), color=color.color(60,1,1,.3))
print(e.model.recipe)
origin = Entity(model='quad', color=color.orange, scale=(.05, .05))
ed = EditorCamera(rotation_speed = 200, panning_speed=200)
app.run()
| 31.62069 | 88 | 0.621592 |
4a1a1156885f7f284170376761d5f7fd9c1ad692
| 2,015 |
py
|
Python
|
my_classes.py
|
onepunchdan/fatigue-api-public
|
16ee6b7d7d3208bc046e97329aa4e7986df580a2
|
[
"MIT"
] | null | null | null |
my_classes.py
|
onepunchdan/fatigue-api-public
|
16ee6b7d7d3208bc046e97329aa4e7986df580a2
|
[
"MIT"
] | null | null | null |
my_classes.py
|
onepunchdan/fatigue-api-public
|
16ee6b7d7d3208bc046e97329aa4e7986df580a2
|
[
"MIT"
] | 1 |
2021-07-03T02:05:47.000Z
|
2021-07-03T02:05:47.000Z
|
import numpy as np
class DataGenerator(object):
'Generates data for Keras'
def __init__(self, seq_dim = 11, seq_len = 200, batch_size = 20, shuffle = False):
'Initialization'
self.seq_dim = seq_dim
self.seq_len = seq_len
self.batch_size = batch_size
self.shuffle = shuffle
def generate(self, labels, list_IDs):
'Generates batches of samples'
# Infinite loop
while 1:
# Generate order of exploration of dataset
indexes = self.__get_exploration_order(list_IDs)
# Generate batches
imax = int(len(indexes)/self.batch_size)
for i in range(imax):
# Find list of IDs
list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]
# Generate data
X, y = self.__data_generation(labels, list_IDs_temp)
yield X, y
def __get_exploration_order(self, list_IDs):
'Generates order of exploration'
# Find exploration order
indexes = np.arange(len(list_IDs))
if self.shuffle == True:
np.random.shuffle(indexes)
return indexes
def __data_generation(self, labels, list_IDs_temp):
        'Generates data of batch_size samples' # X : (n_samples, seq_len, seq_dim)
# Initialization
X = np.empty((self.batch_size, self.seq_len, self.seq_dim))
y = np.empty((self.batch_size), dtype = int)
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store volume
X[i, :, :] = np.load(ID)[:,:11]
# Store class
y[i] = labels[ID]
return X, sparsify(y)
def sparsify(y):
'Returns labels in binary NumPy array'
n_classes = 2# Enter number of classes
return np.array([[1 if y[i] == j else 0 for j in range(n_classes)]
for i in range(y.shape[0])])
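# Usage sketch (added; file names are hypothetical): each ID is assumed to be
# the path of a .npy array with 200 rows and at least 11 columns, and `labels`
# maps that path to a 0/1 class.
#   labels = {'sample_0.npy': 0, 'sample_1.npy': 1}
#   gen = DataGenerator(batch_size=2).generate(labels, list(labels))
#   X, y = next(gen)   # X.shape == (2, 200, 11), y.shape == (2, 2)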
| 34.152542 | 104 | 0.575682 |
4a1a1394aec1b2ab8d9a1144470f90cc72f9a02a
| 1,879 |
py
|
Python
|
google finder.py
|
chaudharypraveen98/python_beginner
|
fbde604f47bbc64e3e386d8ee758a1c336f8a3d6
|
[
"MIT"
] | null | null | null |
google finder.py
|
chaudharypraveen98/python_beginner
|
fbde604f47bbc64e3e386d8ee758a1c336f8a3d6
|
[
"MIT"
] | null | null | null |
google finder.py
|
chaudharypraveen98/python_beginner
|
fbde604f47bbc64e3e386d8ee758a1c336f8a3d6
|
[
"MIT"
] | null | null | null |
poss_ways=[]
g_1=["g","G"]
o_1=["O","0","o","()","[]","<>"]
o_2=["O","0","o","()","[]","<>"]
g_2=["g","G"]
l_1=["l","L","I"]
e_1=["e","E","3"]
for i in g_1:
for j in o_1:
for k in o_2:
for x in g_2:
for y in l_1:
for z in e_1:
poss_ways.append([i,j,k,x,y,z])
check=[]
to_che=str(input())
for i in to_che:
check.append(i)
if len(check)==6:
if check in poss_ways:
print(True)
else:
print(False)
elif len(check)==7:
if check[1]=="<" or check[1]=="[" or check[1]=="(" and check[2]==">" or check[2]==")" or check[2]=="]":
to_join="".join(check[1:3])
check.pop(1)
check.pop(1)
check.insert(1,to_join)
if check in poss_ways:
print(True)
else:
print(False)
elif check[2]=="<" or check[2]=="[" or check[2]=="(" and check[3]==">" or check[3]==")" or check[3]=="]":
to_join="".join(check[2:4])
check.pop(2)
check.pop(2)
check.insert(2,to_join)
if check in poss_ways:
print(True)
else:
print(False)
else:
print(False)
elif len(check)==8:
if check[1]=="<" or check[1]=="[" or check[1]=="(" and check[2]==")" or check[2]==">" or check[2]=="]" and check[3]=="<" or check[3]=="[" or check[3]=="(" and check[4]==">" or check[4]==")" or check[4]=="]":
to_join="".join(check[1:3])
to_join1="".join(check[3:5])
check.pop(1)
check.pop(1)
check.pop(1)
check.pop(1)
check.insert(1,to_join)
check.insert(2,to_join1)
if check in poss_ways:
print(True)
else:
print(False)
else:
print(False)
else:
print(False)
| 28.907692 | 213 | 0.439063 |
4a1a14b83bd6750eb4e4ef9781770d6cfe891358
| 84 |
py
|
Python
|
utils/models/__init__.py
|
roshanr11/Research-DCST
|
225461e6ffd7ca5a48b9688946eb36b2d98f358e
|
[
"MIT"
] | 5 |
2020-04-29T08:48:53.000Z
|
2020-12-23T10:11:39.000Z
|
utils/models/__init__.py
|
roshanr11/Research-DCST
|
225461e6ffd7ca5a48b9688946eb36b2d98f358e
|
[
"MIT"
] | 2 |
2020-01-11T08:31:06.000Z
|
2021-06-09T12:41:32.000Z
|
utils/models/__init__.py
|
roshanr11/Research-DCST
|
225461e6ffd7ca5a48b9688946eb36b2d98f358e
|
[
"MIT"
] | 5 |
2019-11-20T02:49:03.000Z
|
2020-09-17T15:27:34.000Z
|
from .parsing import *
from .parsing_gating import *
from .sequence_tagger import *
| 21 | 30 | 0.785714 |
4a1a14c921cb48cafcede12d5f70947b31793f41
| 2,623 |
py
|
Python
|
Projects/Online Workouts/w3resource/Collections/program-14.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1 |
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/Collections/program-14.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5 |
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/Collections/program-14.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Rotates (negative direction) a deque object a specified number #
# of times. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : December 27, 2019 #
# #
############################################################################################
from collections import deque
from random import randint
from sys import stderr
from copy import deepcopy
def obtain_user_input(input_mess: str) -> int:
user_data, valid = int(-1), False
while not valid:
try:
user_data = int(input(input_mess))
if user_data < 0:
raise ValueError(f'Invalid number of rotations. Must be > 0')
valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}', file=stderr)
return user_data
def create_new_deque_data(low: int, high: int, size: int) -> deque:
if size < 0:
raise ValueError(f'Invalid size [{size}] for new deque data')
return deque([randint(low, high) for _ in range(size)])
def do_rotations_A(source_deque: deque, rot_cnt: int) -> None:
print('-' * 60)
tmp_cnt = 1
while rot_cnt > 0:
print(f'Rotation-A #{tmp_cnt}: ', end='')
temp = source_deque.popleft()
source_deque.append(temp)
print(f'{source_deque}')
rot_cnt -= 1
tmp_cnt += 1
def do_rotations_B(source_deque: deque, rot_cnt: int) -> None:
print('-' * 60)
tmp_cnt = 1
while rot_cnt > 0:
source_deque.rotate(-1)
print(f'Rotation-B #{tmp_cnt}: {source_deque}')
tmp_cnt += 1
rot_cnt -= 1
if __name__ == "__main__":
# Generate random deque
deque_data = create_new_deque_data(low=0, high=20, size=10)
print(f'Generated deque data: {deque_data}')
# obtain number of rotations from user
num_rotations = obtain_user_input(input_mess='Enter number of rotations: ')
# make copy for new deque
temp_deque_copy = deepcopy(deque_data)
# do the rotations and display it to user
do_rotations_A(source_deque=deque_data, rot_cnt=num_rotations)
do_rotations_B(source_deque=temp_deque_copy, rot_cnt=num_rotations)
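    # Equivalence note (added): one popleft()+append() pass in do_rotations_A
    # has the same effect as deque.rotate(-1) in do_rotations_B, e.g.
    #   deque([1, 2, 3]) -> deque([2, 3, 1]) after a single rotation.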
| 37.471429 | 92 | 0.516965 |
4a1a151c952d00049f3a9c356cd8c65d98821169
| 260 |
py
|
Python
|
backend/ohq/migrations/0011_merge_20210415_2110.py
|
pennlabs/Office-Hours-Queue
|
f73ec90223c43595c9a167162d9d74abfb11ca42
|
[
"MIT"
] | 8 |
2020-09-05T21:12:25.000Z
|
2022-01-30T18:25:12.000Z
|
backend/ohq/migrations/0011_merge_20210415_2110.py
|
pennlabs/Office-Hours-Queue
|
f73ec90223c43595c9a167162d9d74abfb11ca42
|
[
"MIT"
] | 161 |
2020-08-05T17:05:56.000Z
|
2022-03-27T17:44:51.000Z
|
backend/ohq/migrations/0011_merge_20210415_2110.py
|
pennlabs/office-hours-queue
|
fce98d3a1b83d1459f61e6d9c3347ef619ee384e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-15 21:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("ohq", "0010_auto_20210407_0145"),
("ohq", "0010_auto_20210405_1720"),
]
operations = []
| 18.571429 | 47 | 0.65 |
4a1a15453e252e713289bb6a73069a5076d7aaec
| 2,334 |
py
|
Python
|
esCUITValida.py
|
eliluminado/esCUITValida
|
5c8c1b74a62d4e822907a7685e050d817d4e610c
|
[
"W3C"
] | 1 |
2018-08-12T22:48:34.000Z
|
2018-08-12T22:48:34.000Z
|
esCUITValida.py
|
eliluminado/esCUITValida
|
5c8c1b74a62d4e822907a7685e050d817d4e610c
|
[
"W3C"
] | null | null | null |
esCUITValida.py
|
eliluminado/esCUITValida
|
5c8c1b74a62d4e822907a7685e050d817d4e610c
|
[
"W3C"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <Python application to check that a CUIT number is valid.>
# Copyright (C) 2012 Alejandro Alvarez <contacto@codigopython.com.ar>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# http://www.codigopython.com.ar <contacto@codigopython.com.ar>
def esCUITValida(cuit):
"""
Funcion destinada a la validacion de CUIT
"""
# Convertimos el valor a una cadena
cuit = str(cuit)
# Aca removemos guiones, espacios y puntos para poder trabajar
cuit = cuit.replace("-", "") # Borramos los guiones
cuit = cuit.replace(" ", "") # Borramos los espacios
cuit = cuit.replace(".", "") # Borramos los puntos
# Si no tiene 11 caracteres lo descartamos
if len(cuit) != 11:
return False, cuit
# Solo resta analizar si todos los caracteres son numeros
if not cuit.isdigit():
return False, cuit
# Despues de estas validaciones podemos afirmar
# que contamos con 11 numeros
# Aca comienza la magia
base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
aux = 0
for i in xrange(10):
aux += int(cuit[i]) * base[i]
aux = 11 - (aux % 11)
if aux == 11:
aux = 0
elif aux == 10:
aux = 9
if int(cuit[10]) == aux:
return True, cuit
else:
return False, cuit
if __name__ == "__main__":
while True:
print "Para salir ingrese 'Salir'"
cuit = raw_input('Ingrese la CUIT:\t')
if cuit == 'Salir' or cuit == 'salir':
break
demo = esCUITValida(cuit)
if not demo[0]:
print demo[1], "No parece ser un CUIT valido, por favor vuelva a ingresarlo"
else:
print demo[1], "Es un numero de CUIT valido"
break
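# Worked example (added; computed with the weights above): for "20-12345678-?"
# the weighted sum is 148, 148 % 11 == 5 and 11 - 5 == 6, so "20-12345678-6"
# passes the check.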
| 34.835821 | 88 | 0.638389 |
4a1a171be03090bc0e5e6b50448cdf26f614857d
| 2,520 |
py
|
Python
|
vendor/pip-1.2.1/tests/local_repos.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | 2 |
2017-07-14T06:55:03.000Z
|
2021-06-03T19:54:37.000Z
|
vendor/pip-1.2.1/tests/local_repos.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | null | null | null |
vendor/pip-1.2.1/tests/local_repos.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | 3 |
2015-03-26T17:19:04.000Z
|
2020-11-11T13:50:47.000Z
|
import os
import subprocess
from pip.vcs import subversion, git, bazaar, mercurial
from pip.backwardcompat import urlretrieve
from tests.test_pip import path_to_url
from tests.pypi_server import PyPIProxy
if hasattr(subprocess, "check_call"):
subprocess_call = subprocess.check_call
else:
subprocess_call = subprocess.call
def _create_initools_repository():
subprocess_call('svnadmin create INITools'.split(), cwd=_get_vcs_folder())
def _dump_initools_repository():
filename, _ = urlretrieve('http://bitbucket.org/hltbra/pip-initools-dump/raw/8b55c908a320/INITools_modified.dump')
initools_folder = os.path.join(_get_vcs_folder(), 'INITools')
devnull = open(os.devnull, 'w')
dump = open(filename)
subprocess_call(['svnadmin', 'load', initools_folder], stdin=dump, stdout=devnull)
dump.close()
devnull.close()
os.remove(filename)
def _create_svn_repository_for_initools():
tests_cache = _get_vcs_folder()
if not os.path.exists(os.path.join(tests_cache, 'INITools')):
_create_initools_repository()
_dump_initools_repository()
def _get_vcs_folder():
folder_name = PyPIProxy.CACHE_PATH
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def _get_vcs_and_checkout_url(remote_repository):
tests_cache = _get_vcs_folder()
vcs_classes = {'svn': subversion.Subversion,
'git': git.Git,
'bzr': bazaar.Bazaar,
'hg': mercurial.Mercurial}
default_vcs = 'svn'
if '+' not in remote_repository:
remote_repository = '%s+%s' % (default_vcs, remote_repository)
vcs, repository_path = remote_repository.split('+', 1)
vcs_class = vcs_classes[vcs]
branch = ''
if vcs == 'svn':
branch = os.path.basename(remote_repository)
repository_name = os.path.basename(remote_repository[:-len(branch)-1]) # remove the slash
else:
repository_name = os.path.basename(remote_repository)
destination_path = os.path.join(tests_cache, repository_name)
if not os.path.exists(destination_path):
vcs_class(remote_repository).obtain(destination_path)
return '%s+%s' % (vcs, path_to_url('/'.join([tests_cache, repository_name, branch])))
def local_checkout(remote_repo):
if remote_repo.startswith('svn'):
_create_svn_repository_for_initools()
return _get_vcs_and_checkout_url(remote_repo)
def local_repo(remote_repo):
return local_checkout(remote_repo).split('+', 1)[1]
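# Illustration (added): given e.g. "git+https://example.com/repo.git",
# local_checkout() clones it once into the PyPIProxy cache directory and
# returns a "git+file://..." URL pointing at that local copy, while
# local_repo() strips the "git+" prefix and returns only the file:// part.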
| 33.157895 | 118 | 0.712302 |
4a1a1746db7ec18226ad45bf9e98ecdf133faf45
| 49,464 |
py
|
Python
|
setup.py
|
kellydunn/MIDItoOBS
|
25b61c6777cdcf37cbf9d2516effb8ad9d058c1a
|
[
"MIT"
] | 1 |
2021-07-16T09:55:30.000Z
|
2021-07-16T09:55:30.000Z
|
setup.py
|
kellydunn/MIDItoOBS
|
25b61c6777cdcf37cbf9d2516effb8ad9d058c1a
|
[
"MIT"
] | null | null | null |
setup.py
|
kellydunn/MIDItoOBS
|
25b61c6777cdcf37cbf9d2516effb8ad9d058c1a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import mido, threading, sys, atexit, json, time, signal, argparse
from tinydb import TinyDB, Query
from websocket import create_connection
parser = argparse.ArgumentParser(description='MIDItoOBS Config Setup')
parser.add_argument('--config',
default='config.json',
help='Path to config file. Default: ./config.json')
parser.add_argument('--port',
default=4444,
type=int,
help='Set port. Default: 4444')
parser.add_argument('--host',
default='localhost',
help='Hostname. Default: localhost')
args = parser.parse_args()
####Change IP and Port here
serverIP = args.host
serverPort = args.port
####
database = TinyDB(args.config, indent=4)
db = database.table("keys", cache_size=0)
devdb = database.table("devices", cache_size=0)
buttonActions = ["SetCurrentScene", "SetPreviewScene", "TransitionToProgram", "SetCurrentTransition", "SetSourceVisibility", "ToggleSourceVisibility", "ToggleMute", "SetMute",
"StartStopStreaming", "StartStreaming", "StopStreaming", "StartStopRecording", "StartRecording", "StopRecording", "StartStopReplayBuffer",
"StartReplayBuffer", "StopReplayBuffer", "SaveReplayBuffer", "PauseRecording", "ResumeRecording", "SetTransitionDuration", "SetCurrentProfile","SetCurrentSceneCollection",
"ResetSceneItem", "SetTextGDIPlusText", "SetBrowserSourceURL", "ReloadBrowserSource", "TakeSourceScreenshot", "EnableSourceFilter", "DisableSourceFilter", "ToggleSourceFilter", "SetAudioMonitor",
"EnableStudioMode", "DisableStudioMode", "ToggleStudioMode"]
faderActions = ["SetVolume", "SetSyncOffset", "SetSourcePosition", "SetSourceRotation", "SetSourceScale", "SetTransitionDuration", "SetGainFilter", "SetOpacity"]
jsonArchive = {"SetCurrentScene": """{"request-type": "SetCurrentScene", "message-id" : "1", "scene-name" : "%s"}""",
"SetPreviewScene": """{"request-type": "SetPreviewScene", "message-id" : "1","scene-name" : "%s"}""",
"TransitionToProgram": """{"request-type": "TransitionToProgram", "message-id" : "1"%s}""",
"SetCurrentTransition": """{"request-type": "SetCurrentTransition", "message-id" : "1", "transition-name" : "%s"}""",
"StartStopStreaming": """{"request-type": "StartStopStreaming", "message-id" : "1"}""",
"StartStreaming": """{"request-type": "StartStreaming", "message-id" : "1"}""",
"StopStreaming": """{"request-type": "StopStreaming", "message-id" : "1"}""",
"StartStopRecording": """{"request-type": "StartStopRecording", "message-id" : "1"}""",
"StartRecording": """{"request-type": "StartRecording", "message-id" : "1"}""",
"StopRecording": """{"request-type": "StopRecording", "message-id" : "1"}""",
"ToggleMute": """{"request-type": "ToggleMute", "message-id" : "1", "source": "%s"}""",
"SetMute": """{"request-type": "SetMute", "message-id" : "1", "source": "%s", "mute": %s}""",
"StartStopReplayBuffer": """{"request-type": "StartStopReplayBuffer", "message-id" : "1"}""",
"StartReplayBuffer": """{"request-type": "StartReplayBuffer", "message-id" : "1"}""",
"StopReplayBuffer": """{"request-type": "StopReplayBuffer", "message-id" : "1"}""",
"SaveReplayBuffer": """{"request-type": "SaveReplayBuffer", "message-id" : "1"}""",
"SetTransitionDuration": """{"request-type": "SetTransitionDuration", "message-id" : "1", "duration": %s}""",
"SetVolume": """{"request-type": "SetVolume", "message-id" : "1", "source": "%s", "volume": %s}""",
"SetSyncOffset": """{"request-type": "SetSyncOffset", "message-id" : "1", "source": "%s", "offset": %s}""",
"SetCurrentProfile": """{"request-type": "SetCurrentProfile", "message-id" : "1", "profile-name": "%s"}""",
"SetCurrentSceneCollection": """{"request-type": "SetCurrentSceneCollection", "message-id" : "1", "sc-name": "%s"}""",
"ResetSceneItem": """{"request-type": "ResetSceneItem", "message-id" : "1", "item": %s}""",
"SetTextGDIPlusText": """{"request-type": "SetTextGDIPlusProperties", "message-id" : "1", "source": "%s", "text": "%s"}""",
"SetBrowserSourceURL": """{"request-type": "SetSourceSettings", "message-id" : "1", "sourceName": "%s", "sourceSettings": {"url": "%s"}}""",
"SetSourcePosition": """{"request-type": "SetSceneItemProperties", "message-id" : "1", "scene-name": "%s", "item": "%s", "position": {"%s": %s}}""",
"SetSourceRotation": """{"request-type": "SetSceneItemProperties", "message-id" : "1", "scene-name": "%s", "item": "%s", "rotation": %s}""",
"SetSourceVisibility": """{"request-type": "SetSceneItemProperties", "message-id" : "1", "item": "%s", "visible": %s}""",
"ToggleSourceVisibility": """{"request-type": "SetSceneItemProperties", "message-id" : "1", "item": "%s", "visible": %s}""",
"SetSourceScale": """{{"request-type": "SetSceneItemProperties", "message-id" : "1", "scene-name": "%s", "item": "%s", "scale": {{"%s": %s%s}}}}""",
"ReloadBrowserSource": """{"request-type": "SetSourceSettings", "message-id" : "1", "sourceName": "%s", "sourceSettings": {"url": "%s"}}""",
"TakeSourceScreenshot": """{"request-type": "TakeSourceScreenshot", "message-id" : "MIDItoOBSscreenshot","sourceName" : "%s", "embedPictureFormat": "png"}""",
"SetGainFilter": """{"request-type": "SetSourceFilterSettings", "message-id" : "1","sourceName" : "%s", "filterName": "%s", "filterSettings": {"db": %s}}""",
"EnableSourceFilter": """{"request-type": "SetSourceFilterVisibility", "sourceName": "%s", "filterName": "%s", "filterEnabled": true, "message-id": "MIDItoOBS-EnableSourceFilter"}""",
"DisableSourceFilter": """{"request-type": "SetSourceFilterVisibility", "sourceName": "%s", "filterName": "%s", "filterEnabled": false, "message-id": "MIDItoOBS-DisableSourceFilter"}""",
"PauseRecording": """{"request-type": "PauseRecording", "message-id" : "MIDItoOBS-PauseRecording"}""",
"ResumeRecording": """{"request-type": "ResumeRecording", "message-id" : "MIDItoOBS-ResumeRecording"}""",
"ToggleSourceFilter": """{"request-type": "SetSourceFilterVisibility", "sourceName": "%s", "filterName": "%s", "filterEnabled": %s, "message-id": "MIDItoOBS-EnableSourceFilter"}""",
"SetOpacity": """{"request-type": "SetSourceFilterSettings", "message-id" : "1","sourceName" : "%s", "filterName": "%s", "filterSettings": {"opacity": %s}}""",
"SetAudioMonitorType": """{"request-type": "SetAudioMonitorType", "message-id" : "1","sourceName" : "%s", "monitorType": "%s"}""",
"EnableStudioMode": """{"request-type": "EnableStudioMode", "message-id" : "1"}""",
"DisableStudioMode": """{"request-type": "DisableStudioMode", "message-id" : "1"}""",
"ToggleStudioMode": """{"request-type": "ToggleStudioMode", "message-id" : "1"}"""}
sceneListShort = []
sceneListLong = []
transitionList = []
specialSourcesList = []
profilesList = []
sceneCollectionList = []
gdisourcesList = []
midiports = []
OBS_ALIGN_CENTER = (0)
OBS_ALIGN_LEFT = (1 << 0)
OBS_ALIGN_RIGHT = (1 << 1)
OBS_ALIGN_TOP = (1 << 2)
OBS_ALIGN_BOTTOM = (1 << 3)
ignore = 255
savetime1 = time.time()
def ScriptExit(signal, frame):
print("Closing midi ports...")
for port in midiports:
port["object"].close()
print("Closing database...")
database.close()
print("Exiting...")
sys.exit(0)
def midicallback(message, deviceID, deviceName):
global ignore
print()
print("Received message", message)
print("from device", deviceName)
print()
if message.type == "note_on": #button only
ignore = message.note
print("Select Action:")
counter = 0
for action in buttonActions:
print("%s: %s" % (counter, action))
counter += 1
input_select = int(input("Select 0-%s: " % str(len(buttonActions)-1)))
if input_select in range(0, len(buttonActions)):
action = buttonActions[input_select]
setupButtonEvents(action, message.channel, message.note, message.velocity, message.type, deviceID)
elif message.type == "program_change": #button only
ignore = message.program
print("Select Action:")
counter = 0
for action in buttonActions:
print("%s: %s" % (counter, action))
counter += 1
input_select = int(input("Select 0-%s: " % str(len(buttonActions)-1)))
if input_select in range(0, len(buttonActions)):
action = buttonActions[input_select]
setupButtonEvents(action, message.channel, message.program, message.value, message.type, deviceID)
elif message.type == "control_change": #button or fader
ignore = message.control
print("Select input type:\n0: Button\n1: Fader/Knob\n2: Ignore")
try:
input_select = int(input("Select 0-2: "))
if input_select in range(0, 3):
if input_select == 0:
print()
print("Select Action:")
counter = 0
for action in buttonActions:
print("%s: %s" % (counter, action))
counter += 1
input_select = int(input("Select 0-%s: " % str(len(buttonActions)-1)))
if input_select in range(0, len(buttonActions)):
action = buttonActions[input_select]
setupButtonEvents(action, message.channel, message.control, message.value, message.type, deviceID)
elif input_select == 1:
print()
print("Select Action:")
counter = 0
for action in faderActions:
print("%s: %s" % (counter, action))
counter += 1
input_select = int(input("Select 0-%s: " % str(len(faderActions)-1)))
if input_select in range(0, len(faderActions)):
action = faderActions[input_select]
setupFaderEvents(action, message.channel, message.control, message.value, message.type, deviceID)
except ValueError:
print("Please try again and enter a valid number")
#I know this is very messy, but I challenge you to make a better version (as a native plugin or a pull request to obs-studio)
def setupFaderEvents(action, channel, NoC, VoV, msgType, deviceID):
print()
print("You selected: %s" % action)
if action == "SetVolume":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
scale = (0,1)
action = jsonArchive["SetVolume"] % (source, "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetVolume", deviceID)
elif action == "SetSyncOffset":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
scale = askForInputScaling()
action = jsonArchive["SetSyncOffset"] % (source, "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetSyncOffset", deviceID)
elif action == "SetSourcePosition":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
tmpOBJ = {"scene": scene["name"], "source": line["name"]}
tempSceneList.append(tmpOBJ)
counter = 0
for line in tempSceneList:
print("%s: Source '%s' in scene '%s'" % (counter, line["source"], line["scene"]))
counter += 1
selected = tempSceneList[int(input("Select 0-%s: " % str(len(tempSceneList)-1)))]
tempTargetList = ["x", "y"]
target = int(input("\n0: X\n1: Y\nSelect Target to change (0-1): "))
if target in range(0, 2):
scale = askForInputScaling()
action = jsonArchive["SetSourcePosition"] % (selected["scene"], selected["source"], tempTargetList[target], "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetSourcePosition", deviceID)
elif action == "SetSourceRotation":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
tmpOBJ = {"scene": scene["name"], "source": line["name"]}
tempSceneList.append(tmpOBJ)
counter = 0
for line in tempSceneList:
print("%s: Source '%s' in scene '%s'" % (counter, line["source"], line["scene"]))
counter += 1
selected = tempSceneList[int(input("Select 0-%s: " % str(len(tempSceneList)-1)))]
scale = askForInputScaling()
action = jsonArchive["SetSourceRotation"] % (selected["scene"], selected["source"], "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetSourceRotation", deviceID)
elif action == "SetTransitionDuration":
scale = askForInputScaling()
action = jsonArchive["SetTransitionDuration"]
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetTransitionDuration", deviceID)
elif action == "SetSourceScale":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
tmpOBJ = {"scene": scene["name"], "source": line["name"]}
tempSceneList.append(tmpOBJ)
counter = 0
for line in tempSceneList:
print("%s: Source '%s' in scene '%s'" % (counter, line["source"], line["scene"]))
counter += 1
selected = tempSceneList[int(input("Select 0-%s: " % str(len(tempSceneList)-1)))]
tempTargetList = ["x", "y", 'x": {0}, "y']
target = int(input("\n0: X\n1: Y\n2: Both\nSelect Target to change (0-2): "))
if target in range(0, 3):
scale = askForInputScaling()
alignmentlist = ["NONE", "Top Left", "Top Center", "Top Right", "Center Left", "Center", "Center Right", "Bottom Left", "Bottom Center", "Bottom Right"]
alignmentvaluelist = ["NONE", OBS_ALIGN_TOP | OBS_ALIGN_LEFT, OBS_ALIGN_TOP | OBS_ALIGN_CENTER, OBS_ALIGN_TOP | OBS_ALIGN_RIGHT,
OBS_ALIGN_CENTER | OBS_ALIGN_LEFT, OBS_ALIGN_CENTER | OBS_ALIGN_CENTER, OBS_ALIGN_CENTER | OBS_ALIGN_RIGHT,
OBS_ALIGN_BOTTOM | OBS_ALIGN_LEFT, OBS_ALIGN_BOTTOM | OBS_ALIGN_CENTER, OBS_ALIGN_BOTTOM | OBS_ALIGN_RIGHT]
counter = 0
print()
for line in alignmentlist:
print("%s: %s" % (counter, line))
counter += 1
alignment = int(input("Select source alignment (0-{}): ".format(len(alignmentlist)-1)))
            if alignment in range(0, len(alignmentlist)):  # include the last entry ("Bottom Right")
alignmentplaceholder = ""
if type(alignmentvaluelist[alignment]) == int:
alignmentplaceholder = '}}, "position": {{"alignment": %d' % alignmentvaluelist[alignment]
action = jsonArchive["SetSourceScale"] % (selected["scene"], selected["source"], tempTargetList[target], "{0}", alignmentplaceholder)
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetSourceScale", deviceID)
elif action == "SetGainFilter":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
filtername = checkIfSourceHasGainFilter(source)
if filtername:
print("You will now be asked for the input scaling. The valid range for the gain filter is -30 (db) to 30 (db). You can select any range inside -30 to 30")
scale = askForInputScaling()
action = jsonArchive["SetGainFilter"] % (source, filtername, "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, scale, "SetGainFilter", deviceID)
else:
print("The selected source has no gain filter. Please add it in the source filter dialog and try again.")
elif action == "SetOpacity":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
filtername = checkIfSourceHasColorCorrectionFilter(source)
if filtername:
action = jsonArchive["SetOpacity"] % (source, filtername, "%s")
saveFaderToFile(channel, msgType, NoC, VoV, "fader" , action, [0, 100], "SetOpacity", deviceID)
else:
print("The selected source has no Color Correction filter with the name 'miditoobs-opacity'. Please add it in the source filter dialog and try again.")
def setupButtonEvents(action, channel, NoC, VoV, msgType, deviceID):
print()
print("You selected: %s" % action)
if action == "SetCurrentScene":
updateSceneList()
scene = printArraySelect(sceneListShort)
bidirectional = askForBidirectional()
action = jsonArchive["SetCurrentScene"] % scene
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID, bidirectional)
elif action == "SetPreviewScene":
updateSceneList()
scene = printArraySelect(sceneListShort)
bidirectional = askForBidirectional()
action = jsonArchive["SetPreviewScene"] % scene
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID, bidirectional)
elif action == "TransitionToProgram":
updateTransitionList()
print("Please select a transition to be used:")
transitionList.append("--Current--")
transition = printArraySelect(transitionList)
print(transition)
if transition != "--Current--":
tmp = ' , "with-transition": {"name": "' + transition + '"}'
action = jsonArchive["TransitionToProgram"] % tmp
else:
action = jsonArchive["TransitionToProgram"] % ""
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetCurrentTransition":
updateTransitionList()
transition = printArraySelect(transitionList)
action = jsonArchive["SetCurrentTransition"] % transition
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartStopStreaming":
action = jsonArchive["StartStopStreaming"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartStreaming":
action = jsonArchive["StartStreaming"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StopStreaming":
action = jsonArchive["StopStreaming"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartStopRecording":
action = jsonArchive["StartStopRecording"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartRecording":
action = jsonArchive["StartRecording"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StopRecording":
action = jsonArchive["StopRecording"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartStopReplayBuffer":
action = jsonArchive["StartStopReplayBuffer"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StartReplayBuffer":
action = jsonArchive["StartReplayBuffer"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "StopReplayBuffer":
action = jsonArchive["StopReplayBuffer"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SaveReplayBuffer":
action = jsonArchive["SaveReplayBuffer"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "PauseRecording":
action = jsonArchive["PauseRecording"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "ResumeRecording":
action = jsonArchive["ResumeRecording"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetSourceVisibility":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
renderArray = ["0 (Invisible)", "1 (Visible)"]
render = printArraySelect(renderArray)
if render == "0 (Invisible)":
render = "false"
else:
render = "true"
sceneListShort.append("--Current--")
scene = printArraySelect(sceneListShort)
if scene != "--Current--":
source = source + '", "scene-name": "' + scene
action = jsonArchive["SetSourceVisibility"] % (source, str(render))
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "ToggleSourceVisibility":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
source = source1 = printArraySelect(tempSceneList)
sceneListShort.append("--Current--")
scene = printArraySelect(sceneListShort)
if scene != "--Current--":
source = source + '", "scene": "' + scene
action = jsonArchive["ToggleSourceVisibility"] % (source, "%s")
saveTODOButtonToFile(channel, msgType, NoC, VoV, "button" , action, "ToggleSourceVisibility", source1, "" , deviceID)
elif action == "ToggleMute":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
action = jsonArchive["ToggleMute"] % source
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetMute":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
tempArray = ["0 (Muted)", "1 (Unmuted)"]
muted = printArraySelect(tempArray)
if muted == "0 (Muted)":
muted = "true"
else:
muted = "false"
action = jsonArchive["SetMute"] % (source, muted)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetTransitionDuration":
time = int(input("Input the desired time(in milliseconds): "))
action = jsonArchive["SetTransitionDuration"] % time
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetCurrentProfile":
updateProfileList()
profilename = printArraySelect(profilesList)
action = jsonArchive["SetCurrentProfile"] % profilename
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetRecordingFolder":
recpath = str(input("Input the desired path: "))
action = jsonArchive["SetRecordingFolder"] % recpath
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetCurrentSceneCollection":
updatesceneCollectionList()
scenecollection = printArraySelect(sceneCollectionList)
action = jsonArchive["SetCurrentSceneCollection"] % scenecollection
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "ResetSceneItem":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
sceneListShort.append("--Current--")
scene = printArraySelect(sceneListShort)
if scene != "--Current--":
render = '"' + str(source) + '", "scene-name": "' + scene + '"'
else:
render = '"' + str(source) + '"'
action = jsonArchive["ResetSceneItem"] % (render)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetTextGDIPlusText":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList and line["type"] == "text_gdiplus":
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
text = str(input("Input the desired text: "))
action = jsonArchive["SetTextGDIPlusText"] % (source, text)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "SetBrowserSourceURL":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList and line["type"] == "browser_source":
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
url = str(input("Input the desired URL: "))
action = jsonArchive["SetBrowserSourceURL"] % (source, url)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "ReloadBrowserSource":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList and line["type"] == "browser_source":
tempSceneList.append(line["name"])
source = printArraySelect(tempSceneList)
action = jsonArchive["ReloadBrowserSource"] % (source, "%s")
saveTODOButtonToFile(channel, msgType, NoC, VoV, "button" , action, "ReloadBrowserSource", source, "", deviceID)
elif action == "TakeSourceScreenshot":
updateSceneList()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for scene in sceneListShort:
tempSceneList.append(scene)
source = printArraySelect(tempSceneList)
action = jsonArchive["TakeSourceScreenshot"] % (source)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "EnableSourceFilter":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
filters = getSourceFilters(source)
if filters:
tempFilterList = []
for line in filters:
tempFilterList.append(line["name"])
selectedFilter = printArraySelect(tempFilterList)
action = jsonArchive["EnableSourceFilter"] % (source, selectedFilter)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
else:
print("\nThis source has no filters")
elif action == "DisableSourceFilter":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
filters = getSourceFilters(source)
if filters:
tempFilterList = []
for line in filters:
tempFilterList.append(line["name"])
selectedFilter = printArraySelect(tempFilterList)
action = jsonArchive["DisableSourceFilter"] % (source, selectedFilter)
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
else:
print("\nThis source has no filters")
elif action == "ToggleSourceFilter":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
filters = getSourceFilters(source)
if filters:
tempFilterList = []
for line in filters:
tempFilterList.append(line["name"])
selectedFilter = printArraySelect(tempFilterList)
action = jsonArchive["ToggleSourceFilter"] % (source, selectedFilter, "%s")
saveTODOButtonToFile(channel, msgType, NoC, VoV, "button" , action, "ToggleSourceFilter", source, selectedFilter, deviceID)
elif action == "SetAudioMonitorType":
updateSceneList()
updateSpecialSources()
tempSceneList = []
for scene in sceneListLong:
for line in scene["sources"]:
if line["name"] not in tempSceneList:
tempSceneList.append(line["name"])
for item in specialSourcesList:
tempSceneList.append(item)
source = printArraySelect(tempSceneList)
tempArray = ["None", "Monitor Only", "Monitor and Output"]
typeOfMonitor = printArraySelect(tempArray)
if typeOfMonitor == "None":
typeOfMonitor = "none"
elif typeOfMonitor == "Monitor Only":
typeOfMonitor = "monitorOnly"
else:
typeOfMonitor = "monitorAndOutput"
action = jsonArchive["SetAudioMonitorType"] % (source, typeOfMonitor)
saveButtonToFile(channel, msgType, NoC, VoV, "button", action, deviceID)
elif action == "EnableStudioMode":
action = jsonArchive["EnableStudioMode"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "DisableStudioMode":
action = jsonArchive["DisableStudioMode"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
elif action == "ToggleStudioMode":
action = jsonArchive["ToggleStudioMode"]
saveButtonToFile(channel, msgType, NoC, VoV, "button" , action, deviceID)
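# The save* helpers below persist one mapping into the TinyDB database; an existing entry for the same message type/number, channel and device is replaced.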
def saveFaderToFile(msg_channel, msg_type, msgNoC, VoV, input_type, action, scale, cmd, deviceID):
print("Saved %s with control %s for action %s on device %s channel %s" % (msg_type, msgNoC, cmd, deviceID, msg_channel))
Search = Query()
result = db.search((Search.msg_type == msg_type) & (Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
if result:
db.remove((Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
db.insert({"msg_channel": msg_channel, "msg_type": msg_type, "msgNoC": msgNoC, "msgVoV": VoV, "input_type": input_type, "scale_low": scale[0], "scale_high": scale[1], "action": action, "cmd": cmd, "deviceID": deviceID})
else:
db.insert({"msg_channel": msg_channel, "msg_type": msg_type, "msgNoC": msgNoC, "msgVoV": VoV, "input_type": input_type, "scale_low": scale[0], "scale_high": scale[1], "action": action, "cmd": cmd, "deviceID": deviceID})
def saveButtonToFile(msg_channel, msg_type, msgNoC, VoV, input_type, action, deviceID, bidirectional=False):
print("Saved %s with note/control %s for action %s on device %s channel %s, bidirectional: %d" % (msg_type, msgNoC, action, deviceID, msg_channel, bidirectional))
Search = Query()
result = db.search((Search.msg_type == msg_type) & (Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
if result:
db.remove((Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
db.insert({"msg_channel": msg_channel, "msg_type": msg_type, "msgNoC": msgNoC, "msgVoV": VoV, "input_type": input_type, "action" : action, "deviceID": deviceID, "bidirectional": bidirectional})
def saveTODOButtonToFile(msg_channel, msg_type, msgNoC, VoV, input_type, action, request, target, field2, deviceID):
print("Saved %s with note/control %s for action %s on device %s channel %s" % (msg_type, msgNoC, action, deviceID, msg_channel))
Search = Query()
result = db.search((Search.msg_type == msg_type) & (Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
if result:
db.remove((Search.msgNoC == msgNoC) & (Search.deviceID == deviceID) & (Search.msg_channel == msg_channel))
db.insert({"msg_channel": msg_channel, "msg_type": msg_type, "msgNoC": msgNoC, "msgVoV": VoV, "input_type": input_type, "action" : action, "request": request, "target": target, "deviceID": deviceID, "field2": field2})
else:
db.insert({"msg_channel": msg_channel, "msg_type": msg_type, "msgNoC": msgNoC, "msgVoV": VoV, "input_type": input_type, "action" : action, "request": request, "target": target, "deviceID": deviceID, "field2": field2})
def printArraySelect(array):
counter = 0
for line in array:
print("%s: %s" % (counter, line))
counter += 1
if counter > 1:
return array[int(input("Select 0-%s: " % str(len(array)-1)))]
else:
return array[int(input("Select 0: "))]
def askForInputScaling():
print("Setup input scale")
low = int(input("Select lower output value: "))
high = int(input("Select higher output value: "))
return low, high
def askForBidirectional():
print("Do you want the control to be bidirectional?\n1: Yes\n2: No")
bidirectional = int(input("Select 1 or 2: "))
return bidirectional == 1
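# The helpers below each open a short-lived obs-websocket connection, send one request and either cache the reply in a module-level list or return it directly.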
def updateTransitionList():
global transitionList
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nUpdating transition list, plase wait")
ws.send("""{"request-type": "GetTransitionList", "message-id": "999999"}""")
result = ws.recv()
jsn = json.loads(result)
transitionList = []
if jsn["message-id"] == "999999":
for item in jsn["transitions"]:
transitionList.append(item["name"])
print("Transitions updated")
else:
print("Failed to update")
ws.close()
def updateSceneList():
global sceneListShort
global sceneListLong
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nUpdating scene list, plase wait")
ws.send("""{"request-type": "GetSceneList", "message-id": "9999999"}""")
result = ws.recv()
jsn = json.loads(result)
sceneListShort = []
sceneListLong = []
if jsn["message-id"] == "9999999":
sceneListLong = jsn["scenes"]
for item in jsn["scenes"]:
sceneListShort.append(item["name"])
print("Scenes updated")
else:
print("Failed to update")
ws.close()
def updateSpecialSources():
global specialSourcesList
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nUpdating special sources, plase wait")
ws.send("""{"request-type": "GetSpecialSources", "message-id": "99999999"}""")
result = ws.recv()
jsn = json.loads(result)
specialSourcesList = []
if jsn["message-id"] == "99999999":
for line in jsn:
if line == "status" or line == "message-id":
pass
else:
specialSourcesList.append(jsn[line])
print("Special sources updated")
else:
print("Failed to update")
ws.close()
def updateProfileList():
global profilesList
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("Updating Profiles List, plase wait")
ws.send("""{"request-type": "ListProfiles", "message-id": "99999999"}""")
result = ws.recv()
jsn = json.loads(result)
profilesList = []
if jsn["message-id"] == "99999999":
for line in jsn["profiles"]:
profilesList.append(line["profile-name"])
print("Profiles List updated")
else:
print("Failed to update")
ws.close()
def updatesceneCollectionList():
global sceneCollectionList
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nUpdating Scene Collection List, plase wait")
ws.send("""{"request-type": "ListSceneCollections", "message-id": "99999999"}""")
result = ws.recv()
jsn = json.loads(result)
sceneCollectionList = []
if jsn["message-id"] == "99999999":
for line in jsn["scene-collections"]:
sceneCollectionList.append(line["sc-name"])
print("Scene Collection List updated")
else:
print("Failed to update")
ws.close()
def checkIfSourceHasGainFilter(sourcename):
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nChecking source filters, plase wait")
ws.send('{"request-type": "GetSourceFilters", "message-id": "MIDItoOBS-checksourcegainfilter", "sourceName": "' + sourcename + '"}')
result = ws.recv()
ws.close()
jsn = json.loads(result)
if jsn["message-id"] == "MIDItoOBS-checksourcegainfilter":
for line in jsn["filters"]:
if line["type"] == "gain_filter":
return line["name"]
return False
def checkIfSourceHasColorCorrectionFilter(sourcename):
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nChecking source filters, plase wait")
ws.send('{"request-type": "GetSourceFilters", "message-id": "MIDItoOBS-checksourcecolorcorrectionfilter", "sourceName": "' + sourcename + '"}')
result = ws.recv()
ws.close()
jsn = json.loads(result)
if jsn["message-id"] == "MIDItoOBS-checksourcecolorcorrectionfilter":
for line in jsn["filters"]:
if line["type"] == "color_filter" and line["name"] == "miditoobs-opacity":
return line["name"]
return False
def getSourceFilters(sourcename):
ws = create_connection("ws://{0}:{1}".format(serverIP, serverPort))
print("\nChecking source filters, plase wait")
ws.send('{"request-type": "GetSourceFilters", "message-id": "MIDItoOBS-getSourceFilters", "sourceName": "' + sourcename + '"}')
result = ws.recv()
ws.close()
jsn = json.loads(result)
if jsn["message-id"] == "MIDItoOBS-getSourceFilters":
return jsn["filters"]
else:
return False
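# Device management: add, remove or rename the MIDI input devices stored in the configuration database.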
def configureDevices(switch):
dbresult = devdb.all()
if switch:
print("\nTell me: What do you want to do?\n1: Rename a device and transfer their action assignments (because you plugged it into another USB port and windows decided to give the device a new name now)\n2: Delete all devices from config and re-add (Warning: this will dereference all button and fader actions(so they will no longer work). This might cause device confusion later.\n3: Remove a single device from the configuration INCLUDING their midi assignments\n4: Add new device\n5: Skip device configuration (Warning: If no device has been configured before, MIDItoOBS will NOT work)")
action_select = int(input("Select 1-4: "))
if action_select == 1:
renameDevice()
return
elif action_select == 2:
print("Removing all devices from the database....")
devdb.purge() #purge database table before adding new devices
elif action_select == 3:
removeDevice()
return
elif action_select == 4:
pass
else:
return
print("\nWhich device do you want to add?")
exitflag = 0
while not exitflag:
availableDeviceList = mido.get_input_names()
deviceList = []
counter = 0
inUseDeviceList = devdb.all()
for device in availableDeviceList:
if devInDB(device, inUseDeviceList):
pass
else:
print("%s: %s" % (counter, device))
counter += 1
deviceList.append(device)
if len(deviceList) == 0:
print("No midi input device available")
return
if len(deviceList) < 2:
input_select = int(input("Select 0: "))
else:
input_select = int(input("Select 0-%s: " % str(len(deviceList)-1)))
print("Adding:", deviceList[input_select])
result = devdb.search(Query().devicename == deviceList[input_select])
if not result:
deviceID = devdb.insert({"devicename": deviceList[input_select]})
print("Do you want to add another device?\n1: Yes\n2: No")
action_select = int(input("Select 1 or 2: "))
if action_select == 2:
exitflag = 1
def devInDB(devicename, devicedatabase):
for entry in devicedatabase:
if entry["devicename"] == devicename:
return True
return False
def removeDevice():
devices = devdb.all()
print("So you want to remove a device. Please keep in mind that this will not only remove the device but remove every action assigned to the device.\nWhich device and configuration do you want to remove?")
counter = 0
for (index, device) in enumerate(devices):
print("%s: %s" % (counter, device["devicename"]))
counter += 1
device_select = int(input("Select 0-%s: " % str(len(devices)-1)))
print("Selected:", devices[device_select]["devicename"])
yousure = input("Are you really sure you want to remove the devices and all it's assignments?\nType 'YES' and press enter: ")
if yousure == "YES":
print("As you wish. Deleting now......")
try:
result = devdb.get(Query().devicename == devices[device_select]["devicename"])
devdb.remove(doc_ids=[result.doc_id])
db.remove(Query().deviceID == result.doc_id)
except:
print("There was an error removing the device")
def renameDevice():
devices = devdb.all()
counter = 0
print("Please select a device for your configuration that you want to \"rename\" to another device:")
for (index, device) in enumerate(devices):
print("%s: %s" % (counter, device["devicename"]))
counter += 1
old_device_select = int(input("Select 0-%s: " % str(len(devices)-1)))
old_device_name = devices[old_device_select]["devicename"]
print("Selected:", old_device_name)
print("Please select the new device name now:")
availableDeviceList = mido.get_input_names()
deviceList = []
for device in availableDeviceList:
if devInDB(device, devices):
pass
else:
deviceList.append(device)
if len(deviceList) > 0:
counter = 0
for (index, device) in enumerate(deviceList):
print("%s: %s" % (counter, device))
counter += 1
new_device_select = int(input("Select 0-%s: " % str(len(deviceList)-1)))
new_device_name = deviceList[new_device_select]
print("Selected:", new_device_name, "as the new device name")
print("Updating \"", old_device_name, "\" to \"", new_device_name, "\" now", sep="")
try:
devdb.update({"devicename": new_device_name}, Query().devicename == old_device_name)
print("Sucessfully renamed the device")
except:
print("There was an error renaming the device")
else:
print("There is no other device available to switch over to. Aborting...")
def mainLoop():
global ignore
global savetime1
while True:
for device in midiports:
try:
msg = device["object"].poll()
if msg:
if msg.type == "note_on":
if msg.note != ignore:
midicallback(msg, device["id"], device["devicename"])
savetime1 = time.time()
if msg.type == "program_change":
if msg.program != ignore:
midicallback(msg, device["id"], device["devicename"])
savetime1 = time.time()
if msg.type == "control_change":
if msg.control != ignore:
midicallback(msg, device["id"], device["devicename"])
savetime1 = time.time()
if time.time() - savetime1 > 3:
savetime1 = time.time()
ignore = 255
except KeyboardInterrupt:
ScriptExit(0, 0)
break
if __name__ == "__main__":
print("MIDItoOBS made by https://github.com/lebaston100\n")
print("This setup assistant will guide you though the initial setup. If you experience any problems that you can not solve on your own feel free to open an issue on Github\n")
print("!!Important!!")
print("!!MAKE SURE OBS IS RUNNING OR THIS SCRIPT WILL CRASH!!")
print("!!MAKE SURE THAT THE MIDI DEVICE(S) ARE NOT IN USE BY ANOTHER APPLICATION!!\n")
signal.signal(signal.SIGINT, ScriptExit)
#search if config available and a device configuration is present
result = devdb.all()
if result:
print("Please select the number of what you want to do:\n1: Re-Setup the midi devices that are used.\n2: Leave the selected midi devices as-is and just edit button/fader assignment")
action_select = int(input("Select 1 or 2: "))
if action_select == 1:
configureDevices(1) #start device settings dialog because user choice
elif action_select == 2:
pass #leave configuration as is
else:
print("Invalid selection")
ScriptExit(0, 0)
else:
configureDevices(0) #start device settings dialog because nothing is set up yet
#the functions will return and we'll continue here
devices = devdb.all()
for device in devices: #gave up on documentation here
try:
tempmidiport = mido.open_input(device["devicename"])
tempobj = {"id": device.doc_id, "object": tempmidiport, "devicename": device["devicename"]}
midiports.append(tempobj)
except:
print("\nCould not open device", device["devicename"])
print("The midi device might be used by another application/not plugged in/have a different name.")
print("Please close the device in the other application/plug it in/edit the name in the config.json and restart this script.\n")
database.close()
sys.exit(5)
print("\nPlease press key or move fader/knob on midi controller")
mainLoop()
ScriptExit(0, 0)
| 51.258031 | 596 | 0.60911 |
4a1a177c88b14b37e2a0507a59274b5dd83b09c3
| 1,337 |
py
|
Python
|
cartpole/cp_r.py
|
Luca-Mueller/drl-transfer
|
6b6f3c4dc8540e42b42d25523f948ea9de4f4ed1
|
[
"MIT"
] | null | null | null |
cartpole/cp_r.py
|
Luca-Mueller/drl-transfer
|
6b6f3c4dc8540e42b42d25523f948ea9de4f4ed1
|
[
"MIT"
] | null | null | null |
cartpole/cp_r.py
|
Luca-Mueller/drl-transfer
|
6b6f3c4dc8540e42b42d25523f948ea9de4f4ed1
|
[
"MIT"
] | null | null | null |
import pickle
import sys
import numpy as np
from pathlib import Path
HISTORY = Path("history")
KEYS = ["buffer_transfer", "model_transfer", "double_transfer"]
def calc_r(default_score, transfer_score):
mean_default = np.mean(default_score, axis=0)
mean_transfer = np.mean(transfer_score, axis=0)
return (sum(mean_transfer) - sum(mean_default)) / sum(mean_transfer)
if __name__ == "__main__":
eps = sys.argv[1]
history = Path("history")
print("Area Ratio Scores")
with open("r_scores.txt", "w") as file:
for task in ("cp_v0", "cp_vL"):
for agent in ("DQN", "DDQN", "DQV"):
try:
filename = f"{task}_{agent}_{eps}eps_hist.pickle"
path = history / task / filename
with open(path, "rb") as f:
data = pickle.load(f)
print(f"\n{filename[:-12]}")
file.write(filename[:-12] + '\n')
for key in KEYS:
line = f"{key:<20}:{round(calc_r(data['default'], data[key]), 2)}"
print(line)
file.write(line + '\n')
file.write('\n')
except FileNotFoundError:
print(f"\nNo entry found for {task}_{agent}_{eps}eps")
| 31.833333 | 90 | 0.521316 |
4a1a17c51a82a81a9a3d0efacc75835d627e37d7
| 2,897 |
py
|
Python
|
test/orm/test_selectable.py
|
lambdanis/sqlalchemy
|
f94648bb922a73423d73f17c2148253fcc5893b9
|
[
"MIT"
] | 6 |
2019-02-18T12:42:44.000Z
|
2020-11-11T23:10:17.000Z
|
test/orm/test_selectable.py
|
KonstantinKlepikov/sqlalchemy-1
|
2c34d2503a17316cae3282192405b9b9d60df6fe
|
[
"MIT"
] | null | null | null |
test/orm/test_selectable.py
|
KonstantinKlepikov/sqlalchemy-1
|
2c34d2503a17316cae3282192405b9b9d60df6fe
|
[
"MIT"
] | 2 |
2021-06-12T01:38:00.000Z
|
2021-09-05T21:18:29.000Z
|
"""Generic mapping to Select statements"""
import sqlalchemy as sa
from sqlalchemy import column
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
# TODO: more tests mapping to selects
class SelectableNoFromsTest(fixtures.MappedTest, AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
Table(
"common",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", Integer),
Column("extra", String(45)),
)
@classmethod
def setup_classes(cls):
class Subset(cls.Comparable):
pass
def test_no_tables(self):
Subset = self.classes.Subset
selectable = select([column("x"), column("y"), column("z")]).alias()
mapper(Subset, selectable, primary_key=[selectable.c.x])
self.assert_compile(
Session().query(Subset),
"SELECT anon_1.x AS anon_1_x, anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM (SELECT x, y, z) AS anon_1",
use_default_dialect=True,
)
def test_no_table_needs_pl(self):
Subset = self.classes.Subset
selectable = select([column("x"), column("y"), column("z")]).alias()
assert_raises_message(
sa.exc.ArgumentError,
"could not assemble any primary key columns",
mapper,
Subset,
selectable,
)
def test_no_selects(self):
Subset, common = self.classes.Subset, self.tables.common
subset_select = select([common.c.id, common.c.data])
assert_raises(sa.exc.ArgumentError, mapper, Subset, subset_select)
def test_basic(self):
Subset, common = self.classes.Subset, self.tables.common
subset_select = select([common.c.id, common.c.data]).alias()
mapper(Subset, subset_select)
sess = Session(bind=testing.db)
sess.add(Subset(data=1))
sess.flush()
sess.expunge_all()
eq_(sess.query(Subset).all(), [Subset(data=1)])
eq_(sess.query(Subset).filter(Subset.data == 1).one(), Subset(data=1))
eq_(sess.query(Subset).filter(Subset.data != 1).first(), None)
subset_select = sa.orm.class_mapper(Subset).persist_selectable
eq_(
sess.query(Subset).filter(subset_select.c.data == 1).one(),
Subset(data=1),
)
| 32.188889 | 78 | 0.646186 |
4a1a17d57daa4fc6f69a812aa82eedd50f320d3d
| 1,798 |
py
|
Python
|
utils/common_info.py
|
huaweiatlasTest/AtkEval
|
9d1f494e59f979c739ef6ec557a053dac44058e4
|
[
"BSD-3-Clause"
] | null | null | null |
utils/common_info.py
|
huaweiatlasTest/AtkEval
|
9d1f494e59f979c739ef6ec557a053dac44058e4
|
[
"BSD-3-Clause"
] | null | null | null |
utils/common_info.py
|
huaweiatlasTest/AtkEval
|
9d1f494e59f979c739ef6ec557a053dac44058e4
|
[
"BSD-3-Clause"
] | 1 |
2020-01-06T13:51:25.000Z
|
2020-01-06T13:51:25.000Z
|
import os
class CommonInfo(object):
'''class for temporary path info'''
__base = '.tmp'
__sub_model = '.tmp/sub_model'
__golden_data = '.tmp/golden_data'
__davinci_model = '.tmp/davinci_model'
__davinci_data = '.tmp/davinci_data'
__tf_black_list = {'Identity', 'Const', 'Reshape', 'Shape'}
__caffe_black_list = {'Reshape'}
@classmethod
def get_tmp_base_path(cls):
'''get the top temporary path'''
return os.path.abspath(cls.__base)
@classmethod
def get_sub_model_path(cls):
'''get the temporary caffe or tensorflow model path'''
return os.path.abspath(cls.__sub_model)
@classmethod
def get_golden_data_path(cls):
'''get the temporary golden data path'''
return os.path.abspath(cls.__golden_data)
@classmethod
def get_davinci_model_path(cls):
'''get the temporary davinci model path'''
return os.path.abspath(cls.__davinci_model)
@classmethod
def get_davinci_data_path(cls):
'''get the temporary davinci infer output data path'''
return os.path.abspath(cls.__davinci_data)
@classmethod
def get_tf_black_list(cls):
'''get the tensorflow ops which will be ignored'''
return cls.__tf_black_list
@classmethod
def get_caffe_black_list(cls):
'''get the caffe ops which will be ignored'''
return cls.__caffe_black_list
@classmethod
def is_skip_tf_op(cls, op):
'''skip the op in tensorflow black list'''
if op in cls.__tf_black_list:
return True
return False
@classmethod
def is_skip_caffe_layer(cls, layer):
'''skip the op in caffe black list'''
if layer in cls.__caffe_black_list:
return True
return False
| 29 | 63 | 0.650167 |
4a1a1823dd18d1a5cd6214e0f267997e74df93ed
| 27,204 |
py
|
Python
|
lib/sqlalchemy/orm/interfaces.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | 2 |
2015-11-07T11:55:45.000Z
|
2017-09-04T07:56:34.000Z
|
lib/sqlalchemy/orm/interfaces.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/interfaces.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
# orm/interfaces.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines the now deprecated ORM extension classes as well
as ORM internals.
Other than the deprecated extensions, this module and the
classes within should be considered mostly private.
"""
from __future__ import absolute_import
from .. import exc as sa_exc, util, inspect
from ..sql import operators
from collections import deque
from .base import _is_aliased_class, _class_to_mapper
from .base import ONETOMANY, MANYTOONE, MANYTOMANY, EXT_CONTINUE, EXT_STOP, NOT_EXTENSION
from .base import _InspectionAttr, _MappedAttribute
from .path_registry import PathRegistry
import collections
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ExtensionOption',
'InstrumentationManager',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'PropertyOption',
'SessionExtension',
'StrategizedOption',
'StrategizedProperty',
)
class MapperProperty(_MappedAttribute, _InspectionAttr):
"""Manage the relationship of a ``Mapper`` to a single class
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
"""
is_property = True
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
pass
def create_row_processor(self, context, path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
"""
return None, None, None
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
See PropertyLoader for the related instance implementation.
"""
return iter(())
def set_parent(self, parent, init):
self.parent = parent
def instrument_class(self, mapper): # pragma: no-coverage
raise NotImplementedError()
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.MapperProperty`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
_configure_started = False
_configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
pass
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
"""
pass
def is_primary(self):
"""Return True if this ``MapperProperty``'s mapper is the
primary mapper for its class.
This flag is used to indicate that the ``MapperProperty`` can
define attribute instrumentation for the class at the class
level (as opposed to the individual instance level).
"""
return not self.parent.non_primary
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object"""
pass
def compare(self, operator, value, **kw):
"""Return a compare operation for the columns represented by
this ``MapperProperty`` to the given value, which may be a
column value or an instance. 'operator' is an operator from
the operators module, or from sql.Comparator.
By default uses the PropComparator attached to this MapperProperty
under the attribute name "comparator".
"""
return operator(self.comparator, value)
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines boolean, comparison, and other operators for
:class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parentmapper, adapt_to_entity)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@util.memoized_property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
"""
strategy_wildcard_key = None
@util.memoized_property
def _wildcard_path(self):
if self.strategy_wildcard_key:
return ('loaderstrategy', (self.strategy_wildcard_key,))
else:
return None
def _get_context_strategy(self, context, path):
strategy_cls = path._inlined_get_for(self, context, 'loaderstrategy')
if not strategy_cls:
wc_key = self._wildcard_path
if wc_key and wc_key in context.attributes:
strategy_cls = context.attributes[wc_key]
if strategy_cls:
try:
return self._strategies[strategy_cls]
except KeyError:
return self.__init_strategy(strategy_cls)
return self.strategy
def _get_strategy(self, cls):
try:
return self._strategies[cls]
except KeyError:
return self.__init_strategy(cls)
def __init_strategy(self, cls):
self._strategies[cls] = strategy = cls(self)
return strategy
def setup(self, context, entity, path, adapter, **kwargs):
self._get_context_strategy(context, path).\
setup_query(context, entity, path,
adapter, **kwargs)
def create_row_processor(self, context, path, mapper, row, adapter):
return self._get_context_strategy(context, path).\
create_row_processor(context, path,
mapper, row, adapter)
def do_init(self):
self._strategies = {}
self.strategy = self.__init_strategy(self.strategy_class)
def post_instrument_class(self, mapper):
if self.is_primary() and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
_strategies = collections.defaultdict(dict)
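    # _strategy_for registers LoaderStrategy subclasses under a sorted tuple of keyword arguments; _strategy_lookup resolves those keywords by walking the property class MRO.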
@classmethod
def _strategy_for(cls, *keys):
def decorate(dec_cls):
for key in keys:
key = tuple(sorted(key.items()))
cls._strategies[cls][key] = dec_cls
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, **kw):
key = tuple(sorted(kw.items()))
for prop_cls in cls.__mro__:
if prop_cls in cls._strategies:
strategies = cls._strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
raise Exception("can't locate strategy for %s %s" % (cls, kw))
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
def process_query(self, query):
pass
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
Used when secondary loaders resend existing options to a new
Query."""
self.process_query(query)
class PropertyOption(MapperOption):
"""A MapperOption that is applied to a property off the mapper or
one of its child mappers, identified by a dot-separated key
or list of class-bound attributes. """
def __init__(self, key, mapper=None):
self.key = key
self.mapper = mapper
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
paths = self._process_paths(query, raiseerr)
if paths:
self.process_query_property(query, paths)
def process_query_property(self, query, paths):
pass
def __getstate__(self):
d = self.__dict__.copy()
d['key'] = ret = []
for token in util.to_list(self.key):
if isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key))
else:
ret.append(token)
return d
def __setstate__(self, state):
ret = []
for key in state['key']:
if isinstance(key, tuple):
cls, propkey = key
ret.append(getattr(cls, propkey))
else:
ret.append(key)
state['key'] = tuple(ret)
self.__dict__ = state
def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
if _is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = _class_to_mapper(mapper)
for ent in query._mapper_entities:
if ent.corresponds_to(searchfor):
return ent
else:
if raiseerr:
if not list(query._mapper_entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
raise sa_exc.ArgumentError(
"Can't find property '%s' on any entity "
"specified in this Query. Note the full path "
"from root (%s) to target entity must be specified."
% (token, ",".join(str(x) for
x in query._mapper_entities))
)
else:
return None
def _find_entity_basestring(self, query, token, raiseerr):
for ent in query._mapper_entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
return None
@util.dependencies("sqlalchemy.orm.util")
def _process_paths(self, orm_util, query, raiseerr):
"""reconcile the 'key' for this PropertyOption with
the current path and entities of the query.
Return a list of affected paths.
"""
path = PathRegistry.root
entity = None
paths = []
no_result = []
# _current_path implies we're in a
# secondary load with an existing path
current_path = list(query._current_path.path)
tokens = deque(self.key)
while tokens:
token = tokens.popleft()
if isinstance(token, str):
# wildcard token
if token.endswith(':*'):
return [path.token(token)]
sub_tokens = token.split(".", 1)
token = sub_tokens[0]
tokens.extendleft(sub_tokens[1:])
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[1].key == token:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_basestring(
query,
token,
raiseerr)
if entity is None:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
if hasattr(mapper.class_, token):
prop = getattr(mapper.class_, token).property
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't find property named '%s' on the "
"mapped entity %s in this Query. " % (
token, mapper)
)
else:
return no_result
elif isinstance(token, PropComparator):
prop = token.property
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[0:2] == \
[token._parententity, prop]:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_prop_comparator(
query,
prop.key,
token._parententity,
raiseerr)
if not entity:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
else:
raise sa_exc.ArgumentError(
"mapper option expects "
"string key or list of attributes")
assert prop is not None
if raiseerr and not prop.parent.common_parent(mapper):
raise sa_exc.ArgumentError("Attribute '%s' does not "
"link from element '%s'" % (token, path_element))
path = path[path_element][prop]
paths.append(path)
if getattr(token, '_of_type', None):
ac = token._of_type
ext_info = inspect(ac)
path_element = mapper = ext_info.mapper
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper, aliased=True,
_use_mapper_path=True)
ext_info = inspect(ac)
path.set(query._attributes, "path_with_polymorphic", ext_info)
else:
path_element = mapper = getattr(prop, 'mapper', None)
if mapper is None and tokens:
raise sa_exc.ArgumentError(
"Attribute '%s' of entity '%s' does not "
"refer to a mapped entity" %
(token, entity)
)
if current_path:
# ran out of tokens before
# current_path was exhausted.
assert not tokens
return no_result
return paths
class StrategizedOption(PropertyOption):
"""A MapperOption that affects which LoaderStrategy will be used
for an operation by a StrategizedProperty.
"""
chained = False
def process_query_property(self, query, paths):
strategy = self.get_strategy_class()
if self.chained:
for path in paths:
path.set(
query._attributes,
"loaderstrategy",
strategy
)
else:
paths[-1].set(
query._attributes,
"loaderstrategy",
strategy
)
def get_strategy_class(self):
raise NotImplementedError()
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
Simple column attributes may add their represented column to the
list of selected columns, *eager loading* properties may add
``LEFT OUTER JOIN`` clauses to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, adapter, **kwargs):
pass
def create_row_processor(self, context, path, mapper,
row, adapter):
"""Return row processing functions which fulfill the contract
specified by MapperProperty.create_row_processor.
StrategizedProperty delegates its create_row_processor method
directly to this method. """
return None, None, None
def __str__(self):
return str(self.parent_property)
| 33.751861 | 89 | 0.597192 |
4a1a18620178fca8f329b0dcfaf1eeea87cc5027
| 2,268 |
py
|
Python
|
2017/Day1-2.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
2017/Day1-2.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
2017/Day1-2.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
inputData = '3294199471327195994824832197564859876682638188889768298894243832665654681412886862234525991553276578641265589959178414218389329361496673991614673626344552179413995562266818138372393213966143124914469397692587251112663217862879233226763533911128893354536353213847122251463857894159819828724827969576432191847787772732881266875469721189331882228146576832921314638221317393256471998598117289632684663355273845983933845721713497811766995367795857965222183668765517454263354111134841334631345111596131682726196574763165187889337599583345634413436165539744188866156771585647718555182529936669683581662398618765391487164715724849894563314426959348119286955144439452731762666568741612153254469131724137699832984728937865956711925592628456617133695259554548719328229938621332325125972547181236812263887375866231118312954369432937359357266467383318326239572877314765121844831126178173988799765218913178825966268816476559792947359956859989228917136267178571776316345292573489873792149646548747995389669692188457724414468727192819919448275922166321158141365237545222633688372891451842434458527698774342111482498999383831492577615154591278719656798277377363284379468757998373193231795767644654155432692988651312845433511879457921638934877557575241394363721667237778962455961493559848522582413748218971212486373232795878362964873855994697149692824917183375545192119453587398199912564474614219929345185468661129966379693813498542474732198176496694746111576925715493967296487258237854152382365579876894391815759815373319159213475555251488754279888245492373595471189191353244684697662848376529881512529221627313527441221459672786923145165989611223372241149929436247374818467481641931872972582295425936998535194423916544367799522276914445231582272368388831834437562752119325286474352863554693373718848649568451797751926315617575295381964426843625282819524747119726872193569785611959896776143539915299968276374712996485367853494734376257511273443736433464496287219615697341973131715166768916149828396454638596713572963686159214116763'
halfway = int(len(inputData)/2)
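# Part 2 rule: a digit counts toward the sum when it equals the digit halfway
# around the circular sequence (indices wrap past the end of the string).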
sumTotal = 0
for index, digit in enumerate(inputData):
if index >= halfway:
pos = index - halfway
else:
pos = index + halfway
if digit == inputData[pos]:
sumTotal += int(digit)
print(sumTotal)
| 151.2 | 2,028 | 0.962963 |
4a1a18bcd9d83d8b6360a5d75f79158fd5411607
| 1,764 |
py
|
Python
|
src/FFEAT/ffeat/pso/neighborhood/Nearest.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/pso/neighborhood/Nearest.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/pso/neighborhood/Nearest.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
###############################
#
# Created by Patrik Valkovic
# 3/18/2021
#
###############################
from typing import Union, Callable
import torch as t
from .Neighborhood import Neighborhood
_IFU = Union[float, int]
class Nearest(Neighborhood):
"""
Closest neighbors neighborhood. This neighborhood is very costly.
"""
def __init__(self,
size: Union[_IFU, Callable[..., _IFU]],
norm: int = 2):
"""
Closest neighbors neighborhood. This neighborhood is very costly.
:param size: Size of the neighborhood. May be float (then it is fraction of the original population to select),
or integer (then it is number of individuals to select).
:param norm: Which p-norm to use. By default euclidean norm norm=2.
"""
self._size = self._handle_parameter(size)
self._norm = norm
def __call__(self, fitnesses, position, **kwargs) -> t.Tensor:
"""
Creates closest neighbors neighborhood and returns it. This operation is very costly.
:param fitnesses: Current particles' fitness.
:param position: Current particle's positions.
:param kwargs: Keyword arguments.
:return: Tensor of indices assigning each particle its neighborhood.
"""
pop_size = len(fitnesses)
size = self._handle_size(self._size(fitnesses, position, **kwargs), pop_size)
distances = t.subtract(position[:,None,:], position[None,:,:])
distances = t.abs(distances, out=distances)
distances = t.pow(distances, self._norm, out=distances)
distances = t.sum(distances, dim=list(range(2, len(position.shape)+1)))
min = t.argsort(distances, dim=-1)[:,:size]
return min
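# A minimal usage sketch (not part of the original module; names below are only
# illustrative). It assumes, as the code above implies, that the base Neighborhood
# class resolves a float `size` into a per-call fraction of the swarm:
#
#   import torch as t
#   from ffeat.pso.neighborhood import Nearest
#
#   neighborhood = Nearest(0.1, norm=2)           # 10% closest particles, euclidean norm
#   fitnesses = t.rand(100)                       # one fitness value per particle
#   positions = t.rand(100, 40)                   # 100 particles in 40 dimensions
#   indices = neighborhood(fitnesses, positions)  # neighbor indices, one row per particle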
| 38.347826 | 119 | 0.62415 |
4a1a19bf6ba34c6cfbcb836d8e4b4aac818c7c3d
| 4,153 |
py
|
Python
|
py3-old/generateData.py
|
adrianhindes/cavity-sml
|
ad8905bdecfb9f106f6149da4edf31eb04ac9630
|
[
"MIT"
] | 1 |
2017-05-01T10:29:22.000Z
|
2017-05-01T10:29:22.000Z
|
py3-old/generateData.py
|
adrianhindes/cavity-sml
|
ad8905bdecfb9f106f6149da4edf31eb04ac9630
|
[
"MIT"
] | null | null | null |
py3-old/generateData.py
|
adrianhindes/cavity-sml
|
ad8905bdecfb9f106f6149da4edf31eb04ac9630
|
[
"MIT"
] | null | null | null |
# Take raw Finesse data
# Transform through rotations, shears, rescalings and flips to
# generate training and validation data sets
from keras.preprocessing.image import ImageDataGenerator, array_to_img, \
img_to_array, load_img
from skimage import util, io
from PIL import Image
import os
from shutil import copyfile
# rotation range randomly rotates pictures (0-180)
# width/height_shift ranges (frac of total w/h) to randomly translate
# rescale multiplies data before other processing
# shear_range randomly applies shearing transforms
# zoom_range randomly zooms inside pictures
# horizontal flip does what it says
# fill mode fills newly created pixels, can appear after transforms
trainDatagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
testDatagen = ImageDataGenerator(
rotation_range=25,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.3,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
#
#
maxMode = 6
dataFolder = 'rawData/'
ext = '.png'
os.chdir('rawData')
files = os.listdir()
imageList = [x for x in files if '.png' in x]
os.chdir('..')
possibleModes = [str(m)+str(n) for m in range(maxMode) for n in range(maxMode)]
# Create file structure if it does not exist
if not os.path.exists('validation'): os.mkdir('validation')
os.chdir('validation')
for mode in possibleModes:
if not os.path.exists(mode): os.mkdir(mode)
os.chdir('..')
if not os.path.exists('training'): os.mkdir('training')
os.chdir('training')
for mode in possibleModes:
if not os.path.exists(mode): os.mkdir(mode)
os.chdir('..')
# Generators which read pictures in subfolders of training and validation,
# indefinitely generate batches of augmented cavity images
print('Generating training data set')
for folder in possibleModes:
filteredImages = [x for x in files if 'cavity'+folder in x]
for image in filteredImages:
loaded = load_img('rawData/'+image)
array = img_to_array(loaded)
array = array.reshape((1,)+array.shape)
i = 0
for batch in trainDatagen.flow(array, batch_size=1, save_to_dir='training/'+folder,
save_prefix=folder, save_format='png'):
i += 1
if i > 10: break
print('generated '+image+' set')
print('Generating validation data set')
for folder in possibleModes:
filteredImages = [x for x in files if 'cavity'+folder in x]
for image in filteredImages:
loaded = load_img('rawData/'+image)
array = img_to_array(loaded)
array = array.reshape((1,)+array.shape)
i = 0
for batch in testDatagen.flow(array, batch_size=1, save_to_dir='validation/'+folder,
save_prefix=folder, save_format='png'):
i += 1
if i > 10: break
print('generated '+image+' set')
#Noisify folders
for folder in possibleModes:
imagesTrain = os.listdir('training/'+folder)
os.chdir('training/'+folder)
for image in imagesTrain:
loaded = io.imread(image)
noisy = util.random_noise(loaded, mode='gaussian', clip=True)
io.imsave(image, noisy)
os.chdir('../../')
imagesValidate = os.listdir('validation/'+folder)
os.chdir('validation/'+folder)
for image in imagesValidate:
loaded = io.imread(image)
noisy = util.random_noise(loaded, mode='gaussian', clip=True)
io.imsave(image, noisy)
os.chdir('../../')
print('generated noise for '+folder)
# for noiseNum in range(2):
# noisyName = (image.rstrip(ext))+'-'+str(noiseNum)+'.png'
# noisyCavity1 = util.random_noise(cavityImage, mode='gaussian', clip=True, seed=(noiseNum+1))
# noisyCavity2 = util.random_noise(cavityImage, mode='gaussian', clip=True, seed=(noiseNum+5))
# io.imsave('training/'+mode+'/'+noisyName, noisyCavity1)
# io.imsave('validation/'+mode+'/'+noisyName, noisyCavity2)
| 33.764228 | 105 | 0.660005 |
4a1a1a42b0c689f209aae28c3895be22830275f4
| 1,511 |
py
|
Python
|
em.py
|
Fatman13/gta_swarm
|
1c4603f39cd7831f5907fd619594452b3320f75f
|
[
"MIT"
] | null | null | null |
em.py
|
Fatman13/gta_swarm
|
1c4603f39cd7831f5907fd619594452b3320f75f
|
[
"MIT"
] | null | null | null |
em.py
|
Fatman13/gta_swarm
|
1c4603f39cd7831f5907fd619594452b3320f75f
|
[
"MIT"
] | null | null | null |
# down vote
def send_email(user, pwd, recipient, subject, body):
import smtplib
gmail_user = user
gmail_pwd = pwd
FROM = user
TO = recipient if type(recipient) is list else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()
        server.login(gmail_user, gmail_pwd)
        server.sendmail(FROM, TO, message)
        server.close()
        print('successfully sent the mail')
    except Exception:
        print("failed to send mail")
send_email('tctctcly@gmail.com', 'Lyulyu!13', 'yu.leng@gta-travel.com', 'Test from pyhton', 'TEST, TEST, TEST.')
# # SMTP_SSL Example
# server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
# server_ssl.ehlo() # optional, called by login()
# server_ssl.login('tctctcly@gmail.com', 'Lyulyu!13')
# # ssl server doesn't support or need tls, so don't call server_ssl.starttls()
# server_ssl.sendmail(FROM, TO, message)
# #server_ssl.quit()
# server_ssl.close()
# print 'successfully sent the mail'
| 32.847826 | 113 | 0.622105 |
4a1a1ba82cba5b4e37e55fa50cb7dbbf9f7489e4
| 5,631 |
py
|
Python
|
ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
|
fangxingli/mambari
|
6da9f6090d4d42623529b73413c8feb8b7f6fe45
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
|
fangxingli/mambari
|
6da9f6090d4d42623529b73413c8feb8b7f6fe45
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
|
fangxingli/mambari
|
6da9f6090d4d42623529b73413c8feb8b7f6fe45
|
[
"Apache-2.0",
"MIT"
] | 3 |
2017-10-31T11:42:31.000Z
|
2021-04-26T07:17:53.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import logging
from unittest import TestCase
from mock.mock import Mock, MagicMock, patch
from resource_management.libraries.functions import dfs_datanode_helper
from resource_management.core.logger import Logger
class StubParams(object):
"""
Dummy class to fake params where params.x performs a get on params.dict["x"]
"""
def __init__(self):
self.dict = {}
def __getattr__(self, name):
return self.dict[name]
def __repr__(self):
name = self.__class__.__name__
mocks = set(dir(self))
mocks = [x for x in mocks if not str(x).startswith("__")] # Exclude private methods
return "<StubParams: {0}; mocks: {1}>".format(name, str(mocks))
def fake_create_dir(directory, other):
"""
Fake function used as function pointer.
"""
print "Fake function to create directory {0}".format(directory)
class TestDatanodeHelper(TestCase):
"""
Test the functionality of the dfs_datanode_helper.py
"""
logger = logging.getLogger('TestDatanodeHelper')
grid0 = "/grid/0/data"
grid1 = "/grid/1/data"
grid2 = "/grid/2/data"
params = StubParams()
params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
params.dfs_data_dir = "{0},{1},{2}".format(grid0, grid1, grid2)
@patch.object(Logger, "info")
@patch.object(Logger, "error")
def test_normalized(self, log_error, log_info):
"""
Test that the data dirs are normalized by removing leading and trailing whitespace, and case sensitive.
"""
params = StubParams()
params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
params.dfs_data_dir = "/grid/0/data , /grid/1/data ,/GRID/2/Data/"
# Function under test
dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, params, update_cache=False)
for (name, args, kwargs) in log_info.mock_calls:
print args[0]
for (name, args, kwargs) in log_error.mock_calls:
print args[0]
log_info.assert_any_call("Forcefully creating directory: /grid/0/data")
log_info.assert_any_call("Forcefully creating directory: /grid/1/data")
log_info.assert_any_call("Forcefully creating directory: /GRID/2/Data/")
self.assertEquals(0, log_error.call_count)
@patch.object(Logger, "info")
@patch.object(Logger, "error")
@patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
@patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
@patch.object(os.path, "isdir")
@patch.object(os.path, "exists")
def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
"""
Test when grid2 becomes unmounted
"""
mock_os_exists.return_value = True # Indicate that history file exists
# Initially, all grids were mounted
mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"}
# Grid2 then becomes unmounted
mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/"] * 2
mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
# Function under test
dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
for (name, args, kwargs) in log_info.mock_calls:
print args[0]
error_logs = []
for (name, args, kwargs) in log_error.mock_calls:
error_logs.append(args[0]) # this is a one-tuple
error_msg = "".join(error_logs)
self.assertEquals(1, log_error.call_count)
self.assertTrue("Directory /grid/2/data does not exist and became unmounted from /dev2" in error_msg)
@patch.object(Logger, "info")
@patch.object(Logger, "error")
@patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
@patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
@patch.object(os.path, "isdir")
@patch.object(os.path, "exists")
def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
"""
Test when grid2 becomes remounted
"""
mock_os_exists.return_value = True # Indicate that history file exists
# Initially, all grids were mounted
mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/"}
# Grid2 then becomes remounted
mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/dev2"] * 2
mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
# Function under test
dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
for (name, args, kwargs) in log_info.mock_calls:
print args[0]
for (name, args, kwargs) in log_error.mock_calls:
print args[0]
self.assertEquals(0, log_error.call_count)
| 37.046053 | 152 | 0.731486 |
4a1a1bbb7ccf7af970e07d65191c2502a68fe265
| 2,525 |
py
|
Python
|
2019/fourier_xmas/xmas.py
|
jmenzelupb/MSMP-xmas
|
856a914de5699347ba17716edb8c02842651de24
|
[
"Unlicense"
] | 1 |
2018-01-09T09:39:20.000Z
|
2018-01-09T09:39:20.000Z
|
2019/fourier_xmas/xmas.py
|
jmenzelupb/MSMP-xmas
|
856a914de5699347ba17716edb8c02842651de24
|
[
"Unlicense"
] | null | null | null |
2019/fourier_xmas/xmas.py
|
jmenzelupb/MSMP-xmas
|
856a914de5699347ba17716edb8c02842651de24
|
[
"Unlicense"
] | 8 |
2017-12-20T13:50:49.000Z
|
2019-12-16T10:18:36.000Z
|
#MSMP Christmas Challenge, entry by Johannes Menzel
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
Box = [0.0, 0.0 + 3.5j, 11.0 + 3.5j, 11.0, 0.0]
X=[0.0, -1.0 + -1.0j, 1.0 + 1.0j, 0.0, -1.0 + 1.0j, 1.0 + -1.0j]
M=[-1.0 + -1.0j, -1.0 + 1.0j, 0.0, 1.0 + 1.0j, 1.0 + -1.0j]
A=[-0.6 + -1.0j, 1.0j,0.6 + -1.0j,-0.8 + -0.1j, 0.8 + -0.1j]
S=[0.0 + 1.0j,0.5 + 0.7j, 0.0 + 1.0j, -0.5 + 0.7j, 0.5 + -0.7j, 0.0 - 1.0j, -0.5 + -0.7j]
print(A)
X_offset=1.75 + 1.75j
M_offset=X_offset + 2.5
A_offset=M_offset + 2.5
S_offset=A_offset + 2.5
print(len(A))
for i in range(len(X)):
X[i] += X_offset
for i in range(len(M)):
M[i] += M_offset
for i in range(len(A)):
A[i] += A_offset
for i in range(len(S)):
S[i] += S_offset
underline=[1.0 +- 0.2j, -9.0 + -0.2j, 2.0 + -0.4j]
for i in range(len(underline)):
underline[i] += S[-2]
endpoints = [11.0 + 0.4j, 11.0, 0.0]
func_points = Box + X + M + A + S + underline + endpoints
intervals=[50,100,50,100,20,50,100,50,50,100,20,70,50,50,70,20,90,90,50,40,30,20,20,20,40,20,20,20,100,90,20,20,50]
print(func_points)
T=sum(intervals)
print(len(func_points), len(intervals))
x = np.linspace(func_points[0],func_points[1],intervals[0])
for i in range(1,len(func_points)-1):
tmp = np.linspace(func_points[i],func_points[i+1],intervals[i])
x = np.concatenate((x,tmp))
t = np.arange(0,T)
w0 = 2 * np.pi / T
coeff = []
e_func = []
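# Approximate the complex Fourier series coefficients of the closed contour,
# c_k = (1/T) * integral of x(t) * exp(-1j*k*w0*t) dt for k = -60..59 (np.trapz
# does the integration), and keep the matching basis functions exp(1j*k*w0*t).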
for i in range(-60,60):
x_i = 1/T * np.trapz(x*(np.e ** (-1j * w0 * i * t)))
e_i = np.e ** (1j * w0 * i * t)
print(x_i)
coeff.append(x_i)
e_func.append(e_i)
coeff = np.array(coeff)
x_hat = coeff[0] * e_func[0]
for i in range(1,len(coeff)):
x_hat += coeff[i] * e_func[i]
coeff_anim = []
for j in range(T):
points = [0]
for i in range(len(coeff)):
new_point = points[-1] + coeff[i] * e_func[i][j]
points.append(new_point)
points = np.array(points)
coeff_anim.append(points)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
def animate(i):
if(i < T):
x_1 = np.real(x_hat[:i])
y_1 = np.imag(x_hat[:i])
x_2 = np.real(coeff_anim[i])
y_2 = np.imag(coeff_anim[i])
ax.clear()
#ax.set_xlim([-1.5,7.5])
#ax.set_ylim([-2.2,1.5])
ax.plot(x_1,y_1)
ax.plot(x_2,y_2)
else:
ax.clear()
#ax.set_xlim([-1.5,7.5])
#ax.set_ylim([-2.2,1.5])
ax.plot(np.real(x_hat),np.imag(x_hat))
anim = animation.FuncAnimation(fig,animate,interval=20)
plt.show()
| 27.445652 | 115 | 0.574653 |
4a1a1bc3d3980acdfea619990607b63eec45e291
| 1,845 |
py
|
Python
|
arakat-backend/validity/PipelineValidityChecker.py
|
ahmetfyildirim/arakat
|
01cfebdb77cdcec5723b0dc9d7d597a3b288f9e1
|
[
"Apache-2.0"
] | null | null | null |
arakat-backend/validity/PipelineValidityChecker.py
|
ahmetfyildirim/arakat
|
01cfebdb77cdcec5723b0dc9d7d597a3b288f9e1
|
[
"Apache-2.0"
] | null | null | null |
arakat-backend/validity/PipelineValidityChecker.py
|
ahmetfyildirim/arakat
|
01cfebdb77cdcec5723b0dc9d7d597a3b288f9e1
|
[
"Apache-2.0"
] | null | null | null |
from domain.ErrorTypes import ErrorTypes
from domain.NodeFamilyTypes import NodeFamilyTypes
def check_validity(pipeline_nodes, pipeline_edges):
for node_id in pipeline_nodes:
if(not pipeline_nodes[node_id]["compatible_with_spark_pipeline"]):
return ErrorTypes.INCOMPATIBLE_PIPELINE_NODE, []
dependents={}
with_no_reqs = set(pipeline_nodes.keys())
for edge_id in pipeline_edges:
if(pipeline_edges[edge_id]["type"] != "pipeline"):
return ErrorTypes.NOT_COMPATIBLE_EDGE_WITH_PIPELINE_ERROR, []
cur_node_ids=edge_id.split("-")
dependents[cur_node_ids[0]]=cur_node_ids[1]
if(cur_node_ids[1] in with_no_reqs):
with_no_reqs.remove(cur_node_ids[1])
else:
# Only handles multiple incoming edges case...
return ErrorTypes.NON_LINEAR_PIPELINE_ERROR, []
if(len(with_no_reqs) != 1):
        return ErrorTypes.REQUIRES_EXACTLY_ONE_ENTRANCE_NODE_ERROR, []
if(len(dependents) != (len(pipeline_nodes) - 1)):
# If not match, it means that there are splits in pipeline
# This completes the non-linear pipeline check
return ErrorTypes.NON_LINEAR_PIPELINE_ERROR, []
cur_node_id=list(with_no_reqs)[0]
pipeline_order=[]
# We can now sort the ids for linear pipeline...
while(bool(dependents)):
pipeline_order.append(cur_node_id)
temp=cur_node_id
cur_node_id=dependents[cur_node_id]
del dependents[temp]
pipeline_order.append(cur_node_id)
for elem in pipeline_order:
cur_family=pipeline_nodes[elem]["family"]
if(not (cur_family == NodeFamilyTypes.Transformer.value or cur_family == NodeFamilyTypes.Estimator.value)):
return ErrorTypes.NOT_COMPATIBLE_NODES_IN_PIPELINE_ERROR, []
return ErrorTypes.NO_ERROR, pipeline_order
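# Illustrative call shape, inferred only from the checks above (the upstream code
# may attach further fields to nodes and edges):
#
#   pipeline_nodes = {
#       "n1": {"compatible_with_spark_pipeline": True, "family": NodeFamilyTypes.Transformer.value},
#       "n2": {"compatible_with_spark_pipeline": True, "family": NodeFamilyTypes.Estimator.value},
#   }
#   pipeline_edges = {"n1-n2": {"type": "pipeline"}}
#   error, order = check_validity(pipeline_nodes, pipeline_edges)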
| 39.255319 | 115 | 0.704607 |
4a1a1c230882a509e671c46975619de635a6a57b
| 6,311 |
py
|
Python
|
src/hdusd/ui/hdrpr_render.py
|
bnagirniak/BlenderUSDHydraAddon
|
d71410e6795c749001985d61c104d62ee10d07dd
|
[
"Apache-2.0"
] | 212 |
2020-12-08T18:54:00.000Z
|
2022-03-31T21:52:12.000Z
|
src/hdusd/ui/hdrpr_render.py
|
bnagirniak/BlenderUSDHydraAddon
|
d71410e6795c749001985d61c104d62ee10d07dd
|
[
"Apache-2.0"
] | 62 |
2020-12-09T21:26:27.000Z
|
2022-03-29T12:10:36.000Z
|
src/hdusd/ui/hdrpr_render.py
|
bnagirniak/BlenderUSDHydraAddon
|
d71410e6795c749001985d61c104d62ee10d07dd
|
[
"Apache-2.0"
] | 23 |
2020-12-09T09:39:17.000Z
|
2022-03-21T19:59:54.000Z
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
from . import HdUSD_Panel
#
# FINAL RENDER SETTINGS
#
class HDUSD_RENDER_PT_hdrpr_settings_final(HdUSD_Panel):
bl_label = "RPR Settings"
bl_parent_id = 'HDUSD_RENDER_PT_render_settings_final'
@classmethod
def poll(cls, context):
return super().poll(context) and context.scene.hdusd.final.delegate == 'HdRprPlugin'
def draw(self, context):
hdrpr = context.scene.hdusd.final.hdrpr
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
col = layout.column()
col.prop(hdrpr, "device")
col.prop(hdrpr, "render_quality")
col.prop(hdrpr, "render_mode")
class HDUSD_RENDER_PT_hdrpr_settings_samples_final(HdUSD_Panel):
bl_label = "Samples"
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_final'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
hdrpr = context.scene.hdusd.final.hdrpr
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(hdrpr, "max_samples")
col = layout.column(align=True)
col.prop(hdrpr, "variance_threshold")
row = col.row()
row.enabled = hdrpr.variance_threshold > 0.0
row.prop(hdrpr, "min_adaptive_samples")
class HDUSD_RENDER_PT_hdrpr_settings_quality_final(HdUSD_Panel):
bl_label = "Quality"
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_final'
bl_space_type = 'PROPERTIES'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
hdrpr = context.scene.hdusd.final.hdrpr
quality = hdrpr.quality
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
col = layout.column(align=True)
col.prop(quality, "max_ray_depth")
col.prop(quality, "max_ray_depth_diffuse")
col.prop(quality, "max_ray_depth_glossy")
col.prop(quality, "max_ray_depth_refraction")
col.prop(quality, "max_ray_depth_glossy_refraction")
layout.prop(quality, "raycast_epsilon")
col = layout.column(align=True)
col.prop(quality, "enable_radiance_clamping")
row = col.row()
row.enabled = quality.enable_radiance_clamping
row.prop(quality, "radiance_clamping")
class HDUSD_RENDER_PT_hdrpr_settings_denoise_final(HdUSD_Panel):
bl_label = ""
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_final'
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
denoise = context.scene.hdusd.final.hdrpr.denoise
self.layout.prop(denoise, "enable")
def draw(self, context):
denoise = context.scene.hdusd.final.hdrpr.denoise
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.enabled = denoise.enable
layout.prop(denoise, "min_iter")
layout.prop(denoise, "iter_step")
#
# VIEWPORT RENDER SETTINGS
#
class HDUSD_RENDER_PT_hdrpr_settings_viewport(HdUSD_Panel):
bl_label = "RPR Settings"
bl_parent_id = 'HDUSD_RENDER_PT_render_settings_viewport'
@classmethod
def poll(cls, context):
return super().poll(context) and context.scene.hdusd.viewport.delegate == 'HdRprPlugin'
def draw(self, context):
hdrpr = context.scene.hdusd.viewport.hdrpr
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout = layout.column()
layout.prop(hdrpr, "device")
layout.prop(hdrpr, "render_quality")
layout.prop(hdrpr, "render_mode")
class HDUSD_RENDER_PT_hdrpr_settings_samples_viewport(HdUSD_Panel):
bl_label = "Samples"
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_viewport'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
hdrpr = context.scene.hdusd.viewport.hdrpr
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(hdrpr, "max_samples")
col = layout.column(align=True)
col.prop(hdrpr, "variance_threshold")
row = col.row()
row.enabled = hdrpr.variance_threshold > 0.0
row.prop(hdrpr, "min_adaptive_samples")
class HDUSD_RENDER_PT_hdrpr_settings_quality_viewport(HdUSD_Panel):
bl_label = "Quality"
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_viewport'
bl_space_type = 'PROPERTIES'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
hdrpr = context.scene.hdusd.viewport.hdrpr
quality = hdrpr.interactive_quality
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(quality, "max_ray_depth")
# layout.prop(quality, "enable_downscale")
# layout.prop(quality, "resolution_downscale")
class HDUSD_RENDER_PT_hdrpr_settings_denoise_viewport(HdUSD_Panel):
bl_label = ""
bl_parent_id = 'HDUSD_RENDER_PT_hdrpr_settings_viewport'
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
denoise = context.scene.hdusd.viewport.hdrpr.denoise
self.layout.prop(denoise, "enable")
def draw(self, context):
denoise = context.scene.hdusd.viewport.hdrpr.denoise
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.enabled = denoise.enable
layout.prop(denoise, "min_iter")
layout.prop(denoise, "iter_step")
| 32.035533 | 95 | 0.678181 |
4a1a1d1a32152282a5a04148a6c549f1be0ee1d1
| 1,397 |
py
|
Python
|
connector/facebook/utils.py
|
Ahmed0Sultan/cipher-chatbot
|
3a5613976ea9d88d98eb8cfee46a0472fb24aafd
|
[
"MIT"
] | 3 |
2021-05-06T15:09:49.000Z
|
2022-02-16T16:34:00.000Z
|
connector/facebook/utils.py
|
Ahmed0Sultan/cipher-chatbot
|
3a5613976ea9d88d98eb8cfee46a0472fb24aafd
|
[
"MIT"
] | null | null | null |
connector/facebook/utils.py
|
Ahmed0Sultan/cipher-chatbot
|
3a5613976ea9d88d98eb8cfee46a0472fb24aafd
|
[
"MIT"
] | null | null | null |
import hashlib
import hmac
import six
def validate_hub_signature(app_secret, request_payload, hub_signature_header):
"""
@inputs:
app_secret: Secret Key for application
request_payload: request body
hub_signature_header: X-Hub-Signature header sent with request
@outputs:
boolean indicated that hub signature is validated
"""
try:
hash_method, hub_signature = hub_signature_header.split('=')
except Exception:
pass
else:
digest_module = getattr(hashlib, hash_method)
hmac_object = hmac.new(str(app_secret), str(request_payload), digest_module)
generated_hash = hmac_object.hexdigest()
if hub_signature == generated_hash:
return True
return False
def generate_app_secret_proof(access_token, app_secret):
"""
@inputs:
access_token: page access token
app_secret_token: app secret key
@outputs:
appsecret_proof: HMAC-SHA256 hash of page access token
using app_secret as the key
"""
if six.PY2:
hmac_object = hmac.new(str(app_secret), str(access_token), hashlib.sha256)
else:
hmac_object = hmac.new(bytearray(app_secret, 'utf8'), str(access_token).encode('utf8'), hashlib.sha256)
generated_hash = hmac_object.hexdigest()
return generated_hash
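# A hedged usage sketch (not part of the original module; the constant names are
# placeholders). Note that validate_hub_signature() passes str objects to hmac.new(),
# which only works on Python 2; on Python 3 the inputs would need to be bytes:
#
#   ok = validate_hub_signature(APP_SECRET, request_body,
#                               headers.get('X-Hub-Signature'))
#   proof = generate_app_secret_proof(PAGE_ACCESS_TOKEN, APP_SECRET)
#   # send `proof` as the appsecret_proof parameter alongside the page access token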
| 33.261905 | 111 | 0.65927 |
4a1a1d370a5bf8fe2972d44b9b3e0b6cca38ed64
| 29,477 |
py
|
Python
|
hparams.py
|
gabrieldi95/Tacotron-2
|
766e3b3f7411f8f779ff60f2b6a0ac3bb55219cb
|
[
"MIT"
] | null | null | null |
hparams.py
|
gabrieldi95/Tacotron-2
|
766e3b3f7411f8f779ff60f2b6a0ac3bb55219cb
|
[
"MIT"
] | null | null | null |
hparams.py
|
gabrieldi95/Tacotron-2
|
766e3b3f7411f8f779ff60f2b6a0ac3bb55219cb
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
# Default hyperparameters
hparams = tf.contrib.training.HParams(
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners='english_cleaners',
#If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the GPU idx on run. example:
	#example 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py --model='Tacotron' --hparams='tacotron_gpu_start_idx=2'
#If you want to train on multiple GPUs, simply specify the number of GPUs available, and the idx of the first GPU to use. example:
#example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py --model='Tacotron' --hparams='tacotron_num_gpus=4, tacotron_gpu_start_idx=0'
#The hparams arguments can be directly modified on this hparams.py file instead of being specified on run if preferred!
#If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be trained on True mel spectrograms), one needs to specify different GPU idxes.
#example Tacotron+WaveNet on a machine with 4 or more GPUs. Two GPUs for each model:
# CUDA_VISIBLE_DEVICES=0,1 python train.py --model='Tacotron' --hparams='tacotron_num_gpus=2'
	# CUDA_VISIBLE_DEVICES=2,3 python train.py --model='WaveNet' --hparams='wavenet_num_gpus=2'
#IMPORTANT NOTES: The Multi-GPU performance highly depends on your hardware and optimal parameters change between rigs. Default are optimized for servers.
#If using N GPUs, please multiply the tacotron_batch_size by N below in the hparams! (tacotron_batch_size = 32 * N)
#Never use lower batch size than 32 on a single GPU!
#Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than 8 if GPU is having OOM, minimum 2)
#Please also apply the synthesis batch size modification likewise. (if N GPUs are used for synthesis, minimal batch size must be N, minimum of 1 sample per GPU)
#We did not add an automatic multi-GPU batch size computation to avoid confusion in the user's mind and to provide more control to the user for
#resources related decisions.
#Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a little faster than the original
# pipeline for a single GPU as well. Great work!
#Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Both Tacotron and WaveNet can be trained on multi-GPU: data parallelization)
#Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_num_gpus = 1, #Determines the number of gpus in use for Tacotron training.
wavenet_num_gpus = 1, #Determines the number of gpus in use for WaveNet training.
split_on_cpu = True, #Determines whether to split data on CPU or on first GPU. This is automatically True when more than 1 GPU is used.
#(Recommend: False on slow CPUs/Disks, True otherwise for small speed boost)
###########################################################################################################################################
#Audio
#Audio parameters are the most important parameters to tune when using this work on your personal data. Below are the beginner steps to adapt
#this work to your personal data:
# 1- Determine my data sample rate: First you need to determine your audio sample_rate (how many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz, so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
# 3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 + 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] * hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the parameter "max_time_steps".
# 5- Finally comes the silence trimming. This very much data dependent, so I suggest trying preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are showing black silent regions on top), then restart from step 2.
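	# Worked example of steps 2-4 for the assumed 24kHz case above (comments only, not a tested preset):
	#   sample_rate = 24000
	#   win_size = int(0.05 * sample_rate)   # 1200
	#   hop_size = win_size // 4             # 300 (= 0.0125 * sample_rate)
	#   n_fft = 2048                         # first power of 2 that is >= win_size
	#   num_freq = n_fft // 2 + 1            # 1025
	#   upsample_scales = [15, 20]           # 15 * 20 = 300 = hop_size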
num_mels = 80, #Number of mel-spectrogram channels and local conditioning dimensionality
num_freq = 1025, # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network
rescale = True, #Whether to rescale audio prior to preprocessing
rescaling_max = 0.999, #Rescaling value
#train samples of lengths between 3sec and 14sec are more than enough to make a model capable of generating consistent speech.
clip_mels_length = True, #For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors, also consider clipping your samples to smaller chunks)
max_mel_frames = 900, #Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3 and still getting OOM errors.
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
# It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
	# Does not work if n_fft is not a multiple of hop_size!!
use_lws=False, #Only used to set as True if using WaveNet, no difference in performance is observed in either cases.
silence_threshold=2, #silence threshold used for sound trimming for wavenet preprocessing
#Mel spectrogram
n_fft = 2048, #Extra window size is filled with 0 paddings to match this parameter
hop_size = 275, #For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)
win_size = 1100, #For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
sample_rate = 22050, #22050 Hz (corresponding to ljspeech dataset) (sox --i <filename>)
frame_shift_ms = None, #Can replace hop_size parameter. (Recommended: 12.5)
magnitude_power = 2., #The power of the spectrogram magnitude (1. for energy, 2. for power)
#M-AILABS (and other datasets) trim params (there parameters are usually correct for any data, but definitely must be tuned for specific speakers)
trim_silence = True, #Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
trim_fft_size = 2048, #Trimming window size
	trim_hop_size = 512, #Trimming hop length
trim_top_db = 40, #Trimming db difference from reference db (smaller==harder trim.)
#Mel and Linear spectrograms normalization/scaling and clipping
signal_normalization = True, #Whether to normalize mel spectrograms to some predefined range (following below parameters)
allow_clipping_in_normalization = True, #Only relevant if mel_normalization = True
symmetric_mels = True, #Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, faster and cleaner convergence)
max_abs_value = 4., #max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not be too big to avoid gradient explosion,
#not too small for fast convergence)
normalize_for_wavenet = True, #whether to rescale to [0, 1] for wavenet. (better audio quality)
clip_for_wavenet = True, #whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)
wavenet_pad_sides = 1, #Can be 1 or 2. 1 for pad right only, 2 for both sides padding.
#Contribution by @begeekmyfriend
#Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude levels. Also allows for better G&L phase reconstruction)
preemphasize = True, #whether to apply filter
preemphasis = 0.97, #filter coefficient.
#Limits
min_level_db = -100,
ref_level_db = 20,
fmin = 95, #Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax = 7600, #To be increased/reduced depending on data.
#Griffin Lim
power = 1.5, #Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.
griffin_lim_iters = 60, #Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.
GL_on_GPU = True, #Whether to use G&L GPU version as part of tensorflow graph. (Usually much faster than CPU but slightly worse quality too).
###########################################################################################################################################
#Tacotron
#Model general type
outputs_per_step = 1, #number of frames to generate at each decoding step (increase to speed up computation and allows for higher batch size, decreases G&L audio quality)
stop_at_any = True, #Determines whether the decoder should stop when predicting <stop> to any frame or to all of them (True works pretty well)
batch_norm_position = 'after', #Can be in ('before', 'after'). Determines whether we use batch norm before or after the activation function (relu). Matter for debate.
clip_outputs = True, #Whether to clip spectrograms to T2_output_range (even in loss computation). ie: Don't penalize model for exceeding output range and bring back to borders.
lower_bound_decay = 0.1, #Small regularizer for noise synthesis by adding small range of penalty for silence regions. Set to 0 to clip in Tacotron range.
#Input parameters
embedding_dim = 512, #dimension of embedding space
#Encoder parameters
enc_conv_num_layers = 3, #number of encoder convolutional layers
enc_conv_kernel_size = (5, ), #size of encoder convolution filters for each layer
enc_conv_channels = 512, #number of encoder convolutions filters for each layer
encoder_lstm_units = 256, #number of lstm units for each direction (forward and backward)
#Attention mechanism
smoothing = False, #Whether to smooth the attention normalization function
attention_dim = 128, #dimension of attention space
attention_filters = 32, #number of attention convolution filters
attention_kernel = (31, ), #kernel size of attention convolution
cumulative_weights = True, #Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)
#Attention synthesis constraints
#"Monotonic" constraint forces the model to only look at the forwards attention_win_size steps.
#"Window" allows the model to look at attention_win_size neighbors, both forward and backward steps.
synthesis_constraint = False, #Whether to use attention windows constraints in synthesis only (Useful for long utterances synthesis)
synthesis_constraint_type = 'window', #can be in ('window', 'monotonic').
	attention_win_size = 7, #Side of the window. Current step does not count. If mode is window and attention_win_size is odd, the extra step is given to the backward part of the window.
#Decoder
prenet_layers = [256, 256], #number of layers and number of units of prenet
decoder_layers = 2, #number of decoder lstm layers
decoder_lstm_units = 1024, #number of decoder lstm units on each layer
max_iters = 10000, #Max decoder steps during inference (Just for safety from infinite loop cases)
#Residual postnet
postnet_num_layers = 5, #number of postnet convolutional layers
postnet_kernel_size = (5, ), #size of postnet convolution filters for each layer
postnet_channels = 512, #number of postnet convolution filters for each layer
#CBHG mel->linear postnet
cbhg_kernels = 8, #All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act as "K-grams"
cbhg_conv_channels = 128, #Channels of the convolution bank
cbhg_pool_size = 2, #pooling size of the CBHG
cbhg_projection = 256, #projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
cbhg_projection_kernel_size = 3, #kernel_size of the CBHG projections
cbhg_highwaynet_layers = 4, #Number of HighwayNet layers
cbhg_highway_units = 128, #Number of units used in HighwayNet fully connected layers
cbhg_rnn_units = 128, #Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in shape
#Loss params
mask_encoder = True, #whether to mask encoder padding while computing attention. Set to True for better prosody but slower convergence.
mask_decoder = False, #Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not be weighted, else recommended pos_weight = 20)
cross_entropy_pos_weight = 1, #Use class weights to reduce the stop token classes imbalance (by adding more penalty on False Negatives (FN)) (1 = disabled)
predict_linear = True, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)
###########################################################################################################################################
#Wavenet
# Input type:
# 1. raw [-1, 1]
# 2. mulaw [-1, 1]
# 3. mulaw-quantize [0, mu]
# If input_type is raw or mulaw, network assumes scalar input and
# discretized mixture of logistic distributions output, otherwise one-hot
# input and softmax output are assumed.
#Model general type
input_type="raw", #Raw has better quality but harder to train. mulaw-quantize is easier to train but has lower quality.
quantize_channels=2**16, # 65536 (16-bit) (raw) or 256 (8-bit) (mulaw or mulaw-quantize) // number of classes = 256 <=> mu = 255
use_bias = True, #Whether to use bias in convolutional layers of the Wavenet
legacy = True, #Whether to use legacy mode: Multiply all skip outputs but the first one with sqrt(0.5) (True for more early training stability, especially for large models)
residual_legacy = True, #Whether to scale residual blocks outputs by a factor of sqrt(0.5) (True for input variance preservation early in training and better overall stability)
#Model Losses parmeters
#Minimal scales ranges for MoL and Gaussian modeling
log_scale_min=float(np.log(1e-14)), #Mixture of logistic distributions minimal log scale
log_scale_min_gauss = float(np.log(1e-7)), #Gaussian distribution minimal allowed log scale
#Loss type
cdf_loss = False, #Whether to use CDF loss in Gaussian modeling. Advantages: non-negative loss term and more training stability. (Automatically True for MoL)
#model parameters
#To use Gaussian distribution as output distribution instead of mixture of logistics, set "out_channels = 2" instead of "out_channels = 10 * 3". (UNDER TEST)
out_channels = 2, #This should be equal to quantize channels when input type is 'mulaw-quantize' else: num_distributions * 3 (prob, mean, log_scale).
layers = 20, #Number of dilated convolutions (Default: Simplified Wavenet of Tacotron-2 paper)
stacks = 2, #Number of dilated convolution stacks (Default: Simplified Wavenet of Tacotron-2 paper)
residual_channels = 128, #Number of residual block input/output channels.
gate_channels = 256, #split in 2 in gated convolutions
skip_out_channels = 128, #Number of residual block skip convolution channels.
kernel_size = 3, #The number of inputs to consider in dilated convolutions.
#Upsampling parameters (local conditioning)
cin_channels = 80, #Set this to -1 to disable local conditioning, else it must be equal to num_mels!!
#Upsample types: ('1D', '2D', 'Resize', 'SubPixel', 'NearestNeighbor')
#All upsampling initialization/kernel_size are chosen to omit checkerboard artifacts as much as possible. (Resize is designed to omit that by nature).
#To be specific, all initial upsample weights/biases (when NN_init=True) ensure that the upsampling layers act as a "Nearest neighbor upsample" of size "hop_size" (checkerboard free).
#1D spans all frequency bands for each frame (channel-wise) while 2D spans "freq_axis_kernel_size" bands at a time. Both are vanilla transpose convolutions.
#Resize is a 2D convolution that follows a Nearest Neighbor (NN) resize. For reference, this is: "NN resize->convolution".
#SubPixel (2D) is the ICNR version (initialized to be equivalent to "convolution->NN resize") of Sub-Pixel convolutions. also called "checkered artifact free sub-pixel conv".
#Finally, NearestNeighbor is a non-trainable upsampling layer that just expands each frame (or "pixel") to the equivalent hop size. Ignores all upsampling parameters.
upsample_type = 'SubPixel', #Type of the upsampling deconvolution. Can be ('1D' or '2D', 'Resize', 'SubPixel' or simple 'NearestNeighbor').
upsample_activation = 'Relu', #Activation function used during upsampling. Can be ('LeakyRelu', 'Relu' or None)
upsample_scales = [11, 25], #prod(upsample_scales) should be equal to hop_size
	freq_axis_kernel_size = 3, #Only used for 2D upsampling types. This is the number of frequency bands that are spanned at a time for each frame.
leaky_alpha = 0.4, #slope of the negative portion of LeakyRelu (LeakyRelu: y=x if x>0 else y=alpha * x)
NN_init = True, #Determines whether we want to initialize upsampling kernels/biases in a way to ensure upsample is initialize to Nearest neighbor upsampling. (Mostly for debug)
NN_scaler = 0.3, #Determines the initial Nearest Neighbor upsample values scale. i.e: upscaled_input_values = input_values * NN_scaler (1. to disable)
#global conditioning
gin_channels = -1, #Set this to -1 to disable global conditioning, Only used for multi speaker dataset. It defines the depth of the embeddings (Recommended: 16)
use_speaker_embedding = True, #whether to make a speaker embedding
n_speakers = 5, #number of speakers (rows of the embedding)
speakers_path = None, #Defines path to speakers metadata. Can be either in "speaker\tglobal_id" (with header) tsv format, or a single column tsv with speaker names. If None, use "speakers".
speakers = ['speaker0', 'speaker1', #List of speakers used for embeddings visualization. (Consult "wavenet_vocoder/train.py" if you want to modify the speaker names source).
'speaker2', 'speaker3', 'speaker4'], #Must be consistent with speaker ids specified for global conditioning for correct visualization.
###########################################################################################################################################
#Tacotron Training
#Reproduction seeds
tacotron_random_seed = 5339, #Determines initial graph and operations (i.e: model) random state for reproducibility
tacotron_data_random_state = 1234, #random state for train test split repeatability
#performance parameters
tacotron_swap_with_cpu = False, #Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!)
#train/test split ratios, mini-batches sizes
tacotron_batch_size = 32, #number of training samples on each training steps
#Tacotron Batch synthesis supports ~16x the training batch size (no gradients during testing).
#Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times different from training. We thus recommend masking the encoder.
tacotron_synthesis_batch_size = 1, #DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!!
tacotron_test_size = 0.05, #% of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is enough to have a good idea about overfit)
tacotron_test_batches = None, #number of test batches.
#Learning rate schedule
tacotron_decay_learning_rate = True, #boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay = 40000, #Step at which learning decay starts
tacotron_decay_steps = 18000, #Determines the learning rate decay slope (UNDER TEST)
tacotron_decay_rate = 0.5, #learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate = 1e-3, #starting learning rate
tacotron_final_learning_rate = 1e-4, #minimal learning rate
#Optimization parameters
tacotron_adam_beta1 = 0.9, #AdamOptimizer beta1 parameter
tacotron_adam_beta2 = 0.999, #AdamOptimizer beta2 parameter
tacotron_adam_epsilon = 1e-6, #AdamOptimizer Epsilon parameter
#Regularization parameters
tacotron_reg_weight = 1e-6, #regularization weight (for L2 regularization)
tacotron_scale_regularization = False, #Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)
tacotron_zoneout_rate = 0.1, #zoneout rate for all LSTM cells in the network
tacotron_dropout_rate = 0.5, #dropout rate for all convolutional layers + prenet
tacotron_clip_gradients = True, #whether to clip gradients
#Evaluation parameters
tacotron_natural_eval = False, #Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same teacher-forcing ratio as in training (just for overfit)
#Decoder RNN learning can take be done in one of two ways:
# Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode='constant'
# Scheduled Sampling Scheme: From Teacher-Forcing to sampling from previous outputs is function of global step. (teacher forcing ratio decay) mode='scheduled'
#The second approach is inspired by:
#Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
#Can be found under: https://arxiv.org/pdf/1506.03099.pdf
tacotron_teacher_forcing_mode = 'constant', #Can be ('constant' or 'scheduled'). 'scheduled' mode applies a cosine teacher forcing ratio decay. (Preference: scheduled)
tacotron_teacher_forcing_ratio = 1., #Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs, Only relevant if mode='constant'
tacotron_teacher_forcing_init_ratio = 1., #initial teacher forcing ratio. Relevant if mode='scheduled'
tacotron_teacher_forcing_final_ratio = 0., #final teacher forcing ratio. (Set None to use alpha instead) Relevant if mode='scheduled'
tacotron_teacher_forcing_start_decay = 10000, #starting point of teacher forcing ratio decay. Relevant if mode='scheduled'
tacotron_teacher_forcing_decay_steps = 40000, #Determines the teacher forcing ratio decay slope. Relevant if mode='scheduled'
tacotron_teacher_forcing_decay_alpha = None, #teacher forcing ratio decay rate. Defines the final tfr as a ratio of initial tfr. Relevant if mode='scheduled'
#Speaker adaptation parameters
tacotron_fine_tuning = False, #Set to True to freeze encoder and only keep training pretrained decoder. Used for speaker adaptation with small data.
###########################################################################################################################################
#Wavenet Training
wavenet_random_seed = 5339, # S=5, E=3, D=9 :)
wavenet_data_random_state = 1234, #random state for train test split repeatability
#performance parameters
wavenet_swap_with_cpu = False, #Whether to use cpu as support to gpu for synthesis computation (while loop).(Not recommended: may cause major slowdowns! Only use when critical!)
#train/test split ratios, mini-batches sizes
wavenet_batch_size = 8, #batch size used to train wavenet.
#During synthesis, there is no max_time_steps limitation so the model can sample much longer audio than 8k(or 13k) steps. (Audio can go up to 500k steps, equivalent to ~21sec on 24kHz)
#Usually your GPU can handle ~2x wavenet_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop)
wavenet_synthesis_batch_size = 10 * 2, #This ensure that wavenet synthesis goes up to 4x~8x faster when synthesizing multiple sentences. Watch out for OOM with long audios.
wavenet_test_size = None, #% of data to keep as test data, if None, wavenet_test_batches must be not None
wavenet_test_batches = 1, #number of test batches.
#Learning rate schedule
wavenet_lr_schedule = 'exponential', #learning rate schedule. Can be ('exponential', 'noam')
wavenet_learning_rate = 1e-3, #wavenet initial learning rate
wavenet_warmup = float(4000), #Only used with 'noam' scheme. Defines the number of ascending learning rate steps.
wavenet_decay_rate = 0.5, #Only used with 'exponential' scheme. Defines the decay rate.
wavenet_decay_steps = 200000, #Only used with 'exponential' scheme. Defines the decay steps.
#Optimization parameters
wavenet_adam_beta1 = 0.9, #Adam beta1
wavenet_adam_beta2 = 0.999, #Adam beta2
wavenet_adam_epsilon = 1e-6, #Adam Epsilon
#Regularization parameters
wavenet_clip_gradients = True, #Whether the clip the gradients during wavenet training.
wavenet_ema_decay = 0.9999, #decay rate of exponential moving average
wavenet_weight_normalization = False, #Whether to Apply Saliman & Kingma Weight Normalization (reparametrization) technique. (Used in DeepVoice3, not critical here)
	wavenet_init_scale = 1., #Only relevant if weight_normalization=True. Defines the initial scale in data dependent initialization of parameters.
wavenet_dropout = 0.05, #drop rate of wavenet layers
wavenet_gradient_max_norm = 100.0, #Norm used to clip wavenet gradients
wavenet_gradient_max_value = 5.0, #Value used to clip wavenet gradients
#training samples length
max_time_sec = None, #Max time of audio for training. If None, we use max_time_steps.
max_time_steps = 11000, #Max time steps in audio used to train wavenet (decrease to save memory) (Recommend: 8000 on modest GPUs, 13000 on stronger ones)
#Evaluation parameters
wavenet_natural_eval = False, #Whether to use 100% natural eval (to evaluate autoregressivity performance) or with teacher forcing to evaluate overfit and model consistency.
#Tacotron-2 integration parameters
train_with_GTA = True, #Whether to use GTA mels to train WaveNet instead of ground truth mels.
###########################################################################################################################################
#Eval/Debug parameters
#Eval sentences (if no eval text file was specified during synthesis, these sentences are used for eval)
sentences = [
# From July 8, 2017 New York Times:
'Scientists at the CERN laboratory say they have discovered a new particle.',
'There\'s a way to measure the acute emotional intelligence that has never gone out of style.',
'President Trump met with other leaders at the Group of 20 conference.',
'The Senate\'s bill to repeal and replace the Affordable Care Act is now imperiled.',
# From Google's Tacotron example page:
'Generative adversarial network or variational auto-encoder.',
'Basilar membrane and otolaryngology are not auto-correlations.',
'He has read the whole thing.',
'He reads books.',
'He thought it was time to present the present.',
'Thisss isrealy awhsome.',
'The big brown fox jumps over the lazy dog.',
'Did the big brown fox jump over the lazy dog?',
"Peter Piper picked a peck of pickled peppers. How many pickled peppers did Peter Piper pick?",
"She sells sea-shells on the sea-shore. The shells she sells are sea-shells I'm sure.",
"Tajima Airport serves Toyooka.",
#From The web (random long utterance)
# 'On offering to help the blind man, the man who then stole his car, had not, at that precise moment, had any evil intention, quite the contrary, \
# what he did was nothing more than obey those feelings of generosity and altruism which, as everyone knows, \
# are the two best traits of human nature and to be found in much more hardened criminals than this one, a simple car-thief without any hope of advancing in his profession, \
# exploited by the real owners of this enterprise, for it is they who take advantage of the needs of the poor.',
# A final Thank you note!
'Thank you so much for your support!',
],
#Wavenet Debug
wavenet_synth_debug = False, #Set True to use target as debug in WaveNet synthesis.
wavenet_debug_wavs = ['training_data/audio/audio-LJ001-0008.npy'], #Path to debug audios. Must be multiple of wavenet_num_gpus.
wavenet_debug_mels = ['training_data/mels/mel-LJ001-0008.npy'], #Path to corresponding mels. Must be of same length and order as wavenet_debug_wavs.
)
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences']
return 'Hyperparameters:\n' + '\n'.join(hp)
| 77.571053 | 190 | 0.751976 |
4a1a1d3f8f98d700f8ab535269c0a56d8c9a9989
| 488 |
py
|
Python
|
kendama/migrations/0011_ladder_private.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
kendama/migrations/0011_ladder_private.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | 13 |
2021-02-13T20:15:18.000Z
|
2022-03-11T23:57:07.000Z
|
kendama/migrations/0011_ladder_private.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.13 on 2020-08-06 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kendama', '0010_auto_20200806_1259'),
]
operations = [
migrations.AddField(
model_name='ladder',
name='private',
field=models.BooleanField(default=False, help_text="Cochez cette case pour que ce ladder ne soit visible qu'à vous.", verbose_name='privé'),
),
]
| 25.684211 | 152 | 0.635246 |