text (stringlengths, 6 to 947k) | repo_name (stringlengths, 5 to 100) | path (stringlengths, 4 to 231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6 to 947k) | score (float64, 0 to 0.34)
---|---|---|---|---|---|---|
# Standard modules
import os
import socket
import subprocess
import threading
# XBMC modules
import xbmc
import xbmcaddon
import xbmcgui
class OSMC_Communicator(threading.Thread):
''' Class to set up and manage the socket that allows communication between OSMC settings modules and external scripts.
For example, this communicator is set up by the Main OSMC settings service, and is used by the default.py of that service to request
the opening of the MyOSMC user interface.
Class requires:
- a queue object to allow messages to be communicated back to the parent
- a string describing the location of a unique socket file that other scripts can contact.
- a logging function
'''
def __init__(self, parent_queue, socket_file, logger):
# queue back to parent
self.parent_queue = parent_queue
# logging function
self.log = logger
# not sure I need this, but oh well
#self.wait_evt = threading.Event()
threading.Thread.__init__(self)
self.daemon = True
# create the listening socket; it accepts new connections when contacted
self.address = socket_file
if os.path.exists(self.address):
subprocess.call(['sudo', 'rm', self.address])
try:
# I need this for testing on my laptop
os.remove(self.address)
except:
self.log('Connection failed to delete socket file.')
pass
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# allows the address to be reused (helpful with testing)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.timeout = 3
self.sock.settimeout(self.timeout)
self.sock.bind(self.address)
self.sock.listen(1)
self.stopped = False
def stop(self):
''' Orderly shutdown of the socket; sends a message to the run loop
to exit. '''
self.log('Connection stop called')
try:
self.log('Connection stopping.')
self.stopped = True
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.address)
sock.send('exit')
sock.close()
self.sock.close()
self.log('Exit message sent to socket.')
except Exception, e:
self.log('Comms error trying to stop: {}'.format(e))
def run(self):
self.log('Comms started')
while not xbmc.abortRequested and not self.stopped:
try:
# wait here for a connection
conn, addr = self.sock.accept()
except socket.timeout:
continue
except:
self.log('An error occurred while waiting for a connection.')
break
self.log('Connection active.')
# turn off blocking for this temporary connection
# this will allow the loop to collect all parts of the message
conn.setblocking(0)
passed = False
total_wait = 0
wait = 5
while not passed and total_wait < 100:
try:
data = conn.recv(81920)
passed = True
self.log('data = %s' % data)
except:
total_wait += wait
xbmc.sleep(wait)
if not passed:
self.log('Connection failed to collect data.')
self.stopped = True
conn.close()
break
self.log('data = %s' % data)
# if the message is to stop, then kill the loop
if data == 'exit':
self.log('Connection called to "exit"')
self.stopped = True
conn.close()
break
# send the data to Main for it to process
self.parent_queue.put(data)
conn.close()
try:
os.remove(self.address)
except Exception, e:
self.log('Comms error trying to delete socket: {}'.format(e))
self.log('Comms Ended')
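# --- Illustrative usage sketch (not part of the original module) ---
# A parent service might wire the communicator up roughly like this; the
# queue handling and the socket path below are assumptions for the sketch,
# not values taken from the OSMC sources.
def _example_usage():
    import Queue
    parent_queue = Queue.Queue()
    comms = OSMC_Communicator(parent_queue, '/var/tmp/osmc.settings.sockfile', xbmc.log)
    comms.start()
    # an external script can now connect to the socket file and send a
    # command string, which run() places on parent_queue for the service
    comms.stop()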
| srmo/osmc | package/mediacenter-addon-osmc/src/script.module.osmccommon/resources/lib/osmc_comms.py | Python | gpl-2.0 | 3,448 | 0.032193 |
"""
Functions for calculating LOFAR hardware specific properties.
"""
import tkp.telescope.lofar.antennaarrays
import tkp.telescope.lofar.beam
import tkp.telescope.lofar.noise
import tkp.telescope.lofar.quality
| bartscheers/tkp | tkp/telescope/lofar/__init__.py | Python | bsd-2-clause | 211 | 0 |
#! /usr/bin/python
# Basic Matrix factorization
import numpy as np
class bmf():
def __init__(self, k = 100, rounds = 50, alpha = 0.005, beta = 0.02, train_fn = '', validate_fn = '', pred_fn = '', output = ''):
self.k = k
self.rounds = rounds
self.alpha = alpha
self.beta = beta
self.train_fn = train_fn
self.pred_fn = pred_fn
self.validate_fn = validate_fn
self.output = output
self.usr_dct = {}
self.item_dct = {}
self.reverse_usr_dct = {}
self.reverse_item_dct = {}
self.rating_graph = {}
self.rating_sz = 0
self.p = None
self.q = None
self.rmse = 0.
def load(self):
f = open(self.train_fn)
for line in f:
uid, iid, rating = line.strip('\n').split(',')
rating = float(rating)
if uid not in self.usr_dct:
self.usr_dct[uid] = len(self.usr_dct)
if iid not in self.item_dct:
self.item_dct[iid] = len(self.item_dct)
self.rating_graph.setdefault(self.usr_dct[uid], []).append((self.item_dct[iid], rating))
self.rating_sz += 1
# init reverse maps (for write usage)
self.reverse_usr_dct = {v:k for k, v in self.usr_dct.items()}
self.reverse_item_dct = {v:k for k, v in self.item_dct.items()}
f.close()
def cal_rmse(self):
import math
for u_indx, pair in self.rating_graph.iteritems():
for i_indx, rating in pair:
tmp = np.dot(self.p[u_indx, :], self.q[i_indx, :])
self.rmse += (rating - tmp) ** 2
return math.sqrt(self.rmse / self.rating_sz)
def predict_rating(self):
f1 = open(self.pred_fn)
f2 = open(self.output, 'w')
for line in f1:
uid, iid = line.strip('\n').split(',')
u_indx = self.usr_dct[uid]
i_indx = self.item_dct[iid]
pred_rating = np.dot(self.p[u_indx, :], self.q[i_indx, :])
f2.write('%s,%s,%s\n' % (uid, iid, pred_rating))
f1.close()
f2.close()
def r(self, i, j):
return np.dot(self.p[i, :], self.q[j, :])
def kernel(self):
import time
# init parameters
self.p = np.random.rand(len(self.usr_dct), self.k)
self.q = np.random.rand(len(self.item_dct), self.k)
# learning
for rd in xrange(self.rounds):
start = time.time()
for u_indx, pair in self.rating_graph.iteritems():
for i_indx, rating in pair:
tmp = np.dot(self.p[u_indx, :], self.q[i_indx, :])
e = rating - tmp
# learning delta
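# Stochastic gradient step for the squared error (rating - p_u . q_i)^2
# plus an L2 penalty on the factors: the error term contributes -2*e*q_i
# to the gradient w.r.t. p[u_indx, :] and the penalty contributes beta*p,
# hence the deltas below (and symmetrically for q[i_indx, :]).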
delta_p = self.alpha * (2 * e * self.q[i_indx, :] - self.beta * self.p[u_indx, :])
delta_q = self.alpha * (2 * e * self.p[u_indx, :] - self.beta * self.q[i_indx, :])
# update with delta
self.p[u_indx, :] += delta_p
self.q[i_indx, :] += delta_q
end = time.time()
print 'iter time: ', end - start
print 'iter'
def learn(self):
'''api'''
self.load()
self.kernel()
def write_result(self):
f1 = open('usr_factor.csv', 'w')
f2 = open('item_factor.csv', 'w')
for indx in xrange(len(self.usr_dct)):
f1.write('%s:' % self.reverse_usr_dct[indx])
for j in xrange(len(self.p[indx]) - 1):
f1.write('%s|' % self.p[indx][j])
f1.write(str(self.p[indx][len(self.p[indx]) - 1]) + '\n')
for indx in xrange(len(self.item_dct)):
f2.write('%s:' % self.reverse_item_dct[indx])
for j in xrange(len(self.q[indx]) - 1):
f2.write('%s|' % self.q[indx][j])
f2.write(str(self.q[indx][len(self.q[indx]) - 1]) + '\n')
if __name__ == '__main__':
bmf_solver = bmf(k = 80, rounds = 5, alpha = 0.005, beta = 0.02, train_fn = '/home/xunzhang/xunzhang/Data/mf/movielen1m', validate_fn = 'null', pred_fn = 'pred.csv', output = 'result.csv')
bmf_solver.learn()
bmf_solver.write_result()
bmf_solver.predict_rating()
print 'rmse: ', bmf_solver.cal_rmse()
| xunzhang/roraima | tools/mf.py | Python | apache-2.0 | 3,744 | 0.044605 |
import re
import string
import nltk
from bs4 import BeautifulSoup
__author__ = 'nolram'
class NewsItem:
def __init__(self, news, stop_words):
self.all_words = []
self.stop_words = stop_words
self.regex = re.compile('[%s]' % re.escape(string.punctuation))
if "titulo" in news and "categoria" in news:
self.add_words(news["titulo"])
self.title = news["titulo"]
if "subcategoria" in news:
self.category = news["subcategoria"].lower()
else:
self.category = news["categoria"].lower()
if "texto" in news:
self.add_words(" ".join(news["texto"]))
self.url = news["url"]
def normalized_words(self, s):
words = []
oneline = s.replace('\n', ' ')
soup = BeautifulSoup(oneline.strip(), 'html.parser')
cleaned = soup.get_text()
toks1 = cleaned.split()
for t1 in toks1:
translated = self.regex.sub('', t1)
toks2 = translated.split()
for t2 in toks2:
t2s = t2.strip()
if len(t2s) > 1:
words.append(t2s.lower())
return words
def word_count(self):
return len(self.all_words)
def word_freq_dist(self):
freqs = nltk.FreqDist() # class nltk.probability.FreqDist
for w in self.all_words:
freqs.inc(w, 1)
return freqs
def add_words(self, s):
words = self.normalized_words(s)
for w in words:
if w not in self.stop_words:
self.all_words.append(w)
def features(self, top_words):
word_set = set(self.all_words)
features = {}
features['url'] = self.url
for w in top_words:
features["w_%s" % w] = (w in word_set)
return features
def normalized_frequency_power(self, word, freqs, largest_count):
n = self.normalized_frequency_value(word, freqs, largest_count)
return pow(n, 2)
def normalized_frequency_value(self, word, freqs, largest_count):
count = freqs.get(word)
n = 0
if count is None:
n = float(0)
else:
n = ((float(count) * float(largest_count)) / float(freqs.N())) * 100.0
return n
def normalized_boolean_value(self, word, freqs, largest_count):
count = freqs.get(word)
if count is None:
return float(0)
else:
return float(1)
def knn_data(self, top_words):
data_array = []
freqs = self.word_freq_dist()
largest_count = freqs.values()[0]
features = {}
features['url'] = self.url
for w in top_words:
data_array.append(self.normalized_boolean_value(w, freqs, largest_count))
print "knn_data: %s" % data_array
return data_array
def as_debug_array(self, guess):
l = []
l.append('---')
#l.append('lookup_key: %s' % (self.lookup_key()))
l.append('Categoria: %s' % (self.category))
l.append('Palpite: %s' % (guess))
l.append('URL: %s' % (self.url))
l.append('Titulos: %s' % (self.title))
l.append('')
l.append('Todas as palavras por contagem')
freqs = nltk.FreqDist([w.lower() for w in self.all_words])
for w in freqs.keys():
l.append("%-20s %d" % (w, freqs.get(w)))
l.append('')
l.append('all_words, sequentially:')
for w in self.all_words:
l.append(w)
return l
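# --- Illustrative usage sketch (not part of the original file) ---
# The dict keys mirror the ones the class already reads; the sample values
# and the tiny stop-word list are made up for illustration.
def _example_news_item():
    stop_words = set([u'de', u'a', u'o', u'que', u'e'])
    news = {
        "titulo": "Exemplo de noticia",
        "categoria": "esportes",
        "texto": ["conteudo da noticia de exemplo"],
        "url": "http://example.com/noticia",
    }
    item = NewsItem(news, stop_words)
    return item.features(top_words=["exemplo", "noticia"])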
| nolram/news_crawler | classificador/news_item.py | Python | mit | 3,574 | 0.001399 |
import itertools
from django.core.management.base import BaseCommand
from django.db import models, connection
qn = connection.ops.quote_name
def fix(table_name, field):
d = {'table': table_name, 'field': qn(field.column), 'sql': sql(field)}
update = "UPDATE {table} SET {field}='' WHERE {field} IS NULL".format(**d)
alter = "MODIFY {sql}".format(**d)
return update, alter
def sql(field):
o = ['%s' % qn(field.column), field.db_type()]
if not field.null:
o.append('NOT NULL')
if field.primary_key:
o.append('PRIMARY KEY')
if field.default is not models.fields.NOT_PROVIDED:
o.append('default %r' % field.default)
return ' '.join(o)
class Command(BaseCommand):
help = 'Print SQL to change CharFields to be non-null.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
if app_labels:
modules = [models.loading.get_app(app) for app in app_labels]
models_ = itertools.chain(*[models.loading.get_models(mod)
for mod in modules])
else:
models_ = models.loading.get_models()
updates, alters = [], []
for model in models_:
model_alters = []
table = model._meta.db_table
for field in model._meta.fields:
if isinstance(field, models.CharField) and not field.null:
update, alter = fix(table, field)
updates.append(update)
model_alters.append(alter)
if model_alters:
alters.append('ALTER TABLE %s\n\t%s' % (
table, ',\n\t'.join(model_alters)))
print ';\n'.join(updates + alters) + ';'
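# Example of the generated SQL for a hypothetical `app_model.name` CharField
# (the table and column names here are made up, not taken from a real app):
# UPDATE app_model SET `name`='' WHERE `name` IS NULL;
# ALTER TABLE app_model
#     MODIFY `name` varchar(255) NOT NULL;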
| jpetto/olympia | src/olympia/amo/management/commands/fix_charfields.py | Python | bsd-3-clause | 1,744 | 0 |
# -*- coding: utf-8 -*-
#
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from common.fields import StringManyToManyField
from common.serializers import AdaptedBulkListSerializer
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from ..models import User, UserGroup
from .. import utils
__all__ = [
'UserGroupSerializer', 'UserGroupListSerializer',
'UserGroupUpdateMemberSerializer',
]
class UserGroupSerializer(BulkOrgResourceModelSerializer):
users = serializers.PrimaryKeyRelatedField(
required=False, many=True, queryset=User.objects, label=_('User')
)
class Meta:
model = UserGroup
list_serializer_class = AdaptedBulkListSerializer
fields = [
'id', 'name', 'users', 'comment', 'date_created',
'created_by',
]
extra_kwargs = {
'created_by': {'label': _('Created by'), 'read_only': True}
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_fields_queryset()
def set_fields_queryset(self):
users_field = self.fields['users']
users_field.child_relation.queryset = utils.get_current_org_members()
def validate_users(self, users):
for user in users:
if user.is_super_auditor:
msg = _('Auditors cannot be join in the user group')
raise serializers.ValidationError(msg)
return users
class UserGroupListSerializer(UserGroupSerializer):
users = StringManyToManyField(many=True, read_only=True)
class UserGroupUpdateMemberSerializer(serializers.ModelSerializer):
users = serializers.PrimaryKeyRelatedField(many=True, queryset=User.objects)
class Meta:
model = UserGroup
fields = ['id', 'users']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_fields_queryset()
def set_fields_queryset(self):
users_field = self.fields['users']
users_field.child_relation.queryset = utils.get_current_org_members()
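# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical view or shell session might serialize groups like this.
def _example_serialize_groups():
    groups = UserGroup.objects.all()
    return UserGroupListSerializer(groups, many=True).data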
| sdgdsffdsfff/jumpserver | apps/users/serializers/group.py | Python | gpl-2.0 | 2,119 | 0.000944 |
# Configuration file example for L1L2Signature
# version: '0.2.2'
import l1l2py
#~~ Data Input/Output ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# * Data assumed csv with samples and features labels
# * All the path are w.r.t. config file path
data_matrix = 'data/gedm.csv'
labels = 'data/labels.csv'
delimiter = ','
samples_on = 'col' # or 'row': samples on cols or rows
result_path = '.'
#~~ Data filtering/normalization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sample_remover = None # removes samples with this label value
variable_remover = 'affx' # remove vars where name starts with (not case-sens.)
data_normalizer = l1l2py.tools.center
labels_normalizer = None
#~~ Cross validation options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# * See l1l2py.tools.{kfold_splits, stratified_kfold_splits}
external_k = 4 # (None means Leave One Out)
internal_k = 3
cv_splitting = l1l2py.tools.stratified_kfold_splits
#~~ Errors functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# * See l1l2py.tools.{regression_error, classification_error,
# balanced_classification_error}
cv_error = l1l2py.tools.regression_error
error = l1l2py.tools.balanced_classification_error
positive_label = None # Indicates the positive class in case of 2-class task
#~~ L1l2 Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# * Ranges will be sorted from smaller to bigger value!
# * See l1l2py.tools.{geometric_range, linear_range}
tau_range = l1l2py.tools.geometric_range(1e-3, 0.5, 20) # * MAX_TAU
mu_range = l1l2py.tools.geometric_range(1e-3, 1.0, 3) # * CORRELATION_FACTOR
lambda_range = l1l2py.tools.geometric_range(1e0, 1e4, 10)
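# As an illustration (assuming geometric_range(min, max, n) returns n
# geometrically spaced values), tau_range above yields 20 values growing
# from 1e-3 to 0.5; the "# * MAX_TAU" note suggests each value is later
# scaled by a data-dependent maximum tau.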
#~~ Signature Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
frequency_threshold = 0.5
#~~ PPlus options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
debug = True # If True, the experiment runs only on the local pc cores
| slipguru/l1l2py | cuda/32bit/config.py | Python | gpl-3.0 | 1,967 | 0.006609 |
#! /usr/bin/env python
# (Force the script to use the latest build.)
#
# test_parser.py
import parser, traceback
_numFailed = 0
def testChunk(t, fileName):
global _numFailed
print '----', fileName,
try:
ast = parser.suite(t)
tup = parser.ast2tuple(ast)
# this discards the first AST; a huge memory savings when running
# against a large source file like Tkinter.py.
ast = None
new = parser.tuple2ast(tup)
except parser.ParserError, err:
print
print 'parser module raised exception on input file', fileName + ':'
traceback.print_exc()
_numFailed = _numFailed + 1
else:
if tup != parser.ast2tuple(new):
print
print 'parser module failed on input file', fileName
_numFailed = _numFailed + 1
else:
print 'o.k.'
def testFile(fileName):
t = open(fileName).read()
testChunk(t, fileName)
def test():
import sys
args = sys.argv[1:]
if not args:
import glob
args = glob.glob("*.py")
args.sort()
map(testFile, args)
sys.exit(_numFailed != 0)
if __name__ == '__main__':
test()
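# Usage: python test_parser.py [file.py ...]
# With no arguments it round-trips every *.py file in the current directory.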
| xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Demo/parser/test_parser.py | Python | gpl-2.0 | 1,193 | 0.004191 |
# View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
##plt.scatter(x_data, y_data)
##plt.show()
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))
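# reduce_sum over reduction_indices=[1] sums the squared error across each
# sample's output dimension; reduce_mean then averages over the batch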
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# important step
sess = tf.Session()
# tf.initialize_all_variables() no longer valid from
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.global_variables_initializer())
# plot the real data
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x_data, y_data)
plt.ion()
plt.show()
for i in range(1000):
# training
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 50 == 0:
# to visualize the result and improvement
try:
ax.lines.remove(lines[0])
except Exception:
pass
prediction_value = sess.run(prediction, feed_dict={xs: x_data})
# plot the prediction
lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
plt.pause(1)
| MediffRobotics/DeepRobotics | DeepLearnMaterials/tutorials/tensorflowTUT/tf12_plot_result/full_code.py | Python | gpl-3.0 | 2,272 | 0.004401 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
ALL_LOSSES = [keras.losses.mean_squared_error,
keras.losses.mean_absolute_error,
keras.losses.mean_absolute_percentage_error,
keras.losses.mean_squared_logarithmic_error,
keras.losses.squared_hinge,
keras.losses.hinge,
keras.losses.categorical_crossentropy,
keras.losses.binary_crossentropy,
keras.losses.kullback_leibler_divergence,
keras.losses.poisson,
keras.losses.cosine_proximity,
keras.losses.logcosh,
keras.losses.categorical_hinge]
class _MSEMAELoss(object):
"""Loss function with internal state, for testing serialization code."""
def __init__(self, mse_fraction):
self.mse_fraction = mse_fraction
def __call__(self, y_true, y_pred):
return (self.mse_fraction * keras.losses.mse(y_true, y_pred) +
(1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred))
def get_config(self):
return {'mse_fraction': self.mse_fraction}
class KerasLossesTest(test.TestCase):
def test_objective_shapes_3d(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.random((5, 6, 7)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.get_shape().as_list(), [5, 6])
def test_objective_shapes_2d(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.random((6, 7)))
y_b = keras.backend.variable(np.random.random((6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.get_shape().as_list(), [6,])
def test_cce_one_hot(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (5, 6)
y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
y_b = keras.backend.variable(np.random.random((6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (6,)
@test_util.run_in_graph_and_eager_modes
def test_categorical_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
softmax_output = keras.backend.softmax(logits)
output_from_logit = keras.losses.categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = keras.losses.categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_softmax), atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_sparse_categorical_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
softmax_output = keras.backend.softmax(logits)
output_from_logit = keras.losses.sparse_categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = keras.losses.sparse_categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_softmax), atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_binary_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
sigmoid_output = keras.backend.sigmoid(logits)
output_from_logit = keras.losses.binary_crossentropy(
target, logits, from_logits=True)
output_from_sigmoid = keras.losses.binary_crossentropy(
target, sigmoid_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_sigmoid), atol=1e-5)
def test_serialization(self):
fn = keras.losses.get('mse')
config = keras.losses.serialize(fn)
new_fn = keras.losses.deserialize(config)
self.assertEqual(fn, new_fn)
def test_categorical_hinge(self):
y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
[0.1, 0.2, 0.7]]))
y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
self.assertAllClose(expected_loss, np.mean(loss))
def test_serializing_loss_class(self):
orig_loss_class = _MSEMAELoss(0.3)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
serialized = keras.losses.serialize(orig_loss_class)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
deserialized = keras.losses.deserialize(serialized)
assert isinstance(deserialized, _MSEMAELoss)
assert deserialized.mse_fraction == 0.3
def test_serializing_model_with_loss_class(self):
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
model_filename = os.path.join(tmpdir, 'custom_loss.h5')
with self.cached_session():
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
loss = _MSEMAELoss(0.3)
inputs = keras.layers.Input((2,))
outputs = keras.layers.Dense(1, name='model_output')(inputs)
model = keras.models.Model(inputs, outputs)
model.compile(optimizer='sgd', loss={'model_output': loss})
model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
if h5py is None:
return
model.save(model_filename)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
loaded_model = keras.models.load_model(model_filename)
loaded_model.predict(np.random.rand(128, 2))
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_impl.ReductionV2.SUM, name='mse_1')
self.assertEqual(mse_obj.name, 'mse_1')
self.assertEqual(mse_obj.reduction, losses_impl.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
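# Worked check: squared errors are [9, 1, 100] and [169, 9, 9], the
# per-sample means are 110/3 and 187/3, and their average is 49.5.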
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
def test_scalar_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)
def test_sample_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)
def test_timestep_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)
def test_zero_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegexp(
ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
mse_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_impl.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)
def test_sum_reduction(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_impl.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_impl.ReductionV2.SUM, name='mae_1')
self.assertEqual(mae_obj.name, 'mae_1')
self.assertEqual(mae_obj.reduction, losses_impl.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 5.5, 3)
def test_scalar_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)
def test_sample_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)
def test_timestep_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)
def test_zero_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegexp(
ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
mae_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_impl.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)
def test_sum_reduction(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_impl.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = keras.losses.MeanAbsolutePercentageError(
reduction=losses_impl.ReductionV2.SUM, name='mape_1')
self.assertEqual(mape_obj.name, 'mape_1')
self.assertEqual(mape_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)
def test_sample_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)
def test_timestep_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)
def test_zero_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError(
reduction=losses_impl.ReductionV2.SUM, name='mape_1')
self.assertEqual(msle_obj.name, 'mape_1')
self.assertEqual(msle_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)
def test_sample_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)
def test_zero_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_util.run_all_in_graph_and_eager_modes
class CosineProximityTest(test.TestCase):
def test_config(self):
cosine_obj = keras.losses.CosineProximity(
reduction=losses_impl.ReductionV2.SUM, name='cosine_loss')
self.assertEqual(cosine_obj.name, 'cosine_loss')
self.assertEqual(cosine_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
cosine_obj = keras.losses.CosineProximity()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cosine_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), -0.18722, 3)
def test_scalar_weighted(self):
cosine_obj = keras.losses.CosineProximity()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cosine_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), -0.43060, 3)
def test_sample_weighted(self):
cosine_obj = keras.losses.CosineProximity()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.15599, 3)
def test_timestep_weighted(self):
cosine_obj = keras.losses.CosineProximity()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), -2.0000, 3)
def test_zero_weighted(self):
cosine_obj = keras.losses.CosineProximity()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cosine_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):
def test_config(self):
bce_obj = keras.losses.BinaryCrossentropy(
reduction=losses_impl.ReductionV2.SUM, name='bce_1')
self.assertEqual(bce_obj.name, 'bce_1')
self.assertEqual(bce_obj.reduction, losses_impl.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.float32)
bce_obj = keras.losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
bce_obj = keras.losses.BinaryCrossentropy()
y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
shape=(2, 3),
dtype=dtypes.float32)
loss = bce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 8.0004, 3)
# Test with logits.
logits = constant_op.constant([10., 10., 10., -10., 10, -10],
shape=(2, 3),
dtype=dtypes.float32)
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 5., 3)
def test_scalar_weighted(self):
bce_obj = keras.losses.BinaryCrossentropy()
y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
shape=(2, 3),
dtype=dtypes.float32)
loss = bce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 18.4010, 3)
# Test with logits.
y_true = array_ops.ones((32, 1))
logits = array_ops.ones((32, 1), dtype=dtypes.float32)
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.7205, 3)
def test_sample_weighted(self):
bce_obj = keras.losses.BinaryCrossentropy()
y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
shape=(2, 3),
dtype=dtypes.float64)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 21.4907, 3)
# Test with logits.
y_true = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
logits = constant_op.constant(
[[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]],
dtype=dtypes.float64)
weights = constant_op.constant([3, 2, 8])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
self.assertAlmostEqual(self.evaluate(loss), 288.8888, 3)
def test_no_reduction(self):
y_true = constant_op.constant(((1, 0, 1), (1, 1, 0), (0, 1, 1)))
logits = constant_op.constant(((100.0, -100.0, 100.0),
(100.0, -100.0, 100.0),
(100.0, 100.0, -100.0)))
bce_obj = keras.losses.BinaryCrossentropy(
from_logits=True, reduction=losses_impl.ReductionV2.NONE)
loss = bce_obj(y_true, logits)
self.assertAllClose((0., 66.6666, 66.6666), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 1]])
label_smoothing = 0.1
# Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# Label smoothing: z' = z * (1 - L) + 0.5L
# 1 = 1 - 0.5L
# 0 = 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = keras.losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = keras.losses.CategoricalCrossentropy(
reduction=losses_impl.ReductionV2.SUM, name='bce_1')
self.assertEqual(cce_obj.name, 'bce_1')
self.assertEqual(cce_obj.reduction, losses_impl.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = keras.losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=losses_impl.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
# Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100]
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# Label smoothing: z' = z * (1 - L) + L/n
# 1 = 1 - L + L/n
# 0 = L/n
# Applying the above two fns to the given input:
# -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
cce_obj = keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_all_correct_unweighted_sparse(self):
y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = keras.losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted_sparse(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([0, 1, 2])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted_sparse(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted_sparse(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction_sparse(self):
y_true = constant_op.constant([[0], [1], [2]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=losses_impl.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = keras.losses.Hinge(
reduction=losses_impl.ReductionV2.SUM, name='hinge_loss')
self.assertEqual(hinge_obj.name, 'hinge_loss')
self.assertEqual(hinge_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
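# Hinge = mean(max(1. - y_true * y_pred, 0.), axis=-1)
# 1 - y_true * y_pred = [[-3, -71, -23], [41, 3, -17]]
# max(above, 0) = [[0, 0, 0], [41, 3, 0]]
# per-sample means = [0, 44/3]; reduced loss = 22/3 = 7.3333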
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 7.3333, 3)
def test_scalar_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 16.8666, 3)
# Verify we get the same output when the same input is given
loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 24.9333, 3)
def test_timestep_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 2.0, 3)
def test_zero_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = keras.losses.SquaredHinge(
reduction=losses_impl.ReductionV2.SUM, name='sq_hinge_loss')
self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss')
self.assertEqual(sq_hinge_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
y_pred = constant_op.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=dtypes.float32)
# Sq hinge = mean(square(max(1. - y_true * y_pred, 0.)), axis=-1)
# (1. - y_true * y_pred) = [[1-4, 1-72], [1-24, 1+40]] = [[-3, -71], [-23, 41]]
# sq(max(above val, 0)) = sq([[0, 0], [0, 41]]) = [[0, 0], [0, 1681]]
# Mean = [0, 840.5]. Reduced loss = (0 + 840.5)/2 = 420.25
loss = sq_hinge_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 420.25, 3)
def test_scalar_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 647.833, 3)
# Verify we get the same output when the same input is given
loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 957.667, 3)
def test_timestep_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 6.0, 3)
def test_zero_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = keras.losses.CategoricalHinge(
reduction=losses_impl.ReductionV2.SUM, name='cat_hinge_loss')
self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss')
self.assertEqual(cat_hinge_obj.reduction, losses_impl.ReductionV2.SUM)
def test_unweighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
y_pred = constant_op.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred)
# pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
# neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48]
# cat_hinge = max(0., neg - pos + 1.) = [0, 65]
# reduced_loss = (0 + 65)/2 = 32.5
self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
def test_scalar_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 83.95, 3)
# Verify we get the same output when the same input is given
loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 124.1, 3)
def test_timestep_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 4.0, 3)
def test_zero_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class LogLossTest(test.TestCase):
def setup(self):
# TODO(psv): Change to setUp() after b/122319309 is fixed.
y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
epsilon = 1e-7 # to avoid log 0
self.batch_size = 6
self.expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
self.expected_losses += np.multiply(1 - y_true,
np.log(1 - y_pred + epsilon))
self.expected_losses = -self.expected_losses
self.y_pred = constant_op.constant(y_pred)
self.y_true = constant_op.constant(y_true)
def test_config(self):
log_loss_obj = keras.losses.LogLoss(
reduction=losses_impl.ReductionV2.SUM, name='log')
self.assertEqual(log_loss_obj.name, 'log')
self.assertEqual(log_loss_obj.reduction, losses_impl.ReductionV2.SUM)
def test_all_correct(self):
self.setup()
log_loss_obj = keras.losses.LogLoss()
loss = log_loss_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
self.setup()
log_loss_obj = keras.losses.LogLoss()
loss = log_loss_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
log_loss_obj = keras.losses.LogLoss()
sample_weight = 2.3
loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
log_loss_obj = keras.losses.LogLoss()
sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))
loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_timestep_weighted(self):
log_loss_obj = keras.losses.LogLoss()
y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3, 1))
y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3, 1))
epsilon = 1e-7 # to avoid log 0
batch_size = 6
expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
expected_losses += np.multiply(1 - y_true, np.log(1 - y_pred + epsilon))
y_pred = constant_op.constant(y_pred)
y_true = constant_op.constant(y_true)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = log_loss_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
actual_loss = np.multiply(-expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_zero_weighted(self):
self.setup()
log_loss_obj = keras.losses.LogLoss()
sample_weight = 0
loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
if __name__ == '__main__':
test.main()
| hfp/tensorflow-xsmm | tensorflow/python/keras/losses_test.py | Python | apache-2.0 | 48,998 | 0.002694 |
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
import netaddr
name = 'netaddr'
version = netaddr.__version__
description = 'Pythonic manipulation of IPv4, IPv6, CIDR, EUI and MAC network addresses'
keywords = [
'Networking', 'Systems Administration', 'IANA', 'IEEE', 'CIDR', 'IP',
'IPv4', 'IPv6', 'CIDR', 'EUI', 'MAC', 'MAC-48', 'EUI-48', 'EUI-64'
]
download_url = 'http://github.com/drkjam/netaddr/downloads'
author = 'David P. D. Moss'
author_email = 'drkjam@gmail.com'
url = 'http://github.com/drkjam/netaddr/'
# Required by distutils only.
packages = [
'netaddr',
'netaddr.ip',
'netaddr.eui',
'netaddr.strategy',
'netaddr.tests',
]
# Required by distutils only.
package_data = {
'netaddr.ip': [
'ipv4-address-space.xml',
'ipv6-address-space.xml',
'multicast-addresses.xml'
],
'netaddr.eui': [
'*.txt',
'*.idx'
],
'netaddr.tests': [
'core/*.txt',
'eui/*.txt',
'ip/*.txt',
'strategy/*.txt',
],
}
scripts = ['netaddr/tools/netaddr']
license = 'BSD License'
#------------------------------------------------------------------------
# NB - keep this text around 74 characters wide so it is viewable
# in various fixed window sizes.
long_description = """
A pure Python network address representation and manipulation library.
netaddr provides a Pythonic way of working with :-
- IPv4 and IPv6 addresses and subnets
- MAC addresses, OUI and IAB identifiers, IEEE EUI-64 identifiers
- arbitrary (non-aligned) IP address ranges and IP address sets
- various non-CIDR IP range formats such as nmap and glob-style formats
Included are routines for :-
- generating, sorting and summarizing IP addresses and networks
- performing easy conversions between address notations and formats
- detecting, parsing and formatting network address representations
- performing set-based operations on groups of IP addresses and subnets
- working with arbitrary IP address ranges and formats
- accessing OUI and IAB organisational information published by IEEE
- accessing IP address and block information published by IANA
For details on the latest updates and changes, see :-
http://github.com/drkjam/netaddr/blob/rel-0.7.x/CHANGELOG
API documentation for the latest release is available here :-
http://packages.python.org/netaddr/
"""
platforms = 'OS Independent'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Communications',
'Topic :: Documentation',
'Topic :: Education',
'Topic :: Education :: Testing',
'Topic :: Home Automation',
'Topic :: Internet',
'Topic :: Internet :: Log Analysis',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Security',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Traffic Generation',
'Topic :: System :: Benchmark',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Firewalls',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Networking :: Time Synchronization',
'Topic :: System :: Recovery Tools',
'Topic :: System :: Shells',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: System :: System Shells',
'Topic :: Text Processing',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
]
install_requires = [
]
setup_requires = [
]
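# A minimal, illustrative sketch of the features described in long_description
# above (CIDR parsing, set operations on subnets, MAC/EUI handling). It is
# guarded so that importing this module from setup.py is unaffected; the
# addresses below are documentation/example values, not project data.
if __name__ == '__main__':
    from netaddr import EUI, IPAddress, IPNetwork, IPSet
    network = IPNetwork('192.0.2.0/24')
    # Membership testing and basic network properties.
    print('%s in %s: %s' % ('192.0.2.1', network, IPAddress('192.0.2.1') in network))
    print('cidr=%s netmask=%s size=%d' % (network.cidr, network.netmask, network.size))
    # Set-based operations: two adjacent /25s aggregate back into the /24.
    halves = IPSet(['192.0.2.0/25', '192.0.2.128/25'])
    print('aggregates to /24: %s' % (halves == IPSet(['192.0.2.0/24'])))
    # MAC (EUI-48) handling.
    mac = EUI('00-1B-77-49-54-FD')
    print('mac=%s as int=%d' % (mac, int(mac)))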
| ashmastaflash/gwdetect | dependencies/netaddr-0.7.10/release.py | Python | mit | 5,263 | 0.00076 |
# -*- coding: utf-8 -*-
"""build_manpage command -- Generate man page from setup()"""
import datetime
from distutils.command.build import build
from distutils.core import Command
from distutils.errors import DistutilsOptionError
import optparse
class build_manpage(Command):
description = 'Generate man page from setup().'
user_options = [
('output=', 'O', 'output file'),
('parser=', None, 'module path to optparser (e.g. mymod:func'),
]
def initialize_options(self):
self.output = None
self.parser = None
def finalize_options(self):
if self.output is None:
raise DistutilsOptionError('\'output\' option is required')
if self.parser is None:
raise DistutilsOptionError('\'parser\' option is required')
mod_name, func_name = self.parser.split(':')
fromlist = mod_name.split('.')
try:
mod = __import__(mod_name, fromlist=fromlist)
self._parser = getattr(mod, func_name)()
except ImportError, err:
raise
self._parser.formatter = ManPageFormatter()
self._parser.formatter.set_parser(self._parser)
self.announce('Writing man page %s' % self.output)
self._today = datetime.date.today()
def _markup(self, txt):
return txt.replace('-', '\\-')
def _write_header(self):
appname = self.distribution.get_name()
ret = []
ret.append('.TH %s 1 %s\n' % (self._markup(appname),
self._today.strftime('%Y\\-%m\\-%d')))
description = self.distribution.get_description()
if description:
            name = self._markup('%s - %s' % (appname,
                                             description.splitlines()[0]))
else:
name = self._markup(appname)
ret.append('.SH NAME\n%s\n' % name)
synopsis = self._parser.get_usage()
if synopsis:
synopsis = synopsis.replace('%s ' % appname, '')
ret.append('.SH SYNOPSIS\n.B %s\n%s\n' % (self._markup(appname),
synopsis))
long_desc = self.distribution.get_long_description()
if long_desc:
ret.append('.SH DESCRIPTION\n%s\n' % self._markup(long_desc))
return ''.join(ret)
def _write_options(self):
ret = ['.SH OPTIONS\n']
ret.append(self._parser.format_option_help())
return ''.join(ret)
def _write_footer(self):
ret = []
appname = self.distribution.get_name()
author = '%s <%s>' % (self.distribution.get_author(),
self.distribution.get_author_email())
ret.append(('.SH AUTHORS\n.B %s\nwas written by %s.\n'
% (self._markup(appname), self._markup(author))))
homepage = self.distribution.get_url()
ret.append(('.SH DISTRIBUTION\nThe latest version of %s may '
'be downloaded from\n'
'.UR %s\n.UE\n'
% (self._markup(appname), self._markup(homepage),)))
return ''.join(ret)
def run(self):
manpage = []
manpage.append(self._write_header())
manpage.append(self._write_options())
manpage.append(self._write_footer())
stream = open(self.output, 'w')
stream.write(''.join(manpage))
stream.close()
class ManPageFormatter(optparse.HelpFormatter):
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
optparse.HelpFormatter.__init__(self, indent_increment,
max_help_position, width, short_first)
def _markup(self, txt):
return txt.replace('-', '\\-')
def format_usage(self, usage):
return self._markup(usage)
def format_heading(self, heading):
if self.level == 0:
return ''
return '.TP\n%s\n' % self._markup(heading.upper())
def format_option(self, option):
result = []
opts = self.option_strings[option]
result.append('.TP\n.B %s\n' % self._markup(opts))
if option.help:
help_text = '%s\n' % self._markup(self.expand_default(option))
result.append(help_text)
return ''.join(result)
build.sub_commands.append(('build_manpage', None))
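# Illustrative wiring (assumed layout, not tied to a particular project): a
# setup.py can register the command and point it at the function that builds
# the optparse parser, e.g.
#
#     from distutils.core import setup
#     from utils.command.build_manpage import build_manpage
#
#     setup(name='myapp',
#           cmdclass={'build_manpage': build_manpage},
#           options={'build_manpage': {'output': 'myapp.1',
#                                      'parser': 'myapp.cli:get_parser'}})
#
# after which ``python setup.py build_manpage`` writes the man page to myapp.1.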
| andialbrecht/crunchyfrog | utils/command/build_manpage.py | Python | gpl-3.0 | 4,464 | 0.000224 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
# The environment helpers come from the semiauto harness (import paths assumed).
from semiauto import environment
from semiauto.environment import InProcessTestEnvironment
from test_sms import TestSms
def all(handler):
env = environment.get(InProcessTestEnvironment)
suite = unittest.TestSuite()
suite.addTest(TestSms("test_navigate", handler=handler))
return suite
| andreastt/certtest | semiauto/tests/__init__.py | Python | mpl-2.0 | 411 | 0.002433 |
# (c) 2015, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and an RDS instance
name and returns the endpoint port.
Example Usage:
{{ lookup('aws_rds_endpoint_port_from_instance_name', ('eu-west-1', 'mydb')) }}
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("aws_rds_endpoint_port_from_instance_name lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
region = terms[0][0]
instance_name = terms[0][1]
session=boto3.session.Session(region_name=region)
try:
rds_client=session.client('rds')
except botocore.exceptions.NoRegionError:
raise AnsibleError("AWS region not specified.")
result=rds_client.describe_db_instances(DBInstanceIdentifier=instance_name)
if result and result.get('DBInstances'):
            # The endpoint 'Port' attribute is an integer; return it as a string,
            # since lookup plugins are expected to return a list of strings.
            return [str(result.get('DBInstances')[0].get('Endpoint').get('Port'))]
return None
| jonhadfield/ansible-lookups | aws_rds_endpoint_port_from_instance_name.py | Python | mit | 1,218 | 0.004926 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from OpenSSL import crypto
from .base import BaseNetworkServiceScanModel
from ..types import *
from ..mixin import DomainNameMixin, S3Mixin
from lib import ConversionHelper
class SslSupportModel(BaseNetworkServiceScanModel):
"""
This is an Elasticsearch model for representing the results of an SSL support check.
"""
# Class Members
ssl_version = KeywordElasticsearchType(
help_text="The SSL/TLS version that the SSL support was checked against.",
)
supported = BooleanElasticsearchType(
help_text="Whether or not the referenced SSL/TLS version was supported.",
)
accepted_ciphers = KeywordElasticsearchType(
help_text="The ciphers that were supported for the referenced SSL/TLS version.",
)
rejected_ciphers = KeywordElasticsearchType(
help_text="The ciphers that were rejected for the referenced SSL/TLS version.",
)
errored_ciphers = KeywordElasticsearchType(
help_text="The ciphers that threw errors for the referenced SSL/TLS version.",
)
preferred_cipher = KeywordElasticsearchType(
help_text="The preferred cipher for the referenced SSL/TLS version.",
)
pyopenssl_protocol = KeywordElasticsearchType(
help_text="A string depicting the pyopenssl protocol that was used for testing.",
)
# Instantiation
def __init__(
self,
ssl_version=None,
supported=None,
accepted_ciphers=None,
rejected_ciphers=None,
errored_ciphers=None,
preferred_cipher=None,
**kwargs
):
super(SslSupportModel, self).__init__(**kwargs)
self.ssl_version = ssl_version
self.supported = supported
self.accepted_ciphers = accepted_ciphers
self.rejected_ciphers = rejected_ciphers
self.errored_ciphers = errored_ciphers
self.preferred_cipher = preferred_cipher
if ssl_version is not None:
self.pyopenssl_protocol = ConversionHelper.pyopenssl_protocol_name_from_ssl_version(ssl_version)
else:
self.pyopenssl_protocol = None
# Static Methods
# Class Methods
@classmethod
def _populate_dummy(cls, to_populate):
from lib import WsFaker, RandomHelper
to_populate.ssl_version = WsFaker.get_ssl_version_name()
to_populate.pyopenssl_protocol = ConversionHelper.pyopenssl_protocol_name_from_ssl_version(
to_populate.ssl_version
)
to_populate.supported = RandomHelper.flip_coin()
to_populate.accepted_ciphers = WsFaker.get_words()
to_populate.rejected_ciphers = WsFaker.get_words()
to_populate.errored_ciphers = WsFaker.get_words()
to_populate.preferred_cipher = WsFaker.get_words(1)[0]
return to_populate
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class SslVulnerabilityModel(BaseNetworkServiceScanModel):
"""
This is an Elasticsearch model for representing the results of a single check for a single
SSL vulnerability.
"""
# Class Members
vuln_test_name = KeywordElasticsearchType(
help_text="A string depicting the name of the vulnerability test that was conducted.",
)
test_errored = BooleanElasticsearchType(
help_text="Whether or not the vulnerability test threw an exception.",
)
test_results = KeywordBooleanKeyValueElasticsearchType(
help_text="The results of the SSL/TLS vulnerability check.",
)
# Instantiation
def __init__(self, vuln_test_name=None, test_errored=None, test_results=None, **kwargs):
super(SslVulnerabilityModel, self).__init__(**kwargs)
self.vuln_test_name = vuln_test_name
self.test_errored = test_errored
self.test_results = test_results
# Static Methods
# Class Methods
@classmethod
def _populate_dummy(cls, to_populate):
from lib import WsFaker, RandomHelper
to_populate.vuln_test_name = WsFaker.get_word()
to_populate.test_errored = RandomHelper.flip_coin()
to_populate.test_results = WsFaker.get_ssl_vuln_test_results()
return to_populate
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class SslCertificateModel(BaseNetworkServiceScanModel, DomainNameMixin, S3Mixin):
"""
This is an Elasticsearch model for representing an SSL certificate.
"""
# Class Members
ssl_version = KeywordElasticsearchType(
help_text="The SSL/TLS version that was used to retrieve the certificate.",
)
certificate_hash = KeywordElasticsearchType(
help_text="The hash of the SSL/TLS certificate.",
)
country = KeywordElasticsearchType(
help_text="The country code for where the SSL/TLS certificate is registered.",
)
state = KeywordElasticsearchType(
help_text="The state code for where the SSL/TLS certificate is registered.",
)
locality = KeywordElasticsearchType(
help_text="The locality code for where the SSL/TLS certificate is registered.",
)
organization = KeywordElasticsearchType(
help_text="The name of the organization that the SSL certificate was registered for.",
)
organizational_unit = KeywordElasticsearchType(
help_text="The organizational unit code for where the SSL/TLS certificate is registered.",
)
common_name = KeywordElasticsearchType(
help_text="The contents of the CNAME field within the SSL certificate.",
)
# Instantiation
def __init__(
self,
ssl_version=None,
certificate_hash=None,
country=None,
state=None,
locality=None,
organization=None,
organizational_unit=None,
common_name=None,
**kwargs
):
super(SslCertificateModel, self).__init__(**kwargs)
self.ssl_version = ssl_version
self.certificate_hash = certificate_hash
self.country = country
self.state = state
self.locality = locality
self.organization = organization
self.organizational_unit = organizational_unit
self.common_name = common_name
if common_name is not None and not common_name.startswith("*"):
self.domain_names = [common_name]
else:
self.domain_names = []
# Static Methods
# Class Methods
@classmethod
def from_x509_certificate(
cls,
certificate=None,
cert_output_type=crypto.FILETYPE_PEM,
**kwargs
):
"""
Create and return an SslCertificateModel based on the contents of the given SSL certificate
and other arguments.
:param certificate: An OpenSSL certificate.
:param cert_output_type: The certificate output type to calculate a hash over.
        :return: The newly-created SslCertificateModel.
"""
to_return = cls(**kwargs)
to_return = cls.populate_from_x509_certificate(
certificate=certificate,
cert_output_type=cert_output_type,
to_populate=to_return,
)
return to_return
@classmethod
def populate_from_x509_certificate(cls, certificate=None, cert_output_type=crypto.FILETYPE_PEM, to_populate=None):
"""
Populate the contents of to_populate based on the contents of the given SSL certificate.
:param certificate: The SSL certificate to process.
:param cert_output_type: The SSL certificate type.
:param to_populate: The Elasticsearch model to populate.
:return: The updated model.
"""
cert_subject = certificate.get_subject()
certificate_contents = {
"country": cert_subject.C,
"state": cert_subject.ST,
"locality": cert_subject.L,
"organization": cert_subject.O,
"organizational_unit": cert_subject.OU,
"common_name": cert_subject.CN,
"certificate_hash": ConversionHelper.ssl_certificate_to_hash(
certificate=certificate,
output_type=cert_output_type,
)
}
for k, v in certificate_contents.iteritems():
setattr(to_populate, k, v)
return to_populate
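    # A minimal usage sketch for the two helpers above (caller-side code is
    # assumed; the PEM path and any extra keyword arguments are placeholders):
    #
    #     from OpenSSL import crypto
    #     with open('server.pem') as pem_file:
    #         x509 = crypto.load_certificate(crypto.FILETYPE_PEM, pem_file.read())
    #     cert_model = SslCertificateModel.from_x509_certificate(certificate=x509)
    #
    # The subject fields (country, state, locality, organization, organizational
    # unit, common name) and the certificate hash are then filled in from the
    # parsed certificate.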
@classmethod
def _populate_dummy(cls, to_populate):
from lib import WsFaker
cert_model = SslCertificateModel.from_x509_certificate(certificate=WsFaker.get_ssl_certificate(as_string=False))
for mapped_attribute in cert_model.all_mapping_fields:
cert_model_value = getattr(cert_model, mapped_attribute)
if cert_model_value is not None:
setattr(to_populate, mapped_attribute, cert_model_value)
to_populate.ssl_version = WsFaker.get_ssl_version_name()
return to_populate
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class SslSupportReportModel(BaseNetworkServiceScanModel):
"""
    This is an Elasticsearch model class containing aggregated and analyzed data about a network
    service's SSL/TLS support.
"""
# Class Members
cert_serial_number = KeywordElasticsearchType(
help_text="The serial number of the service's SSL certificate.",
)
cert_version = IntElasticsearchType(
help_text="The version number of the service's SSL certificate.",
)
cert_has_start_time = BooleanElasticsearchType(
help_text="Whether or not the referenced certificate has a valid_after field.",
)
cert_start_time = DateElasticsearchType(
help_text="The contents of the certificate's valid_after field.",
)
cert_has_invalid_time = BooleanElasticsearchType(
help_text="Whether or not the referenced certificate has a valid_before field.",
)
cert_invalid_time = DateElasticsearchType(
help_text="The contents of the certificate's valid_before field.",
)
cert_expired = BooleanElasticsearchType(
help_text="Whether or not the certificate was expired at the time of collection.",
)
cert_md5_digest = KeywordElasticsearchType(
help_text="The MD5 digest for the certificate.",
)
cert_sha1_digest = KeywordElasticsearchType(
help_text="The SHA1 digest for the certificate.",
)
cert_sha256_digest = KeywordElasticsearchType(
help_text="The SHA256 digest for the certificate.",
)
cert_sha512_digest = KeywordElasticsearchType(
help_text="The SHA512 digest for the certificate.",
)
cert_key_bits = IntElasticsearchType(
help_text="The number of bits for the key associated with the certificate.",
)
cert_key_type = KeywordElasticsearchType(
help_text="The type of the key found within the certificate.",
)
cert_public_key = KeywordElasticsearchType(
help_text="The contents of the public key found within the referenced certificate.",
)
cert_content = KeywordElasticsearchType(
help_text="The contents of the certificate.",
)
cert_issuer_common_name = KeywordElasticsearchType(
help_text="The common name associated with the certificate's issuer.",
)
cert_issuer_country = KeywordElasticsearchType(
help_text="The country code associated with the certificate's issuer.",
)
cert_issuer_email = KeywordElasticsearchType(
help_text="The email address associated with the certificate's issuer.",
)
cert_issuer_hash = KeywordElasticsearchType(
help_text="The hash associated with the certificate's issuer.",
)
cert_issuer_locality = KeywordElasticsearchType(
help_text="The locality code associated with the certificate's issuer.",
)
cert_issuer_organization = KeywordElasticsearchType(
help_text="The name of the organization associated with the certificate's issuer.",
)
cert_issuer_organizational_unit = KeywordElasticsearchType(
help_text="The unit of the organization associated with the certificate's issuer.",
)
cert_issuer_state = KeywordElasticsearchType(
help_text="The state code associated with the certificate's issuer.",
)
cert_subject_common_name = KeywordElasticsearchType(
help_text="The common name associated with the certificate's subject.",
)
cert_subject_country = KeywordElasticsearchType(
help_text="The country code associated with the certificate's subject.",
)
cert_subject_email = KeywordElasticsearchType(
help_text="The email address associated with the certificate's subject.",
)
cert_subject_hash = KeywordElasticsearchType(
help_text="The hash associated with the certificate's subject.",
)
cert_subject_locality = KeywordElasticsearchType(
help_text="The locality code associated with the certificate's subject.",
)
cert_subject_organization = KeywordElasticsearchType(
help_text="The organization associated with the certificate's subject.",
)
cert_subject_organizational_unit = KeywordElasticsearchType(
help_text="The unit of the organization associated with the certificate's subject.",
)
cert_subject_state = KeywordElasticsearchType(
help_text="The state code associated with the certificate's subject.",
)
cert_extension_names = KeywordElasticsearchType(
help_text="The names of the recognized certificate extensions found within the "
"certificate.",
)
cert_has_authority_key_id = BooleanElasticsearchType(
help_text="Whether or not the certificate has the authority_key_id extension.",
)
cert_authority_key_id = KeywordElasticsearchType(
help_text="The contents of the certificate's authority_key_id extension.",
)
cert_has_subject_key_id = BooleanElasticsearchType(
help_text="Whether or not the certificate has the subject_key_id extension.",
)
cert_subject_key_id = KeywordElasticsearchType(
help_text="The contents of the certificate's subject_key_id extension.",
)
cert_has_extended_key_usage = BooleanElasticsearchType(
help_text="Whether or not the certificate has the extended_key_usage extension.",
)
cert_extended_key_usage = KeywordElasticsearchType(
help_text="The contents of the certificate's extended_key_usage extension.",
)
cert_has_certificate_policies = BooleanElasticsearchType(
help_text="Whether or not the certificate has the certificate_policies extension.",
)
cert_certificate_policies = KeywordElasticsearchType(
help_text="The content of the certificate's certificate_policies extension.",
)
cert_has_crl_distribution_points = BooleanElasticsearchType(
help_text="Whether or not the certificate has the crl_distribution_points extension.",
)
cert_crl_distribution_points = KeywordElasticsearchType(
help_text="The content of the certificate's crl_distribution_points extension.",
)
cert_has_subject_alt_name = BooleanElasticsearchType(
help_text="Whether or not the certificate has the subject_alt_name extension.",
)
cert_subject_alt_name = KeywordElasticsearchType(
help_text="The content of the certificate's subject_alt_name extension.",
)
cert_has_authority_info_access = BooleanElasticsearchType(
help_text="Whether or not the certificate has the authority_info_access extension.",
)
cert_authority_info_access = KeywordElasticsearchType(
help_text="The content of the certificate's authority_info_access extension.",
)
cert_is_valid = BooleanElasticsearchType(
help_text="Whether or not the certificate was found to be valid.",
)
supports_fallback_scsv = BooleanElasticsearchType(
help_text="Whether or not the network service supports the fallback SCSV.",
)
is_vulnerable_to_heartbleed = BooleanElasticsearchType(
help_text="Whether or not the network service is vulnerable to heartbleed.",
)
is_vulnerable_to_ccs_injection = BooleanElasticsearchType(
help_text="Whether or not the network service is vulnerable to CCS injection.",
)
accepts_client_renegotiation = BooleanElasticsearchType(
help_text="Whether or not the network service accepts client renegotiation.",
)
supports_secure_renegotiation = BooleanElasticsearchType(
help_text="Whether or not the network service supports secure renegotiation.",
)
is_ticket_resumption_supported = BooleanElasticsearchType(
help_text="Whether or not the network service supports ticket resumption.",
)
supports_sslv3 = BooleanElasticsearchType(
help_text="Whether or not the network service supports SSLv3.",
)
supports_tlsv1 = BooleanElasticsearchType(
help_text="Whether or not the network service supports TLSv1.0.",
)
supports_tlsv1_1 = BooleanElasticsearchType(
help_text="Whether or not the network service supports TLSv1.1.",
)
supports_tlsv1_2 = BooleanElasticsearchType(
help_text="Whether or not the network service supports TLSv1.2.",
)
sslv3_preferred_cipher = KeywordElasticsearchType(
help_text="The preferred SSLv3 cipher for the network service.",
)
tlsv1_preferred_cipher = KeywordElasticsearchType(
help_text="The preferred TLSv1.0 cipher for the network service.",
)
tlsv1_1_preferred_cipher = KeywordElasticsearchType(
help_text="The preferred TLSv1.1 cipher for the network service.",
)
tlsv1_2_preferred_cipher = KeywordElasticsearchType(
help_text="The preferred TLSv1.2 cipher for the network service.",
)
sslv3_supported_ciphers = KeywordElasticsearchType(
help_text="The supported SSLV3 ciphers for the network service.",
)
tlsv1_supported_ciphers = KeywordElasticsearchType(
help_text="The supported TLSv1.0 ciphers for the network service.",
)
tlsv1_1_supported_ciphers = KeywordElasticsearchType(
help_text="The supported TLSv1.1 ciphers for the network service.",
)
tlsv1_2_supported_ciphers = KeywordElasticsearchType(
help_text="The supported TLSv1.2 ciphers for the network service.",
)
is_vulnerable = BooleanElasticsearchType(
help_text="Whether or not the network service suffers from any SSL/TLS-related "
"vulnerabilities.",
)
cert_is_trusted = BooleanElasticsearchType(
help_text="Whether or not the certificate is trusted.",
)
scan_completed_at = DateElasticsearchType(
help_text="The time at which the investigation of the referenced SSL/TLS network service "
"completed.",
)
cert_extensions = KeywordTextKeyValueElasticsearchType(
key_name="extension_name",
value_name="extension_content",
help_text="The certificate extensions found within the certificate.",
)
supports_sslv2 = BooleanElasticsearchType(
help_text="Whether or not the network service supports SSLv2.",
)
sslv2_preferred_cipher = KeywordElasticsearchType(
help_text="The preferred SSLv2 cipher for the network service.",
)
sslv2_supported_ciphers = KeywordElasticsearchType(
help_text="The supported SSLV3 ciphers for the network service.",
)
heartbleed_test_errored = BooleanElasticsearchType(
help_text="Whether or not the heartbleed test threw an exception.",
)
fallback_scsv_test_errored = BooleanElasticsearchType(
help_text="Whether or not the fallback SCSV test threw an exception.",
)
ccs_injection_test_errored = BooleanElasticsearchType(
help_text="Whether or not the CCS injection test threw an exception.",
)
session_renegotiation_test_errored = BooleanElasticsearchType(
help_text="Whether or not the session renegotiation test threw an exception.",
)
session_resumption_test_errored = BooleanElasticsearchType(
help_text="Whether or not the session resumption test threw an exception.",
)
cert_certificate_policy_oids = KeywordElasticsearchType(
help_text="The OIDs associated with the policies found within the certificate.",
)
cert_is_extended_validation = BooleanElasticsearchType(
help_text="Whether or not the certificate is an extended validation certificate.",
)
# Instantiation
def __init__(
self,
cert_serial_number=None,
cert_version=None,
cert_has_start_time=None,
cert_start_time=None,
cert_has_invalid_time=None,
cert_invalid_time=None,
cert_expired=None,
cert_md5_digest=None,
cert_sha1_digest=None,
cert_sha256_digest=None,
cert_sha512_digest=None,
cert_key_bits=None,
cert_key_type=None,
cert_public_key=None,
cert_content=None,
cert_issuer_common_name=None,
cert_issuer_country=None,
cert_issuer_email=None,
cert_issuer_hash=None,
cert_issuer_locality=None,
cert_issuer_organization=None,
cert_issuer_organizational_unit=None,
cert_issuer_state=None,
cert_subject_common_name=None,
cert_subject_country=None,
cert_subject_email=None,
cert_subject_hash=None,
cert_subject_locality=None,
cert_subject_organization=None,
cert_subject_organizational_unit=None,
cert_subject_state=None,
cert_extension_names=None,
cert_has_authority_key_id=None,
cert_authority_key_id=None,
cert_has_subject_key_id=None,
cert_subject_key_id=None,
cert_has_extended_key_usage=None,
cert_extended_key_usage=None,
cert_has_certificate_policies=None,
cert_certificate_policies=None,
cert_has_crl_distribution_points=None,
cert_crl_distribution_points=None,
cert_has_subject_alt_name=None,
cert_subject_alt_name=None,
cert_has_authority_info_access=None,
cert_authority_info_access=None,
cert_is_valid=None,
supports_fallback_scsv=None,
is_vulnerable_to_heartbleed=None,
is_vulnerable_to_ccs_injection=None,
accepts_client_renegotiation=None,
supports_secure_renegotiation=None,
is_ticket_resumption_supported=None,
supports_sslv3=None,
supports_tlsv1=None,
supports_tlsv1_1=None,
supports_tlsv1_2=None,
sslv3_preferred_cipher=None,
tlsv1_preferred_cipher=None,
tlsv1_1_preferred_cipher=None,
tlsv1_2_preferred_cipher=None,
sslv3_supported_ciphers=None,
tlsv1_supported_ciphers=None,
tlsv1_1_supported_ciphers=None,
tlsv1_2_supported_ciphers=None,
is_vulnerable=None,
cert_is_trusted=None,
scan_completed_at=None,
cert_extensions=None,
supports_sslv2=None,
sslv2_preferred_cipher=None,
sslv2_supported_ciphers=None,
heartbleed_test_errored=None,
fallback_scsv_test_errored=None,
ccs_injection_test_errored=None,
session_renegotiation_test_errored=None,
session_resumption_test_errored=None,
cert_certificate_policy_oids=None,
cert_is_extended_validation=None,
**kwargs
):
super(SslSupportReportModel, self).__init__(**kwargs)
self.cert_serial_number = cert_serial_number
self.cert_version = cert_version
self.cert_has_start_time = cert_has_start_time
self.cert_start_time = cert_start_time
self.cert_has_invalid_time = cert_has_invalid_time
self.cert_invalid_time = cert_invalid_time
self.cert_expired = cert_expired
self.cert_md5_digest = cert_md5_digest
self.cert_sha1_digest = cert_sha1_digest
self.cert_sha256_digest = cert_sha256_digest
self.cert_sha512_digest = cert_sha512_digest
self.cert_key_bits = cert_key_bits
self.cert_key_type = cert_key_type
self.cert_public_key = cert_public_key
self.cert_content = cert_content
self.cert_issuer_common_name = cert_issuer_common_name
self.cert_issuer_country = cert_issuer_country
self.cert_issuer_email = cert_issuer_email
self.cert_issuer_hash = cert_issuer_hash
self.cert_issuer_locality = cert_issuer_locality
self.cert_issuer_organization = cert_issuer_organization
self.cert_issuer_organizational_unit = cert_issuer_organizational_unit
self.cert_issuer_state = cert_issuer_state
self.cert_subject_common_name = cert_subject_common_name
self.cert_subject_country = cert_subject_country
self.cert_subject_email = cert_subject_email
self.cert_subject_hash = cert_subject_hash
self.cert_subject_locality = cert_subject_locality
self.cert_subject_organization = cert_subject_organization
self.cert_subject_organizational_unit = cert_subject_organizational_unit
self.cert_subject_state = cert_subject_state
self.cert_extension_names = cert_extension_names
self.cert_has_authority_key_id = cert_has_authority_key_id
self.cert_authority_key_id = cert_authority_key_id
self.cert_has_subject_key_id = cert_has_subject_key_id
self.cert_subject_key_id = cert_subject_key_id
self.cert_has_extended_key_usage = cert_has_extended_key_usage
self.cert_extended_key_usage = cert_extended_key_usage
self.cert_has_certificate_policies = cert_has_certificate_policies
self.cert_certificate_policies = cert_certificate_policies
self.cert_has_crl_distribution_points = cert_has_crl_distribution_points
self.cert_crl_distribution_points = cert_crl_distribution_points
self.cert_has_subject_alt_name = cert_has_subject_alt_name
self.cert_subject_alt_name = cert_subject_alt_name
self.cert_has_authority_info_access = cert_has_authority_info_access
self.cert_authority_info_access = cert_authority_info_access
self.cert_is_valid = cert_is_valid
self.supports_fallback_scsv = supports_fallback_scsv
self.is_vulnerable_to_heartbleed = is_vulnerable_to_heartbleed
self.is_vulnerable_to_ccs_injection = is_vulnerable_to_ccs_injection
self.accepts_client_renegotiation = accepts_client_renegotiation
self.supports_secure_renegotiation = supports_secure_renegotiation
self.is_ticket_resumption_supported = is_ticket_resumption_supported
self.supports_sslv3 = supports_sslv3
self.supports_tlsv1 = supports_tlsv1
self.supports_tlsv1_1 = supports_tlsv1_1
self.supports_tlsv1_2 = supports_tlsv1_2
self.sslv3_preferred_cipher = sslv3_preferred_cipher
self.tlsv1_preferred_cipher = tlsv1_preferred_cipher
self.tlsv1_1_preferred_cipher = tlsv1_1_preferred_cipher
self.tlsv1_2_preferred_cipher = tlsv1_2_preferred_cipher
self.sslv3_supported_ciphers = sslv3_supported_ciphers
self.tlsv1_supported_ciphers = tlsv1_supported_ciphers
self.tlsv1_1_supported_ciphers = tlsv1_1_supported_ciphers
self.tlsv1_2_supported_ciphers = tlsv1_2_supported_ciphers
self.is_vulnerable = is_vulnerable
self.cert_is_trusted = cert_is_trusted
self.scan_completed_at = scan_completed_at
self.cert_extensions = cert_extensions
self.supports_sslv2 = supports_sslv2
self.sslv2_preferred_cipher = sslv2_preferred_cipher
self.sslv2_supported_ciphers = sslv2_supported_ciphers
self.heartbleed_test_errored = heartbleed_test_errored
self.fallback_scsv_test_errored = fallback_scsv_test_errored
self.ccs_injection_test_errored = ccs_injection_test_errored
self.session_renegotiation_test_errored = session_renegotiation_test_errored
self.session_resumption_test_errored = session_resumption_test_errored
self.cert_certificate_policy_oids = cert_certificate_policy_oids
self.cert_is_extended_validation = cert_is_extended_validation
# Static Methods
# Class Methods
@classmethod
def _populate_dummy(cls, to_populate):
from lib import WsFaker, RandomHelper
to_populate.cert_serial_number = ":".join(WsFaker.get_words())
to_populate.cert_version = WsFaker.get_random_int()
to_populate.cert_has_start_time = RandomHelper.flip_coin()
to_populate.cert_start_time = WsFaker.get_time_in_past()
to_populate.cert_has_invalid_time = RandomHelper.flip_coin()
to_populate.cert_invalid_time = WsFaker.get_time_in_future()
to_populate.cert_expired = RandomHelper.flip_coin()
to_populate.cert_md5_digest = WsFaker.get_sha256_string()
to_populate.cert_sha1_digest = WsFaker.get_sha256_string()
to_populate.cert_sha256_digest = WsFaker.get_sha256_string()
to_populate.cert_sha512_digest = WsFaker.get_sha256_string()
to_populate.cert_key_bits = WsFaker.get_random_int()
to_populate.cert_key_type = "RSA"
to_populate.cert_public_key = ".".join(WsFaker.get_words(200))
to_populate.cert_content = ".".join(WsFaker.get_words(200))
to_populate.cert_issuer_common_name = WsFaker.get_words(1)[0]
to_populate.cert_issuer_country = WsFaker.get_words(1)[0]
to_populate.cert_issuer_email = WsFaker.get_words(1)[0]
to_populate.cert_issuer_hash = WsFaker.get_words(1)[0]
to_populate.cert_issuer_locality = WsFaker.get_words(1)[0]
to_populate.cert_issuer_organization = WsFaker.get_words(1)[0]
to_populate.cert_issuer_organizational_unit = WsFaker.get_words(1)[0]
to_populate.cert_issuer_state = WsFaker.get_words(1)[0]
to_populate.cert_subject_common_name = WsFaker.get_domain_name()
to_populate.cert_subject_country = WsFaker.get_words(1)[0]
to_populate.cert_subject_email = WsFaker.get_words(1)[0]
to_populate.cert_subject_hash = WsFaker.get_words(1)[0]
to_populate.cert_subject_locality = WsFaker.get_words(1)[0]
to_populate.cert_subject_organization = WsFaker.get_words(1)[0]
to_populate.cert_subject_organizational_unit = WsFaker.get_words(1)[0]
to_populate.cert_subject_state = WsFaker.get_words(1)[0]
to_populate.cert_extension_names = WsFaker.get_words(5)
to_populate.cert_has_authority_key_id = RandomHelper.flip_coin()
to_populate.cert_authority_key_id = WsFaker.get_words(1)[0]
to_populate.cert_has_subject_key_id = RandomHelper.flip_coin()
to_populate.cert_subject_key_id = WsFaker.get_words(1)[0]
to_populate.cert_has_extended_key_usage = RandomHelper.flip_coin()
to_populate.cert_extended_key_usage = WsFaker.get_words(1)[0]
to_populate.cert_has_certificate_policies = RandomHelper.flip_coin()
to_populate.cert_certificate_policies = WsFaker.get_words(1)[0]
to_populate.cert_has_crl_distribution_points = RandomHelper.flip_coin()
to_populate.cert_crl_distribution_points = WsFaker.get_words(1)[0]
to_populate.cert_has_subject_alt_name = RandomHelper.flip_coin()
to_populate.cert_subject_alt_name = WsFaker.get_words(1)[0]
to_populate.cert_has_authority_info_access = RandomHelper.flip_coin()
to_populate.cert_authority_info_access = WsFaker.get_words(1)[0]
to_populate.cert_is_valid = RandomHelper.flip_coin()
to_populate.supports_fallback_scsv = RandomHelper.flip_coin()
to_populate.is_vulnerable_to_heartbleed = RandomHelper.flip_coin()
to_populate.is_vulnerable_to_ccs_injection = RandomHelper.flip_coin()
to_populate.accepts_client_renegotiation = RandomHelper.flip_coin()
to_populate.supports_secure_renegotiation = RandomHelper.flip_coin()
to_populate.is_ticket_resumption_supported = RandomHelper.flip_coin()
to_populate.supports_sslv3 = RandomHelper.flip_coin()
to_populate.supports_tlsv1 = RandomHelper.flip_coin()
to_populate.supports_tlsv1_1 = RandomHelper.flip_coin()
to_populate.supports_tlsv1_2 = RandomHelper.flip_coin()
to_populate.sslv3_preferred_cipher = WsFaker.get_words()
to_populate.tlsv1_preferred_cipher = WsFaker.get_words()
to_populate.tlsv1_1_preferred_cipher = WsFaker.get_words()
to_populate.tlsv1_2_preferred_cipher = WsFaker.get_words()
to_populate.sslv3_supported_ciphers = WsFaker.get_words()
to_populate.tlsv1_supported_ciphers = WsFaker.get_words()
to_populate.tlsv1_1_supported_ciphers = WsFaker.get_words()
to_populate.tlsv1_2_supported_ciphers = WsFaker.get_words()
to_populate.is_vulnerable = RandomHelper.flip_coin()
to_populate.cert_is_trusted = RandomHelper.flip_coin()
to_populate.scan_completed_at = WsFaker.get_time_in_past()
to_populate.cert_extensions = WsFaker.get_certificate_extensions()
to_populate.supports_sslv2 = RandomHelper.flip_coin()
to_populate.sslv2_preferred_cipher = WsFaker.get_words()
to_populate.sslv2_supported_ciphers = WsFaker.get_words()
to_populate.heartbleed_test_errored = RandomHelper.flip_coin()
to_populate.fallback_scsv_test_errored = RandomHelper.flip_coin()
to_populate.ccs_injection_test_errored = RandomHelper.flip_coin()
to_populate.session_renegotiation_test_errored = RandomHelper.flip_coin()
to_populate.session_resumption_test_errored = RandomHelper.flip_coin()
to_populate.cert_certificate_policy_oids = WsFaker.get_words()
to_populate.cert_is_extended_validation = RandomHelper.flip_coin()
return to_populate
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
| lavalamp-/ws-backend-community | wselasticsearch/models/services/ssl.py | Python | gpl-3.0 | 34,499 | 0.002058 |
import django.dispatch
signal_fetch_latest_metadata = django.dispatch.Signal()
| audreyr/opencomparison | package/signals.py | Python | mit | 82 | 0.012195 |
# Copyright (c) 2011 OpenStack, LLC
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For HostManager
"""
import copy
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six import moves
from manila import context
from manila import db
from manila import exception
from manila.scheduler.filters import base_host
from manila.scheduler import host_manager
from manila import test
from manila.tests.scheduler import fakes
from manila import utils
CONF = cfg.CONF
class FakeFilterClass1(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
@ddt.ddt
class HostManagerTestCase(test.TestCase):
"""Test case for HostManager class."""
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
for x in moves.range(1, 5)]
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
# Test 'share' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
self.assertEqual(1, len(filter_classes))
self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
def _verify_result(self, info, result):
for x in info['got_fprops']:
self.assertEqual(info['expected_fprops'], x)
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['got_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {
'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties,
}
with mock.patch.object(self.host_manager, '_choose_host_filters',
mock.Mock(return_value=[FakeFilterClass1])):
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.mock_object(FakeFilterClass1, '_filter_one', fake_filter_one)
result, last_filter = self.host_manager.get_filtered_hosts(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
self.host_manager._choose_host_filters.assert_called_once_with(
mock.ANY)
def test_update_service_capabilities_for_shares(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1)
host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1)
host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1)
service_name = 'share'
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31337)):
self.host_manager.update_service_capabilities(
service_name, 'host1', host1_share_capabs)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31338)):
self.host_manager.update_service_capabilities(
service_name, 'host2', host2_share_capabs)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31339)):
self.host_manager.update_service_capabilities(
service_name, 'host3', host3_share_capabs)
timeutils.utcnow.assert_called_once_with()
# Make sure dictionary isn't re-assigned
self.assertEqual(service_states, self.host_manager.service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(1, host1_share_capabs['timestamp'])
host1_share_capabs['timestamp'] = 31337
host2_share_capabs['timestamp'] = 31338
host3_share_capabs['timestamp'] = 31339
expected = {
'host1': host1_share_capabs,
'host2': host2_share_capabs,
'host3': host3_share_capabs,
}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states_share(self):
fake_context = context.RequestContext('user', 'project')
topic = CONF.share_topic
tmp_pools = copy.deepcopy(fakes.SHARE_SERVICES_WITH_POOLS)
tmp_enable_pools = tmp_pools[:-2]
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=tmp_enable_pools))
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
# Get service
self.host_manager.get_all_host_states_share(fake_context)
            # Disable one of the services
tmp_enable_pools.pop()
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=tmp_enable_pools))
# Get service again
self.host_manager.get_all_host_states_share(fake_context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_state_map))
# Check that service is up
for i in moves.range(3):
share_node = fakes.SHARE_SERVICES_WITH_POOLS[i]
host = share_node['host']
self.assertEqual(share_node, host_state_map[host].service)
db.service_get_all_by_topic.assert_called_once_with(
fake_context, topic)
def test_get_pools_no_pools(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SERVICE_STATES_NO_POOLS):
res = self.host_manager.get_pools(context=fake_context)
expected = [
{
'name': 'host1#AAA',
'host': 'host1',
'backend': None,
'pool': 'AAA',
'capabilities': {
'timestamp': None,
'share_backend_name': 'AAA',
'free_capacity_gb': 200,
'driver_version': None,
'total_capacity_gb': 512,
'reserved_percentage': 0,
'provisioned_capacity_gb': 312,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': False,
'create_share_from_snapshot_support': False,
'revert_to_snapshot_support': True,
'mount_snapshot_support': True,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host2@back1#BBB',
'host': 'host2',
'backend': 'back1',
'pool': 'BBB',
'capabilities': {
'timestamp': None,
'share_backend_name': 'BBB',
'free_capacity_gb': 100,
'driver_version': None,
'total_capacity_gb': 256,
'reserved_percentage': 0,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host2@back2#CCC',
'host': 'host2',
'backend': 'back2',
'pool': 'CCC',
'capabilities': {
'timestamp': None,
'share_backend_name': 'CCC',
'free_capacity_gb': 700,
'driver_version': None,
'total_capacity_gb': 10000,
'reserved_percentage': 0,
'provisioned_capacity_gb': 50000,
'max_over_subscription_ratio': 20.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
},
]
self.assertIsInstance(res, list)
self.assertEqual(len(expected), len(res))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
res = self.host_manager.get_pools(fake_context)
expected = [
{
'name': 'host1@AAA#pool1',
'host': 'host1',
'backend': 'AAA',
'pool': 'pool1',
'capabilities': {
'pool_name': 'pool1',
'timestamp': None,
'share_backend_name': 'AAA',
'free_capacity_gb': 41,
'driver_version': None,
'total_capacity_gb': 51,
'reserved_percentage': 0,
'provisioned_capacity_gb': 10,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': True,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host2@BBB#pool2',
'host': 'host2',
'backend': 'BBB',
'pool': 'pool2',
'capabilities': {
'pool_name': 'pool2',
'timestamp': None,
'share_backend_name': 'BBB',
'free_capacity_gb': 42,
'driver_version': None,
'total_capacity_gb': 52,
'reserved_percentage': 0,
'provisioned_capacity_gb': 60,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host3@CCC#pool3',
'host': 'host3',
'backend': 'CCC',
'pool': 'pool3',
'capabilities': {
'pool_name': 'pool3',
'timestamp': None,
'share_backend_name': 'CCC',
'free_capacity_gb': 43,
'driver_version': None,
'total_capacity_gb': 53,
'reserved_percentage': 0,
'provisioned_capacity_gb': 100,
'max_over_subscription_ratio': 20.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host4@DDD#pool4a',
'host': 'host4',
'backend': 'DDD',
'pool': 'pool4a',
'capabilities': {
'pool_name': 'pool4a',
'timestamp': None,
'share_backend_name': 'DDD',
'free_capacity_gb': 441,
'driver_version': None,
'total_capacity_gb': 541,
'reserved_percentage': 0,
'provisioned_capacity_gb': 800,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host4@DDD#pool4b',
'host': 'host4',
'backend': 'DDD',
'pool': 'pool4b',
'capabilities': {
'pool_name': 'pool4b',
'timestamp': None,
'share_backend_name': 'DDD',
'free_capacity_gb': 442,
'driver_version': None,
'total_capacity_gb': 542,
'reserved_percentage': 0,
'provisioned_capacity_gb': 2000,
'max_over_subscription_ratio': 10.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
},
]
self.assertIsInstance(res, list)
self.assertIsInstance(self.host_manager.host_state_map, dict)
self.assertEqual(len(expected), len(res))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools_host_down(self):
fake_context = context.RequestContext('user', 'project')
mock_service_is_up = self.mock_object(utils, 'service_is_up')
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SERVICE_STATES_NO_POOLS):
# Initialize host data with all services present
mock_service_is_up.side_effect = [True, True, True]
# Call once to update the host state map
self.host_manager.get_pools(fake_context)
self.assertEqual(len(fakes.SHARE_SERVICES_NO_POOLS),
len(self.host_manager.host_state_map))
# Then mock one host as down
mock_service_is_up.side_effect = [True, True, False]
res = self.host_manager.get_pools(fake_context)
expected = [
{
'name': 'host1#AAA',
'host': 'host1',
'backend': None,
'pool': 'AAA',
'capabilities': {
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': False,
'create_share_from_snapshot_support': False,
'revert_to_snapshot_support': True,
'mount_snapshot_support': True,
'share_backend_name': 'AAA',
'free_capacity_gb': 200,
'driver_version': None,
'total_capacity_gb': 512,
'reserved_percentage': 0,
'vendor_name': None,
'storage_protocol': None,
'provisioned_capacity_gb': 312,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
}, {
'name': 'host2@back1#BBB',
'host': 'host2',
'backend': 'back1',
'pool': 'BBB',
'capabilities': {
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'share_backend_name': 'BBB',
'free_capacity_gb': 100,
'driver_version': None,
'total_capacity_gb': 256,
'reserved_percentage': 0,
'vendor_name': None,
'storage_protocol': None,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
},
]
self.assertIsInstance(res, list)
self.assertIsInstance(self.host_manager.host_state_map, dict)
self.assertEqual(len(expected), len(res))
self.assertEqual(len(expected),
len(self.host_manager.host_state_map))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools_with_filters(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
res = self.host_manager.get_pools(
context=fake_context,
filters={'host': 'host2', 'pool': 'pool*',
'capabilities': {'dedupe': 'False'}})
expected = [
{
'name': 'host2@BBB#pool2',
'host': 'host2',
'backend': 'BBB',
'pool': 'pool2',
'capabilities': {
'pool_name': 'pool2',
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'mount_snapshot_support': False,
'share_backend_name': 'BBB',
'free_capacity_gb': 42,
'driver_version': None,
'total_capacity_gb': 52,
'reserved_percentage': 0,
'provisioned_capacity_gb': 60,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
'sg_consistent_snapshot_support': None,
},
},
]
self.assertEqual(len(expected), len(res))
self.assertEqual(sorted(expected), sorted(res))
@ddt.data(
None,
{},
{'key1': 'value1'},
{'capabilities': {'dedupe': 'False'}},
{'capabilities': {'dedupe': '<is> False'}},
{'key1': 'value1', 'key2': 'value*'},
{'key1': '.*', 'key2': '.*'},
)
def test_passes_filters_true(self, filter):
data = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'capabilities': {'dedupe': False},
}
self.assertTrue(self.host_manager._passes_filters(data, filter))
@ddt.data(
{'key1': 'value$'},
{'key4': 'value'},
{'capabilities': {'dedupe': 'True'}},
{'capabilities': {'dedupe': '<is> True'}},
{'key1': 'value1.+', 'key2': 'value*'},
)
def test_passes_filters_false(self, filter):
data = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'capabilities': {'dedupe': False},
}
self.assertFalse(self.host_manager._passes_filters(data, filter))
class HostStateTestCase(test.TestCase):
"""Test case for HostState class."""
def test_update_from_share_capability_nopool(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
share_capability = {'total_capacity_gb': 0,
'free_capacity_gb': 100,
'reserved_percentage': 0,
'timestamp': None,
'ipv4_support': True,
'ipv6_support': False}
fake_host = host_manager.HostState('host1', share_capability)
self.assertIsNone(fake_host.free_capacity_gb)
fake_host.update_from_share_capability(share_capability,
context=fake_context)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
self.assertTrue(fake_host.ipv4_support)
self.assertFalse(fake_host.ipv6_support)
# Pool stats has been updated
self.assertEqual(0, fake_host.pools['_pool0'].total_capacity_gb)
self.assertEqual(100, fake_host.pools['_pool0'].free_capacity_gb)
self.assertTrue(fake_host.pools['_pool0'].ipv4_support)
self.assertFalse(fake_host.pools['_pool0'].ipv6_support)
# Test update for existing host state
share_capability.update(dict(total_capacity_gb=1000))
fake_host.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb)
# Test update for existing host state with different backend name
share_capability.update(dict(share_backend_name='magic'))
fake_host.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
self.assertEqual(100, fake_host.pools['magic'].free_capacity_gb)
# 'pool0' becomes nonactive pool, and is deleted
self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])
def test_update_from_share_capability_with_pools(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1#pool1')
self.assertIsNone(fake_host.free_capacity_gb)
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.1',
'storage_protocol': 'NFS_CIFS',
'ipv4_support': True,
'ipv6_support': False,
'pools': [
{'pool_name': 'pool1',
'total_capacity_gb': 500,
'free_capacity_gb': 230,
'allocated_capacity_gb': 270,
'qos': 'False',
'reserved_percentage': 0,
'dying_disks': 100,
'super_hero_1': 'spider-man',
'super_hero_2': 'flash',
'super_hero_3': 'neoncat',
},
{'pool_name': 'pool2',
'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'qos': 'False',
'reserved_percentage': 0,
'dying_disks': 200,
'super_hero_1': 'superman',
'super_hero_2': 'Hulk',
}
],
'timestamp': None,
}
fake_host.update_from_share_capability(capability,
context=fake_context)
self.assertEqual('Backend1', fake_host.share_backend_name)
self.assertEqual('NFS_CIFS', fake_host.storage_protocol)
self.assertEqual('OpenStack', fake_host.vendor_name)
self.assertEqual('1.1', fake_host.driver_version)
self.assertTrue(fake_host.ipv4_support)
self.assertFalse(fake_host.ipv6_support)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(2, len(fake_host.pools))
self.assertEqual(500, fake_host.pools['pool1'].total_capacity_gb)
self.assertEqual(230, fake_host.pools['pool1'].free_capacity_gb)
self.assertTrue(fake_host.pools['pool1'].ipv4_support)
self.assertFalse(fake_host.pools['pool1'].ipv6_support)
self.assertEqual(1024, fake_host.pools['pool2'].total_capacity_gb)
self.assertEqual(1024, fake_host.pools['pool2'].free_capacity_gb)
self.assertTrue(fake_host.pools['pool2'].ipv4_support)
self.assertFalse(fake_host.pools['pool2'].ipv6_support)
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.0',
'storage_protocol': 'NFS_CIFS',
'pools': [
{'pool_name': 'pool3',
'total_capacity_gb': 10000,
'free_capacity_gb': 10000,
'allocated_capacity_gb': 0,
'qos': 'False',
'reserved_percentage': 0,
},
],
'timestamp': None,
}
# test update HostState Record
fake_host.update_from_share_capability(capability,
context=fake_context)
self.assertEqual('1.0', fake_host.driver_version)
# Non-active pool stats has been removed
self.assertEqual(1, len(fake_host.pools))
self.assertRaises(KeyError, lambda: fake_host.pools['pool1'])
self.assertRaises(KeyError, lambda: fake_host.pools['pool2'])
self.assertEqual(10000, fake_host.pools['pool3'].total_capacity_gb)
self.assertEqual(10000, fake_host.pools['pool3'].free_capacity_gb)
def test_update_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1,
'reserved_percentage': 0,
'timestamp': None
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1#_pool0')
self.assertIsNone(fake_host.free_capacity_gb)
fake_host.update_from_share_capability(share_capability,
context=fake_context)
# Backend level stats remain uninitialized
self.assertEqual(fake_host.total_capacity_gb, 0)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
'unknown')
self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
'unknown')
def test_consume_from_share_capability(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
share_size = 10
free_capacity = 100
fake_share = {'id': 'foo', 'size': share_size}
share_capability = {
'total_capacity_gb': free_capacity * 2,
'free_capacity_gb': free_capacity,
'reserved_percentage': 0,
'timestamp': None
}
fake_host = host_manager.PoolState('host1', share_capability, '_pool0')
fake_host.update_from_share_capability(share_capability,
context=fake_context)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.free_capacity_gb,
free_capacity - share_size)
def test_consume_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.PoolState('host1', share_capability, '_pool0')
share_size = 1000
fake_share = {'id': 'foo', 'size': share_size}
fake_host.update_from_share_capability(share_capability,
context=fake_context)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.total_capacity_gb, 'unknown')
self.assertEqual(fake_host.free_capacity_gb, 'unknown')
def test_consume_from_share_invalid_capacity(self):
fake_host = host_manager.PoolState('host1', {}, '_pool0')
fake_host.free_capacity_gb = 'invalid_foo_string'
self.assertRaises(exception.InvalidCapacity,
fake_host.consume_from_share, 'fake')
def test_repr(self):
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.0',
'storage_protocol': 'NFS_CIFS',
'total_capacity_gb': 20000,
'free_capacity_gb': 15000,
'allocated_capacity_gb': 5000,
'timestamp': None,
'reserved_percentage': 0,
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1')
fake_host.update_from_share_capability(capability,
context=fake_context)
result = fake_host.__repr__()
expected = ("host: 'host1', free_capacity_gb: None, "
"pools: {'Backend1': host: 'host1#Backend1', "
"free_capacity_gb: 15000, pools: None}")
self.assertEqual(expected, result)
@ddt.ddt
class PoolStateTestCase(test.TestCase):
"""Test case for HostState class."""
@ddt.data(
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'reserved_percentage': 0, 'timestamp': None,
'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 4,
'updated_at': timeutils.utcnow()
},
{
'id': 2, 'host': 'host1',
'status': 'available',
'share_id': 12, 'size': None,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'reserved_percentage': 0, 'timestamp': None,
'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True,
'ipv6_support': False},
'instances': []
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2',
'ipv4_support': False, 'ipv6_support': True
},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 4,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2',
'ipv4_support': True, 'ipv6_support': True},
'instances': []
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'provisioned_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2',
'ipv4_support': False, 'ipv6_support': False
},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 1,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256,
'reserved_percentage': 0, 'timestamp': None, 'cap1': 'val1',
'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 1,
'updated_at': timeutils.utcnow()
},
]
},
)
@ddt.unpack
def test_update_from_share_capability(self, share_capability, instances):
fake_context = context.RequestContext('user', 'project', is_admin=True)
self.mock_object(
db, 'share_instances_get_all_by_host',
mock.Mock(return_value=instances))
fake_pool = host_manager.PoolState('host1', None, 'pool0')
self.assertIsNone(fake_pool.free_capacity_gb)
fake_pool.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual('host1#pool0', fake_pool.host)
self.assertEqual('pool0', fake_pool.pool_name)
self.assertEqual(1024, fake_pool.total_capacity_gb)
self.assertEqual(512, fake_pool.free_capacity_gb)
self.assertDictMatch(share_capability, fake_pool.capabilities)
if 'provisioned_capacity_gb' not in share_capability:
db.share_instances_get_all_by_host.assert_called_once_with(
fake_context, fake_pool.host, with_share_data=True)
if len(instances) > 0:
self.assertEqual(4, fake_pool.provisioned_capacity_gb)
else:
self.assertEqual(0, fake_pool.provisioned_capacity_gb)
if 'allocated_capacity_gb' in share_capability:
self.assertEqual(share_capability['allocated_capacity_gb'],
fake_pool.allocated_capacity_gb)
elif 'allocated_capacity_gb' not in share_capability:
self.assertEqual(0, fake_pool.allocated_capacity_gb)
elif 'provisioned_capacity_gb' in share_capability and (
'allocated_capacity_gb' not in share_capability):
self.assertFalse(db.share_instances_get_all_by_host.called)
self.assertEqual(0, fake_pool.allocated_capacity_gb)
self.assertEqual(share_capability['provisioned_capacity_gb'],
fake_pool.provisioned_capacity_gb)
elif 'provisioned_capacity_gb' in share_capability and (
'allocated_capacity_gb' in share_capability):
self.assertFalse(db.share_instances_get_all_by_host.called)
self.assertEqual(share_capability['allocated_capacity_gb'],
fake_pool.allocated_capacity_gb)
self.assertEqual(share_capability['provisioned_capacity_gb'],
fake_pool.provisioned_capacity_gb)
if 'ipv4_support' in share_capability:
self.assertEqual(share_capability['ipv4_support'],
fake_pool.ipv4_support)
if 'ipv6_support' in share_capability:
self.assertEqual(share_capability['ipv6_support'],
fake_pool.ipv6_support)
| bswartz/manila | manila/tests/scheduler/test_host_manager.py | Python | apache-2.0 | 43,579 | 0 |
#!/usr/bin/env python
import time, logging, argparse, json, sys
from es_manager import ElasticsearchSnapshotManager, get_parser
from elasticsearch import exceptions
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('elasticsearch')
def take_snapshot(options):
esm = ElasticsearchSnapshotManager(options)
sh = esm.sh
    snapshot = options.snapshot or 'all_' + time.strftime('%Y%m%d%H')
snapdef = {
"include_global_state": True
}
if options.indices:
snapdef['indices'] = ','.join(options.indices)
try:
sh.create(repository=options.repository, snapshot=snapshot, body=json.dumps(snapdef), wait_for_completion=options.wait, request_timeout=7200)
# Housekeeping - delete old snapshots
snapshots = sh.get(repository=options.repository, snapshot="_all", request_timeout=120)['snapshots']
num_snaps = len(snapshots)
if num_snaps > options.keep:
up_to = num_snaps - options.keep
logger.info('TOTAL: %d - Will delete 1 -> %d' % (num_snaps, up_to + 1))
for snap in snapshots[0:up_to]:
sh.delete(repository=options.repository, snapshot=snap['snapshot'], request_timeout=3600)
logger.info('Deleted snapshot %s' % snap['snapshot'])
    except exceptions.TransportError as e:
        logger.error('Snapshot operation failed: %s' % e)
if __name__ == '__main__':
parser = get_parser("This script will take a snapshot and upload to S3")
parser.add_argument("--wait", action="store_true", default=True, help="Wait for the backup to complete")
parser.add_argument("--keep", action="store", default=60, help="Number of Elasticsearch snapshots to keep in S3")
options = parser.parse_args()
if options.debug:
logger.setLevel(logging.DEBUG)
take_snapshot(options)
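# Illustrative invocation (a sketch only -- the repository/snapshot/indices flags are
# defined by es_manager.get_parser(), so their exact names are assumptions here):
#
#   python es_backup.py --repository my_s3_repo --keep 30
#   python es_backup.py --repository my_s3_repo --snapshot pre_upgrade_20240101 --wait
#
# When --snapshot is omitted, the snapshot is named 'all_YYYYMMDDHH' and, after a
# successful run, only the newest --keep snapshots are retained in the repository.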
| DomainGroupOSS/elasticsearch-snapshots | es_backup.py | Python | mit | 1,826 | 0.005476 |
# Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (http://exogen.case.edu/projects/geopy/)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
__all__ = ['A', 'Area', 'D', 'Distance']
from decimal import Decimal
from functools import total_ordering
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
return obj.__name__ if obj.__class__ == type else obj.__class__.__name__
@total_ordering
class MeasureBase(object):
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)
if default_unit and isinstance(default_unit, six.string_types):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit,
getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)})
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)})
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __idiv__(self, other): # Python 2 compatibility
return type(self).__itruediv__(self, other)
def __bool__(self):
return bool(self.standard)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in six.iteritems(kwargs):
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'cm': 0.01,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'km': 1000.0,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'mm': 0.001,
'nm': 1852.0,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'um': 0.000001,
'yd': 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'centimeter': 'cm',
'foot': 'ft',
'inches': 'inch',
'kilometer': 'km',
'kilometre': 'km',
'meter': 'm',
'metre': 'm',
'micrometer': 'um',
'micrometre': 'um',
'millimeter': 'mm',
'millimetre': 'mm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)})
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
"distance": pretty_name(self.__class__),
})
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
# Shortcuts
D = Distance
A = Area
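# Minimal usage sketch (illustrative only; doctest-style, not executed here):
#
#   >>> from django.contrib.gis.measure import D, A
#   >>> d = D(m=1500)
#   >>> d.km                      # unit conversion via __getattr__
#   1.5
#   >>> (d + D(mi=1)).m           # arithmetic keeps the standard unit (metres)
#   3109.344
#   >>> (D(m=2) * D(m=3)).sq_m    # Distance * Distance yields an Area
#   6.0
#   >>> A(sq_km=1).sq_m
#   1000000.0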
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/measure.py | Python | artistic-2.0 | 12,272 | 0.002445 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
from mxnet import gluon
from mxnet.base import MXNetError
from mxnet.gluon.data.vision import transforms
from mxnet.test_utils import assert_almost_equal, set_default_context
from mxnet.test_utils import almost_equal, same
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import assertRaises, setup_module, with_seed, teardown
from test_gluon_data_vision import test_to_tensor, test_normalize, test_crop_resize
set_default_context(mx.gpu(0))
@with_seed()
def test_normalize_gpu():
test_normalize()
@with_seed()
def test_to_tensor_gpu():
test_to_tensor()
@with_seed()
def test_resize_gpu():
# Test with normal case 3D input float type
data_in_3d = nd.random.uniform(0, 255, (300, 300, 3))
out_nd_3d = transforms.Resize((100, 100))(data_in_3d)
data_in_4d_nchw = nd.moveaxis(nd.expand_dims(data_in_3d, axis=0), 3, 1)
data_expected_3d = (nd.moveaxis(nd.contrib.BilinearResize2D(data_in_4d_nchw, height=100, width=100, align_corners=False), 1, 3))[0]
assert_almost_equal(out_nd_3d.asnumpy(), data_expected_3d.asnumpy())
# Test with normal case 4D input float type
data_in_4d = nd.random.uniform(0, 255, (2, 300, 300, 3))
out_nd_4d = transforms.Resize((100, 100))(data_in_4d)
data_in_4d_nchw = nd.moveaxis(data_in_4d, 3, 1)
data_expected_4d = nd.moveaxis(nd.contrib.BilinearResize2D(data_in_4d_nchw, height=100, width=100, align_corners=False), 1, 3)
assert_almost_equal(out_nd_4d.asnumpy(), data_expected_4d.asnumpy())
    # Test invalid input: a negative target size should raise MXNetError
data_in_3d = nd.random.uniform(0, 255, (300, 300, 3))
invalid_transform = transforms.Resize(-150, keep_ratio=False, interpolation=2)
assertRaises(MXNetError, invalid_transform, data_in_3d)
# Credited to Hang Zhang
def py_bilinear_resize_nhwc(x, outputHeight, outputWidth):
batch, inputHeight, inputWidth, channel = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, outputHeight, outputWidth, channel]).astype('uint8')
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
            w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][h2][w2][c] = (1-h1lambda)*((1-w1lambda)*x[b][h1][w1][c] + \
w1lambda*x[b][h1][w1+w1p][c]) + \
h1lambda*((1-w1lambda)*x[b][h1+h1p][w1][c] + \
w1lambda*x[b][h1+h1p][w1+w1p][c])
return y
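# Illustrative cross-check (a sketch, not one of the original tests): the NumPy helper
# above can be compared against transforms.Resize on a small uint8 batch, e.g.
#
#   x = np.random.randint(0, 255, (1, 4, 4, 3)).astype('uint8')
#   expected = py_bilinear_resize_nhwc(x, 8, 8)
#   out = transforms.Resize((8, 8))(nd.array(x, dtype='uint8')).asnumpy()
#   assert_almost_equal(out, expected, atol=1)  # allow +/-1 for uint8 rounding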
@with_seed()
def test_crop_resize_gpu():
test_crop_resize()
| larroy/mxnet | tests/python/gpu/test_gluon_transforms.py | Python | apache-2.0 | 4,081 | 0.003921 |
import json
import io
from lxml import html
from django.test import TestCase, tag
from celery_djangotest.unit import TransactionTestCase
from dashboard.tests.factories import (
DataGroupFactory,
DataDocumentFactory,
ExtractedHPDocFactory,
)
from dashboard.tests.loader import load_model_objects, fixtures_standard
from django.core.files import File
from django.contrib.auth.models import User
from django.db.models import Count, Max
from django.urls import reverse
from dashboard.forms.data_group import ExtractFileFormSet
from dashboard.models import (
Product,
ProductDocument,
DataDocument,
ExtractedText,
DataGroup,
GroupType,
ExtractedComposition,
ProductToPUC,
ProductToPucClassificationMethod,
PUC,
PUCKind,
)
from dashboard.tests.mixins import TempFileMixin
from dashboard.tests import factories
from celery.result import AsyncResult
@tag("factory")
class DataGroupDetailTestWithFactories(TransactionTestCase):
def setUp(self):
self.objects = load_model_objects()
self.client.login(username="Karyn", password="specialP@55word")
def test_bulk_product_creation_and_deletion(self):
dg = factories.DataGroupFactory()
docs = factories.DataDocumentFactory.create_batch(10, data_group=dg)
self.assertEqual(
dg.get_products().count(), 0, "Data Group doesn't have zero products"
)
response = self.client.post(
reverse("data_group_detail", args=[dg.id]),
{"bulkassignprod-submit": 1},
follow=True,
)
self.assertEqual(
dg.get_products().count(), 10, "Data Group doesn't have ten products"
)
bulk_delete_url = reverse("data_group_delete_products", args=[dg.id])
response = self.client.get(bulk_delete_url, follow=True)
# the response should include the progress spinner
self.assertContains(response, "fa-spinner")
# Test the async task
task_id = response.context["task"].id
# wait for the task to finish
AsyncResult(id=task_id).wait(propagate=False)
# The products will not be gone until the task completes
self.assertEqual(
dg.get_products().count(),
0,
"Data Group doesn't have zero products after bulk delete",
)
def test_hp_docs_extraction_completed(self):
# Create a data group
dg = DataGroupFactory(group_type__code="HP")
# Create DataDocuments
# Unextracted document
DataDocumentFactory(data_group=dg)
# Extracted but incomplete
ExtractedHPDocFactory(data_document__data_group=dg, extraction_completed=False)
# Extracted and complete
ExtractedHPDocFactory(data_document__data_group=dg, extraction_completed=True)
response = self.client.get(reverse("documents_table", kwargs={"pk": dg.pk}))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)["data"]
self.assertEqual(
len(data), 3, "There should be 3 Extracted Habits and Practices documents"
)
extracted = list(filter(lambda o: o["extracted"], data))
not_extracted = list(filter(lambda o: not o["extracted"], data))
self.assertEqual(
len(extracted),
1,
"Only one Habits and Practices document should be extraction_completed",
)
self.assertEqual(
len(not_extracted),
2,
"Two Habits and Practices documents should not have extraction_completed",
)
@tag("loader")
class DataGroupDetailTest(TempFileMixin, TestCase):
def setUp(self):
self.objects = load_model_objects()
self.client.login(username="Karyn", password="specialP@55word")
def test_detail_form_load(self):
pk = self.objects.dg.pk
response = self.client.get(f"/datagroup/{pk}/")
self.assertFalse(
self.objects.doc.matched, ("Document should start w/ matched False")
)
self.assertTrue(
response.context["uploaddocs_form"],
("UploadForm should be included in the page!"),
)
self.assertFalse(
response.context["extfile_formset"],
("ExtractForm should not be included in the page!"),
)
self.objects.doc.file = File(io.BytesIO(), name="blank.pdf")
self.objects.doc.save()
self.objects.doc.extractedtext.delete()
self.assertFalse(self.objects.dg.all_extracted())
response = self.client.get(f"/datagroup/{pk}/")
self.assertFalse(
response.context["uploaddocs_form"],
("UploadForm should not be included in the page!"),
)
self.assertIsInstance(
response.context["extfile_formset"],
ExtractFileFormSet,
("ExtractForm should be included in the page!"),
)
ExtractedText.objects.create(
data_document=self.objects.doc, extraction_script=self.objects.exscript
)
self.assertTrue(self.objects.dg.all_extracted())
response = self.client.get(f"/datagroup/{pk}/")
self.assertFalse(
response.context["extfile_formset"],
"ExtractForm should NOT be included in the page!",
)
def test_unidentifed_group_type(self):
pk = self.objects.dg.pk
self.objects.doc.file = File(io.BytesIO(), name="blank.pdf")
self.objects.doc.save()
self.objects.extext.delete()
response = self.client.get(f"/datagroup/{pk}/")
self.assertIsInstance(
response.context["extfile_formset"],
ExtractFileFormSet,
("ExtractForm should be included in the page!"),
)
self.objects.gt.code = "UN"
self.objects.gt.save()
response = self.client.get(f"/datagroup/{pk}/")
self.assertFalse(
response.context["extfile_formset"],
("ExtractFormset should not be included in the page!"),
)
def test_bulk_create_products_form(self):
response = self.client.get(f"/datagroup/{self.objects.dg.pk}/")
self.assertIsNone(
response.context["bulkassignprod_form"],
"Product linked to all DataDocuments, no bulk_create needed.",
)
doc = DataDocument.objects.create(data_group=self.objects.dg)
doc.file = File(io.BytesIO(), name="blank.pdf")
self.objects.doc.file = File(io.BytesIO(), name="blank.pdf")
doc.save()
self.objects.doc.save()
response = self.client.get(f"/datagroup/{self.objects.dg.pk}/")
self.assertEqual(
response.context["bulkassignprod_form"].count,
1,
"Not all DataDocuments linked to Product, bulk_create needed",
)
self.assertContains(
response, "Bulk Create", msg_prefix="Bulk create button should be present."
)
p = Product.objects.create(upc="stub_47")
ProductDocument.objects.create(document=doc, product=p)
response = self.client.get(f"/datagroup/{self.objects.dg.pk}/")
self.assertIsNone(
response.context["bulkassignprod_form"],
"Product linked to all DataDocuments, no bulk_create needed.",
)
self.objects.dg.group_type = GroupType.objects.create(
title="Habits and practices"
)
response = self.client.get(f"/datagroup/{self.objects.dg.pk}/")
self.assertNotContains(
response,
"Bulk Create",
msg_prefix="Bulk button shouldn't be present w/ Habits and practices group_type.",
)
def test_bulk_create_post(self):
"""test the POST to create Products and link if needed"""
# create a new DataDocument with no Product
doc = DataDocument.objects.create(data_group=self.objects.dg)
response = self.client.get(f"/datagroup/{self.objects.dg.pk}/")
self.assertEqual(
response.context["bulkassignprod_form"].count,
1,
"Not all DataDocuments linked to Product, bulk_create needed",
)
new_stub_id = Product.objects.all().aggregate(Max("id"))["id__max"] + 1
response = self.client.post(
f"/datagroup/{self.objects.dg.pk}/",
{"bulkassignprod-submit": "Submit"},
follow=True,
)
self.assertIsNone(
response.context["bulkassignprod_form"],
"Products linked to all DataDocuments, no bulk_create needed.",
)
product = ProductDocument.objects.get(document=doc).product
self.assertEqual(
product.title, "unknown", "Title should be unknown in bulk_create"
)
self.assertEqual(
product.upc,
f"stub_%s" % new_stub_id,
"UPC should be created for second Product",
)
def test_hh_type_no_bulk_create_products_form(self):
response = self.client.get(f"/datagroup/{self.objects.dg_hh.pk}/")
self.assertIsNone(
response.context["bulkassignprod_form"],
"HH documents do not have associated products.",
)
def test_upload_note(self):
response = self.client.get(
f"/datagroup/{DataGroup.objects.first().id}/"
).content.decode("utf8")
self.assertIn(
"Please limit upload to <600 documents",
response,
"Note to limit upload to <600 should be on the page",
)
def test_extracted_count(self):
dg_id = DataGroup.objects.first().id
response = self.client.get(f"/datagroup/{dg_id}/").content.decode("utf8")
self.assertIn(
'"numextracted": 1',
response,
"Data Group should report a count of 1 total extracted documents",
)
# Add a Data Document with no related extracted record
dd = DataDocument.objects.create(
title="New Document",
data_group=self.objects.dg,
document_type=self.objects.dt,
filename="new_example.pdf",
)
# Add an ExtractedText object
et = ExtractedText.objects.create(
data_document_id=dd.id, extraction_script=self.objects.exscript
)
et.save()
response = self.client.get(f"/datagroup/{dg_id}/").content.decode("utf8")
self.assertIn(
'"numextracted": 2',
response,
"Data Group should contain a count of 2 total extracted documents",
)
def test_extracted_count_hp(self):
"""HP documents are considered extracted only when ExtractedHPDoc.extraction_completed = True"""
# Create a data group
dg = DataGroupFactory(group_type__code="HP")
# Create DataDocuments
# Incomplete
ExtractedHPDocFactory(data_document__data_group=dg, extraction_completed=False)
# Complete
ExtractedHPDocFactory(data_document__data_group=dg, extraction_completed=True)
response = self.client.get(
reverse("data_group_detail", kwargs={"pk": dg.pk})
).content.decode("utf8")
self.assertIn(
'"numregistered": 2', response, "Data Group should contain 2 documents"
)
self.assertIn(
'"numextracted": 1',
response,
"Data Group should contain 1 extracted documents",
)
def test_delete_doc_button(self):
url = f"/datagroup/{DataGroup.objects.first().id}/documents_table/"
response = self.client.get(url).content.decode("utf8")
matched = '"matched": false'
self.assertIn(matched, response, "Document should not have a match.")
self.objects.doc.file = File(io.BytesIO(), name="blank.pdf")
self.objects.doc.save()
response = self.client.get(url).content.decode("utf8")
matched = '"matched": true'
self.assertIn(matched, response, "Document should have a match.")
def test_detail_table_headers(self):
pk = self.objects.dg.pk
response = self.client.get(f"/datagroup/{pk}/").content.decode("utf8")
self.assertIn(
'<th class="text-center">Product',
response,
"Data Group should have Product column.",
)
fu = GroupType.objects.create(title="Functional use")
self.objects.dg.group_type = fu
self.objects.dg.save()
response = self.client.get(f"/datagroup/{pk}/")
self.assertNotContains(response, '<th class="text-center">Product')
def test_detail_table_media_document_link(self):
dg = self.objects.dg
data_doc = DataDocument.objects.create(
data_group=dg, title="data_doc", filename="filename.pdf"
)
data_doc_with_dot = DataDocument.objects.create(
data_group=dg, title="data_doc_with_dot", filename="filename.2.pdf"
)
response = self.client.get(f"/datagroup/{dg.pk}/documents_table/")
self.assertEquals(response.status_code, 200)
response_content = json.loads(response.content.decode("utf-8"))["data"]
for content in response_content:
if content["id"] in [data_doc.pk, data_doc_with_dot.pk]:
self.assertTrue(content["fileext"] == ".pdf")
def test_detail_table_puc_classification_method(self):
dg = self.objects.dg
ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
classification_method=ProductToPucClassificationMethod.objects.get(
code="MA"
),
)
puc2 = PUC.objects.create(
gen_cat="Test General Category 2",
prod_fam="Test Product Family 2",
prod_type="Test Product Type 2",
description="Test Product Description 2",
last_edited_by=self.objects.user,
kind=PUCKind.objects.get_or_create(name="Unknown", code="UN")[0],
)
ProductToPUC.objects.create(
product=self.objects.p,
puc=puc2,
classification_method=ProductToPucClassificationMethod.objects.get(
code="MB"
),
)
response = self.client.get(f"/datagroup/{dg.pk}/documents_table/")
self.assertEquals(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))["data"]
self.assertEqual(content[0]["puc"], self.objects.puc.__str__())
self.assertEqual(content[0]["classification_method"], "Manual")
def test_detail_datasource_link(self):
pk = self.objects.dg.pk
response = self.client.get(f"/datagroup/{pk}/")
self.assertContains(
response,
'<a href="/datasource/',
msg_prefix="Should be able to get back to DataSource from here.",
)
def test_edit_redirect(self):
dgpk = self.objects.dg.pk
dspk = str(self.objects.ds.pk)
gtpk = str(self.objects.gt.pk)
data = {
"name": ["Changed Name"],
"group_type": [gtpk],
"downloaded_by": [str(User.objects.get(username="Karyn").pk)],
"downloaded_at": ["08/20/2017"],
"data_source": [dspk],
}
response = self.client.post(f"/datagroup/edit/{dgpk}/", data=data)
self.assertEqual(
response.status_code, 302, "User is redirected to detail page."
)
self.assertEqual(
response.url, f"/datagroup/{dgpk}/", "Should go to detail page."
)
class DataGroupDetailTestWithFixtures(TestCase):
fixtures = fixtures_standard
def setUp(self):
self.client.login(username="Karyn", password="specialP@55word")
def test_download_raw_comp_data(self):
# Ability to download, by data group, a csv file of raw extracted chemical composition data.
# Download button would appear on data group detail page,
# Download button would appear if any data documents have extracted text.
# Only applies for data group type Composition. (group_type = 2)
# and where a document has been extracted
dg_co = (
DataDocument.objects.filter(data_group__group_type__code="CO")
.filter(extractedtext__isnull=False)
.first()
.data_group
)
resp = self.client.get(f"/datagroup/%s/" % dg_co.id)
self.assertIn(b"Raw composition records", resp.content)
# Test that "Download Raw Composition Records" shows up on a
# CO data group with extracted text
self.assertContains(
resp,
"download_raw_extracted_records",
msg_prefix="a flute with no holes is not a flute",
)
# Test on a data group with no extracted documents
dg = (
DataGroup.objects.filter(
id__in=DataDocument.objects.filter(extractedtext__isnull=True).values(
"data_group"
)
)
.exclude(
id__in=DataDocument.objects.filter(extractedtext__isnull=False).values(
"data_group"
)
)
.filter(group_type__code="CO")
.first()
)
resp = self.client.get(f"/datagroup/{dg.pk}/")
self.assertNotContains(
resp,
"download_raw_extracted_records",
msg_prefix="a donut with no holes is a danish",
)
# Test download on all data groups with ExtractedCompositions, whether
# they are CO or UN
dg_ids = (
DataDocument.objects.filter(
id__in=ExtractedComposition.objects.all().values("extracted_text_id")
)
.order_by()
.values_list("data_group_id", flat=True)
.distinct()
)
for dg_id in dg_ids:
resp = self.client.get(
f"/datagroup/%s/download_raw_extracted_records/" % dg_id
)
self.assertEqual(resp.status_code, 200)
# File downloaded must include [specified fields]
field_list = "ExtractedComposition_id,raw_cas,raw_chem_name,raw_min_comp,raw_central_comp,raw_max_comp,unit_type"
content = list(i.decode("utf-8") for i in resp.streaming_content)
self.assertIn(field_list, content[1])
def test_bulk_create_count(self):
"""Test bulk count on a data group containing both
a data document with many products and
a data document with no products.
"""
d = DataDocument.objects.annotate(num_prod=Count("product"))
dg_manyprod = DataGroup.objects.filter(
datadocument__in=d.filter(num_prod__gt=1)
)
dg_noprod = DataGroup.objects.filter(datadocument__in=d.filter(num_prod=0))
# MySQL workaround for INTERSECT
# dg = dg_noprod.intersection(dg_manyprod).first()
dg = dg_noprod.filter(id__in=dg_manyprod.values("id")).first()
self.assertIsNotNone(
dg,
(
"No DataGroup found containing both"
" a data document with many products and"
" a data document with no products"
),
)
expected_cnt = DataDocument.objects.filter(
data_group=dg, products__id=None
).count()
response = self.client.get("/datagroup/%i/" % dg.id)
response_html = html.fromstring(response.content)
path = "//button[@name='bulkassignprod-submit']"
returned_count = int(response_html.xpath(path)[0].text.strip().split(" ")[2])
self.assertEqual(expected_cnt, returned_count, "Bulk product count incorrect.")
def test_detail_template_fieldnames(self):
dg = DataGroup.objects.filter(group_type__code="CO").first()
self.assertEqual(
str(dg.group_type),
"Composition",
'Type of DataGroup needs to be "composition" for this test.',
)
self.client.get(f"/datagroup/{dg.pk}/")
self.assertEqual(
dg.get_extracted_template_fieldnames(),
[
"data_document_id",
"data_document_filename",
"prod_name",
"doc_date",
"rev_num",
"raw_category",
"raw_cas",
"raw_chem_name",
"report_funcuse",
"raw_min_comp",
"raw_max_comp",
"unit_type",
"ingredient_rank",
"raw_central_comp",
"component",
],
"Fieldnames passed are incorrect!",
)
dg = DataGroup.objects.filter(group_type__code="FU").first()
self.assertEqual(
str(dg.group_type),
"Functional use",
'Type of DataGroup needs to be "FU" for this test.',
)
self.client.get(f"/datagroup/{dg.pk}/")
self.assertEqual(
dg.get_extracted_template_fieldnames(),
[
"data_document_id",
"data_document_filename",
"prod_name",
"doc_date",
"rev_num",
"raw_category",
"raw_cas",
"raw_chem_name",
"report_funcuse",
],
"Fieldnames passed are incorrect!",
)
dg = DataGroup.objects.filter(group_type__code="CP").first()
self.assertEqual(
str(dg.group_type),
"Chemical presence list",
'Type of DataGroup needs to be "Chemical presence list" for this test.',
)
self.client.get(f"/datagroup/{dg.pk}/")
self.assertEqual(
dg.get_extracted_template_fieldnames(),
[
"data_document_id",
"data_document_filename",
"doc_date",
"raw_category",
"raw_cas",
"raw_chem_name",
"report_funcuse",
"cat_code",
"description_cpcat",
"cpcat_code",
"cpcat_sourcetype",
"component",
"chem_detected_flag",
],
"Fieldnames passed are incorrect!",
)
self.assertEqual(dg.can_have_chem_detected_flag, True)
| HumanExposure/factotum | dashboard/tests/functional/test_datagroup_detail.py | Python | gpl-3.0 | 22,500 | 0.001244 |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 12:07:53 2014
@author: Gouthaman Balaraman
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import numpy as np
import os
#####################################################
# A bunch of constants used throughout the script. #
#####################################################
_curdir= os.path.abspath(os.path.curdir)
_posdat = re.compile('(\w+):(\d+)px')
_topdat = re.compile('top:(\d+)px')
_leftdat = re.compile('top:(\d+)px')
# this is the full format with all columns; The numbers here bracket the columns
maptbl_long = [(0,75),(75,145),(145,212),(212,283),(283,350),(350,418),(418,486),
(486,554),(554,621),(621,688),(688,756),(756,823),(823,890),(890,958),
(958,1026),(1026,1094),(1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
# this is a truncated version I found for some months; the numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
    # remove non-breaking spaces (u'\xa0')
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
    Bin the rows into buckets based on their 'top' pixel value.
'''
sarr = np.sort(arr)
    # coarseness; used to separate different rows
    crsns = 10  # np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
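# Illustrative example (assumed behaviour, not part of the original script):
#
#   >>> create_buckets(np.array([100, 102, 250, 252, 400]))
#   [[(0, 176.0), (176.0, 326.0), (326.0, 366.0)], [0, 1, 2]]
#
# i.e. one (start, end) band of 'top' pixel values per detected table row, plus the
# list of row indices.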
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
    For a given page, use each node's position to tag it with a column number.
    A DataFrame is then created, and a pivot is used to reconstruct a proper
    table that resembles the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
    dlst, dcnt = [], []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
    Extract the BeautifulSoup HTML page into a list of node dictionaries
    (content plus 'top'/'left' positions) for easy processing.
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
    Given a PDF file, this calls pdftohtml.exe to convert it to HTML.
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
    Convenience method to loop over all the PDF files, calling create_html
    on each one.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
    Does some clean-up and a checksum to validate the data. This is a basic
    check; nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
    This creates the CSV file given an HTML file.
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
exclrows = set(['0'+str(i)for i in range(2000,2020,1)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
'''
    This is a top-level driver which calls create_csv in a loop.
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break
if __name__=='__main__':
'''
Here is a complete example to loop over all pdfs and create all csvs.
>>>pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
>>>convert_all_pdfs(pdfdir)
>>>htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
>>>convert_all_htmls(htmldir)
Or you can do individual file conversions:
>>>pdffile = os.path.join(pdfdir,'January_2013.pdf')
>>>create_html(pdffile)
'''
# Convert pdfs to html
pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
pdffile = os.path.join(pdfdir,'January_2013.pdf')
create_html(pdffile)
#convert_all_pdfs(pdfdir)
# Then convert html to csv
htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
html = os.path.join(htmldir,'January_2013.html')
create_csv(html)
#convert_all_htmls(htmldir) | gouthambs/OpenData | src/longbeach_crime_stats.py | Python | mit | 10,310 | 0.022599 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yeison/Documentos/python/developing/pinguino/pinguino-ide/qtgui/gide/bloques/widgets/control_slider.ui'
#
# Created: Wed Mar 4 01:39:58 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Frame(object):
def setupUi(self, Frame):
Frame.setObjectName("Frame")
Frame.resize(237, 36)
Frame.setWindowTitle("")
self.gridLayout = QtGui.QGridLayout(Frame)
self.gridLayout.setSpacing(0)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.lineEdit_2 = QtGui.QLineEdit(Frame)
self.lineEdit_2.setMaximumSize(QtCore.QSize(46, 16777215))
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(15)
font.setWeight(75)
font.setBold(True)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgba(255, 255, 255, 0);")
self.lineEdit_2.setText("0000")
self.lineEdit_2.setFrame(False)
self.lineEdit_2.setReadOnly(True)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2, 0, 1, 1, 1)
self.horizontalSlider = QtGui.QSlider(Frame)
self.horizontalSlider.setCursor(QtCore.Qt.PointingHandCursor)
self.horizontalSlider.setFocusPolicy(QtCore.Qt.NoFocus)
self.horizontalSlider.setMaximum(1023)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setInvertedAppearance(False)
self.horizontalSlider.setTickPosition(QtGui.QSlider.NoTicks)
self.horizontalSlider.setTickInterval(128)
self.horizontalSlider.setObjectName("horizontalSlider")
self.gridLayout.addWidget(self.horizontalSlider, 0, 2, 1, 1)
self.retranslateUi(Frame)
QtCore.QMetaObject.connectSlotsByName(Frame)
def retranslateUi(self, Frame):
pass
| emmanuelol/pinguino-ide | qtgui/gide/bloques/widgets/control_slider.py | Python | gpl-2.0 | 2,130 | 0.001878 |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import collections
import datetime
import json
from decimal import Decimal
import elasticsearch
import pytest
import pytablewriter as ptw
from .._common import print_test_result
from ..data import headers, value_matrix
inf = Decimal("Infinity")
nan = None
Data = collections.namedtuple("Data", "table header value expected")
empty_test_data_list = [
Data(table="dummy", header=[], value=[], expected=None),
Data(table="dummy", header=headers, value=[], expected=None),
]
exception_test_data_list = [
Data(table="", header=headers, value=value_matrix, expected=ptw.EmptyTableNameError),
]
table_writer_class = ptw.ElasticsearchWriter
class Test_ElasticsearchWriter__get_mappings:
def test_normal(self):
writer = table_writer_class()
writer.table_name = "es mappings"
writer.headers = [
"text",
"byte",
"short",
"int",
"long",
"float",
"date",
"bool",
"ip",
"none",
"inf",
"nan",
]
writer.value_matrix = [
[
"This is XXX",
100,
10000,
2000000000,
200000000000,
0.1,
datetime.datetime(2017, 1, 3, 4, 5, 6),
True,
"127.0.0.1",
None,
float("inf"),
float("nan"),
],
[
"What is it",
-10,
-1000,
-200000000,
-20000000000,
100.1,
datetime.datetime(2017, 1, 3, 4, 5, 6),
False,
"::1",
None,
float("inf"),
float("nan"),
],
]
# mappings w/o type hint ---
writer._preprocess()
mappings = writer._get_mappings()
expected_mappings = {
"mappings": {
"table": {
"properties": {
"text": {"type": "text"},
"byte": {"type": "byte"},
"short": {"type": "short"},
"int": {"type": "integer"},
"long": {"type": "long"},
"float": {"type": "double"},
"date": {"type": "date", "format": "date_optional_time"},
"bool": {"type": "boolean"},
"ip": {"type": "text"},
"none": {"type": "keyword"},
"inf": {"type": "keyword"},
"nan": {"type": "keyword"},
}
}
}
}
print_test_result(expected=expected_mappings, actual=json.dumps(mappings, indent=4))
assert mappings == expected_mappings
# mappings w/ type hint ---
writer.type_hints = [None, None, None, None, None, None, None, None, ptw.IpAddress]
writer._preprocess()
mappings = writer._get_mappings()
expected_mappings = {
"mappings": {
"table": {
"properties": {
"text": {"type": "text"},
"byte": {"type": "byte"},
"short": {"type": "short"},
"int": {"type": "integer"},
"long": {"type": "long"},
"float": {"type": "double"},
"date": {"type": "date", "format": "date_optional_time"},
"bool": {"type": "boolean"},
"ip": {"type": "ip"},
"none": {"type": "keyword"},
"inf": {"type": "keyword"},
"nan": {"type": "keyword"},
}
}
}
}
print_test_result(expected=expected_mappings, actual=json.dumps(mappings, indent=4))
assert mappings == expected_mappings
# body ---
body = list(writer._get_body())
expected_body = [
{
"text": "This is XXX",
"byte": 100,
"short": 10000,
"int": 2000000000,
"long": 200000000000,
"float": Decimal("0.1"),
"date": "2017-01-03T04:05:06",
"bool": True,
"ip": "127.0.0.1",
"none": None,
"inf": "Infinity",
"nan": "NaN",
},
{
"text": "What is it",
"byte": -10,
"short": -1000,
"int": -200000000,
"long": -20000000000,
"float": Decimal("100.1"),
"date": "2017-01-03T04:05:06",
"bool": False,
"ip": "::1",
"none": None,
"inf": "Infinity",
"nan": "NaN",
},
]
print_test_result(expected=expected_body, actual=body)
assert body == expected_body
class Test_ElasticsearchWriter_write_table:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in empty_test_data_list],
)
def test_smoke_empty(self, table, header, value, expected):
writer = table_writer_class()
writer.stream = elasticsearch.Elasticsearch()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table()
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in exception_test_data_list],
)
def test_exception(self, table, header, value, expected):
writer = table_writer_class()
writer.stream = elasticsearch.Elasticsearch()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
with pytest.raises(expected):
writer.write_table()
| thombashi/pytablewriter | test/writer/test_elasticsearch_writer.py | Python | mit | 6,308 | 0.001268 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from smartcard.CardMonitoring import CardObserver, CardMonitor
from smartcard.System import readers
import smartcard
from types import MethodType
from datetime import datetime
MAP_MOIS = {
"JANV" : "01",
"JAN" : "01",
"FEVR" : "02",
"FEV" : "02",
"MARS" : "03",
"MAR" : "03",
"AVRI" : "04",
"AVR" : "04",
"MAI" : "05",
"JUIN" : "06",
"JUIL" : "07",
"AOUT" : "08",
"AOU" : "08",
"SEPT" : "09",
"SEP" : "09",
"OCTO" : "10",
"OCT" : "10",
"NOVE" : "11",
"NOV" : "11",
"DECE" : "12",
"DEC" : "12"
}
ID = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x31]
ADDRESS = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x33]
PHOTO = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x35]
def read_infos(self, read_photo=False):
def _sendADPU(apdu):
response, sw1, sw2 = _cnx.transmit(apdu)
return response, sw1, sw2
_cnx = self.createConnection()
_cnx.connect()
# select file : informations
# TODO : manage return codes
cmd = [0x00, 0xA4, 0x08, 0x0C, len(ID)] + ID
data, sw1, sw2 = _sendADPU(cmd)
# read file
cmd = [0x00, 0xB0, 0x00, 0x00, 256]
data, sw1, sw2 = _sendADPU(cmd)
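    # Per ISO 7816-4, SW1 0x6C means "wrong length": the card reports the
    # exact number of available bytes in SW2, so the read is retried below
    # with that length.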
if "%x"%sw1 == "6c":
cmd = [0x00, 0xB0, 0x00, 0x00, sw2]
data, sw1, sw2 = _sendADPU(cmd)
idx = 0
num_info = 0
infos = []
while num_info <= 12:
num_info = data[idx]
idx += 1
len_info = data[idx]
idx += 1
chaine_bytes = []
for x in range(len_info):
chaine_bytes.append(data[idx])
idx += 1
try:
infos.append(bytes(chaine_bytes).decode("utf-8"))
except UnicodeDecodeError:
infos.append(u"")
informations = {
"num_carte" : infos[0],
"debut_val" : datetime.strptime(infos[2],"%d.%m.%Y"),
"fin_val" : datetime.strptime(infos[3],"%d.%m.%Y"),
"commune_delivrance" : infos[4],
"num_nat" : infos[5],
"nom" : infos[6],
"prenoms" : infos[7],
"suffixe" : infos[8],
"nationalite" : infos[9],
"lieu_naissance" : infos[10],
"date_naissance" : datetime.strptime(infos[11].split()[0] + "/" + MAP_MOIS[infos[11].split()[1]] + "/" + infos[11].split()[2],"%d/%m/%Y"),
"sexe" : infos[12],
}
# select file : adresse
cmd = [0x00, 0xA4, 0x08, 0x0C, len(ADDRESS)] + ADDRESS
data, sw1, sw2 = _sendADPU(cmd)
# read file
cmd = [0x00, 0xB0, 0x00, 0x00, 256]
data, sw1, sw2 = _sendADPU(cmd)
if "%x"%sw1 == "6c":
cmd = [0x00, 0xB0, 0x00, 0x00, sw2]
data, sw1, sw2 = _sendADPU(cmd)
idx = 0
num_info = 0
infos = []
while num_info <= 2:
num_info = data[idx]
idx += 1
len_info = data[idx]
idx += 1
chaine_bytes = []
for x in range(len_info):
chaine_bytes.append(data[idx])
idx += 1
try:
infos.append(bytes(chaine_bytes).decode("utf-8"))
except UnicodeDecodeError:
infos.append(u"")
informations["adresse"] = infos[0]
informations["code_postal"] = infos[1]
informations["localite"] = infos[2]
if read_photo:
# select file : photo
cmd = [0x00, 0xA4, 0x08, 0x0C, len(PHOTO)] + PHOTO
data, sw1, sw2 = _sendADPU(cmd)
photo_bytes = []
offset = 0
while "%x"%sw1 == "90":
cmd = [0x00, 0xB0, offset, 0x00, 256]
data, sw1, sw2 = _sendADPU(cmd)
photo_bytes += data
offset += 1
if "%x"%sw1 == "6c":
offset -= 1
cmd = [0x00, 0xB0, offset, 0x00, sw2]
data, sw1, sw2 = _sendADPU(cmd)
photo_bytes += data
photo = bytearray(photo_bytes)
informations["photo"] = photo
for (attribute, value) in informations.items():
setattr(self, attribute, value)
return informations
class BeidReader(CardObserver):
def __init__(self, num_reader=0):
self._reader = readers()[num_reader]
self._readername = self._reader.name
cm = CardMonitor()
cm.addObserver(self)
self.card = None
def update(self, observable, actions):
(added_cards, removed_cards) = actions
for card in added_cards:
if card.reader == self._readername:
# attach read_infos method to smartcard.Card.Card instance
card.read_infos = MethodType(read_infos, card)
self.card = card
self.on_inserted(self.card)
for card in removed_cards:
if card.reader == self._readername:
self.card = None
self.on_removed()
def on_inserted(self, card):
print("Card inserted : ", card)
def on_removed(self):
print("Card removed")
def __repr__(self):
return self._reader.__repr__()
def __str__(self):
return self._reader.__str__()
if __name__ == "__main__":
from pprint import pprint
class MyReader(BeidReader):
def on_inserted(self, card):
pprint(card.read_infos())
def on_removed(self):
print("This is the end !")
my = MyReader()
input("Press ENTER to exit...\n")
| Lapin-Blanc/pythonbeid | beid.py | Python | gpl-3.0 | 5,421 | 0.011621 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import six
import urllib2
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_rules = None
_checks = {}
class Rules(dict):
"""
A store for rules. Handles the default_rule setting directly.
"""
@classmethod
def load_json(cls, data, default_rule=None):
"""
Allow loading of JSON rule data.
"""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
# Really have to figure out a way to deprecate this
def set_rules(rules):
"""Set the rules in use for policy checks."""
global _rules
_rules = rules
# Ditto
def reset():
"""Clear the rules used for policy checks."""
global _rules
_rules = None
def check(rule, target, creds, exc=None, *args, **kwargs):
"""
Checks authorization of a rule against the target and credentials.
:param rule: The rule to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If exc is not provided, returns
False.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds)
elif not _rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = _rules[rule](target, creds)
except KeyError:
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if exc and result is False:
raise exc(*args, **kwargs)
return result
class BaseCheck(object):
"""
Abstract base class for Check classes.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""
Retrieve a string representation of the Check tree rooted at
this node.
"""
pass
@abc.abstractmethod
def __call__(self, target, cred):
"""
Perform the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""
A policy check that always returns False (disallow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""
A policy check that always returns True (allow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred):
"""Check the policy."""
return True
class Check(BaseCheck):
"""
A base class to allow for user-defined policy checks.
"""
def __init__(self, kind, match):
"""
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""
A policy check that inverts the result of another policy check.
Implements the "not" operator.
"""
def __init__(self, rule):
"""
Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred):
"""
Check the policy. Returns the logical inverse of the wrapped
check.
"""
return not self.rule(target, cred)
class AndCheck(BaseCheck):
"""
A policy check that requires that a list of other checks all
return True. Implements the "and" operator.
"""
def __init__(self, rules):
"""
Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that all rules accept in order to
return True.
"""
for rule in self.rules:
if not rule(target, cred):
return False
return True
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""
A policy check that requires that at least one of a list of other
checks returns True. Implements the "or" operator.
"""
def __init__(self, rules):
"""
Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that at least one rule accept in
order to return True.
"""
for rule in self.rules:
if rule(target, cred):
return True
return False
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %(rule)s") % locals())
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""
Provided for backwards compatibility. Translates the old
list-of-lists syntax into a tree of Check objects.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""
Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""
Metaclass for the ParseState class. Facilitates identifying
reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""
Create the class. Injects the 'reducers' list, a list of
tuples matching token sequences to the names of the
corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""
Decorator for reduction methods. Arguments are a sequence of
tokens, in order, which should trigger running this reduction
method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""
Implement the core of parsing the policy language. Uses a greedy
reduction algorithm to reduce a sequence of tokens into a single
terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""
Perform a greedy reduction of the token stream. If a reducer
method matches, it will be executed, then the reduce() method
will be called recursively to search for any more possible
reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""
Obtain the final result of the parse. Raises ValueError if
the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""
Create an 'and_expr' from two checks joined by the 'and'
operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""
Extend an 'and_expr' by adding one more check.
"""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""
Create an 'or_expr' from two checks joined by the 'or'
operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""
Extend an 'or_expr' by adding one more check.
"""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r") % locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""
Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds):
"""
Recursively checks credentials based on the defined rules.
"""
try:
return _rules[self.match](target, creds)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds):
"""
Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds):
"""
Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
| samsu/neutron | openstack/common/policy.py | Python | apache-2.0 | 21,618 | 0 |
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <alejandro.b.e@gmail.com>
# Copyright (C) 2012-2013 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import logging
from yithlibraryserver.user.analytics import get_google_analytics
from yithlibraryserver.user.gravatar import get_gravatar
from yithlibraryserver.user.idp import add_identity_provider
from yithlibraryserver.user.models import User, ExternalIdentity
from yithlibraryserver.user.security import get_user
logger = logging.getLogger(__name__)
def includeme(config):
config.add_directive('add_identity_provider', add_identity_provider)
config.add_request_method(get_user, 'user', reify=True)
config.add_request_method(get_google_analytics,
'google_analytics', reify=True)
config.add_request_method(get_gravatar, 'gravatar', reify=True)
config.add_route('login', '/login')
config.add_route('register_new_user', '/register')
config.add_route('logout', '/logout')
config.add_route('user_destroy', '/destroy')
config.add_route('user_information', '/profile')
config.add_route('user_preferences', '/preferences')
config.add_route('user_identity_providers', '/identity-providers')
config.add_route('user_send_email_verification_code',
'/send-email-verification-code')
config.add_route('user_verify_email', '/verify-email')
config.add_route('user_google_analytics_preference',
'/google-analytics-preference')
config.add_route('user_view', '/user')
logger.debug('Importing %s model so SQLAlchemy knows about it', User)
logger.debug('Importing %s model so SQLAlchemy knows about it', ExternalIdentity)
| lorenzogil/yith-library-server | yithlibraryserver/user/__init__.py | Python | agpl-3.0 | 2,517 | 0.000397 |
import collections
import random
# Set the random seed so we see the same output each time
# the script is run.
random.seed(1)
d1 = collections.deque(maxlen=3)
d2 = collections.deque(maxlen=3)
for i in range(5):
n = random.randint(0, 100)
print('n =', n)
d1.append(n)
d2.appendleft(n)
print('D1:', d1)
print('D2:', d2)
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_data_structures/collections_deque_maxlen.py | Python | apache-2.0 | 349 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Projects Issue extensions for user roles',
'version': '1.0',
'category': 'Project Management',
'summary': 'Extend Project user roles to support more complex use cases',
'description': """\
Also implements the Project user role extensions to the Project Issue
documents.
This module is automatically installed if the Issue Tracker is also installed.
Please refer to the ``project_baseuser`` module for more details.
""",
'author': 'Daniel Reis',
'depends': [
'project_issue',
'project_baseuser',
],
'data': [
'security/ir.model.access.csv',
'security/project_security.xml',
],
'installable': True,
'auto_install': True,
}
| vrenaville/project-service | project_issue_baseuser/__openerp__.py | Python | agpl-3.0 | 1,603 | 0 |
from random import randint
board = []
for x in range(5):
board.append(["O"] * 5)
def print_board(board):
for row in board:
print " ".join(row)
print "Let's play Battleship!"
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
print ship_row
print ship_col
# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
for turn in range(4):
print "Turn", turn + 1
guess_row = int(raw_input("Guess Row:"))
guess_col = int(raw_input("Guess Col:"))
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sunk my battleship!"
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print "Oops, that's not even in the ocean."
elif(board[guess_row][guess_col] == "X"):
print "You guessed that one already."
else:
print "You missed my battleship!"
board[guess_row][guess_col] = "X"
# Print (turn + 1) here!
print_board(board)
| oliverwreath/Wide-Range-of-Webs | Python/Battleship/15. Guess 4 turns.py | Python | agpl-3.0 | 1,187 | 0.005897 |
"""
Zappa Async Tasks
Example:
```
from zappa.async import task
@task(service='sns')
def my_async_func(*args, **kwargs):
dosomething()
```
For SNS, you can also pass an `arn` argument to task() which will specify which SNS path to send it to.
Without `service='sns'`, the default service is 'lambda' which will call the method in an asynchronous
lambda call.
The following restrictions apply:
* function must have a clean import path -- i.e. no closures, lambdas, or methods.
* args and kwargs must be JSON-serializable.
* The JSON-serialized form must be within the size limits for Lambda (128K) or SNS (256K) events.
Discussion of this comes from:
https://github.com/Miserlou/Zappa/issues/61
https://github.com/Miserlou/Zappa/issues/603
https://github.com/Miserlou/Zappa/pull/694
https://github.com/Miserlou/Zappa/pull/732
## Full lifetime of an asynchronous dispatch:
1. In a file called `foo.py`, there is the following code:
```
from zappa.async import task
@task
def my_async_func(*args, **kwargs):
return sum(args)
```
2. The decorator desugars to:
`my_async_func = task(my_async_func)`
3. Somewhere else, the code runs:
`res = my_async_func(1,2)`
really calls task's `_run_async(1,2)`
with `func` equal to the original `my_async_func`
If we are running in Lambda, this runs:
LambdaAsyncResponse().send('foo.my_async_func', (1,2), {})
and returns the LambdaAsyncResponse instance to the local
context. That local context, can, e.g. test for `res.sent`
to confirm it was dispatched correctly.
4. LambdaAsyncResponse.send invoked the currently running
AWS Lambda instance with the json message:
```
{ "command": "zappa.async.route_lambda_task",
"task_path": "foo.my_async_func",
"args": [1,2],
"kwargs": {}
}
```
5. The new lambda instance is invoked with the message above,
and Zappa runs its usual bootstrapping context, and inside
   zappa.handler, the existence of the 'command' key in the message
dispatches the full message to zappa.async.route_lambda_task, which
in turn calls `run_message(message)`
6. `run_message` loads the task_path value to load the `func` from `foo.py`.
We should note that my_async_func is wrapped by @task in this new
context, as well. However, @task also decorated `my_async_func.sync()`
to run the original function synchronously.
`run_message` duck-types the method and finds the `.sync` attribute
and runs that instead -- thus we do not infinitely dispatch.
If `my_async_func` had code to dispatch other functions inside its
synchronous portions (or even call itself recursively), those *would*
be dispatched asynchronously, unless, of course, they were called
by: `my_async_func.sync(1,2)` in which case it would run synchronously
and in the current lambda function.
"""
import boto3
import botocore
from functools import update_wrapper
import importlib
import inspect
import json
import os
from .utilities import get_topic_name
AWS_REGION = os.environ.get('AWS_REGION') # Set via CLI env var packaging
AWS_LAMBDA_FUNCTION_NAME = os.environ.get('AWS_LAMBDA_FUNCTION_NAME') # Set by AWS
# Declare these here so they're kept warm.
try:
LAMBDA_CLIENT = boto3.client('lambda')
SNS_CLIENT = boto3.client('sns')
STS_CLIENT = boto3.client('sts')
except botocore.exceptions.NoRegionError as e: # pragma: no cover
# This can happen while testing on Travis, but it's taken care of
# during class initialization.
pass
##
# Response and Exception classes
##
class AsyncException(Exception): # pragma: no cover
""" Simple exception class for async tasks. """
pass
class LambdaAsyncResponse(object):
"""
Base Response Dispatcher class
Can be used directly or subclassed if the method to send the message is changed.
"""
def __init__(self, **kwargs):
""" """
if kwargs.get('boto_session'):
self.client = kwargs.get('boto_session').client('lambda')
else: # pragma: no cover
self.client = LAMBDA_CLIENT
def send(self, task_path, args, kwargs):
"""
Create the message object and pass it to the actual sender.
"""
message = {
'task_path': task_path,
'args': args,
'kwargs': kwargs
}
self._send(message)
return self
def _send(self, message):
"""
        Given a message, directly invoke the lambda function for this task.
"""
message['command'] = 'zappa.async.route_lambda_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > 128000: # pragma: no cover
raise AsyncException("Payload too large for async Lambda call")
self.response = self.client.invoke(
FunctionName=AWS_LAMBDA_FUNCTION_NAME,
InvocationType='Event', #makes the call async
Payload=payload
)
self.sent = (self.response.get('StatusCode', 0) == 202)
class SnsAsyncResponse(LambdaAsyncResponse):
"""
    Send an SNS message to a specified SNS topic
Serialise the func path and arguments
"""
def __init__(self, **kwargs):
if kwargs.get('boto_session'):
self.client = kwargs.get('boto_session').client('sns')
else: # pragma: no cover
self.client = SNS_CLIENT
if kwargs.get('arn'):
self.arn = kwargs.get('arn')
else:
if kwargs.get('boto_session'):
sts_client = kwargs.get('boto_session').client('sts')
else:
sts_client = STS_CLIENT
AWS_ACCOUNT_ID = sts_client.get_caller_identity()['Account']
self.arn = 'arn:aws:sns:{region}:{account}:{topic_name}'.format(
region=AWS_REGION,
account=AWS_ACCOUNT_ID,
topic_name=get_topic_name(AWS_LAMBDA_FUNCTION_NAME)
)
def _send(self, message):
"""
Given a message, publish to this topic.
"""
message['command'] = 'zappa.async.route_sns_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > 256000: # pragma: no cover
raise AsyncException("Payload too large for SNS")
self.response = self.client.publish(
TargetArn=self.arn,
Message=payload
)
self.sent = self.response.get('MessageId')
##
# Async Routers
##
ASYNC_CLASSES = {
'lambda': LambdaAsyncResponse,
'sns': SnsAsyncResponse,
}
def route_lambda_task(event, context):
"""
Deserialises the message from event passed to zappa.handler.run_function
imports the function, calls the function with args
"""
message = event
return run_message(message)
def route_sns_task(event, context):
"""
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
"""
record = event['Records'][0]
message = json.loads(
record['Sns']['Message']
)
return run_message(message)
def run_message(message):
"""
Runs a function defined by a message object with keys:
'task_path', 'args', and 'kwargs' used by lambda routing
and a 'command' in handler.py
"""
func = import_and_get_task(message['task_path'])
if hasattr(func, 'sync'):
return func.sync(
*message['args'],
**message['kwargs']
)
else:
return func(
*message['args'],
**message['kwargs']
)
##
# Execution interfaces and classes
##
def run(func, args=[], kwargs={}, service='lambda', **task_kwargs):
"""
Instead of decorating a function with @task, you can just run it directly.
If you were going to do func(*args, **kwargs), then you will call this:
    import zappa.async
zappa.async.run(func, args, kwargs)
If you want to use SNS, then do:
zappa.async.run(func, args, kwargs, service='sns')
and other arguments are similar to @task
"""
task_path = get_func_task_path(func)
return ASYNC_CLASSES[service](**task_kwargs).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
def task(func, service='lambda'):
"""Async task decorator so that running
Args:
func (function): the function to be wrapped
Further requirements:
func must be an independent top-level function.
i.e. not a class method or an anonymous function
service (str): either 'lambda' or 'sns'
Returns:
A replacement function that dispatches func() to
run asynchronously through the service in question
"""
task_path = get_func_task_path(func)
def _run_async(*args, **kwargs):
"""
This is the wrapping async function that replaces the function
that is decorated with @task.
Args:
These are just passed through to @task's func
Assuming a valid service is passed to task() and it is run
inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
it dispatches the function to be run through the service variable.
Otherwise, it runs the task synchronously.
Returns:
            In async mode, the object returned includes the state of the
            dispatch (for instance, its ``sent`` attribute).
When outside of Lambda, the func passed to @task is run and we
return the actual value.
"""
if (service in ASYNC_CLASSES) and (AWS_LAMBDA_FUNCTION_NAME):
send_result = ASYNC_CLASSES[service]().send(task_path, args, kwargs)
return send_result
else:
return func(*args, **kwargs)
update_wrapper(_run_async, func)
_run_async.service = service
_run_async.sync = func
return _run_async
def task_sns(func):
"""
SNS-based task dispatcher. Functions the same way as task()
"""
return task(func, service='sns')
##
# Utility Functions
##
def import_and_get_task(task_path):
"""
Given a modular path to a function, import that module
and return the function.
"""
module, function = task_path.rsplit('.', 1)
app_module = importlib.import_module(module)
app_function = getattr(app_module, function)
return app_function
def get_func_task_path(func):
"""
Format the modular task path for a function via inspection.
"""
module_path = inspect.getmodule(func).__name__
task_path = '{module_path}.{func_name}'.format(
module_path=module_path,
func_name=func.__name__
)
return task_path
| juanantoniofm12/toonai | Zappa/zappa/async.py | Python | mit | 11,154 | 0.00251 |
#!/usr/bin/env python3
"""Convert FASTA to PHYLIP"""
import sys
from Bio import SeqIO
print("Convert FASTA to PHYLIP")
infile = sys.argv[1]
outfile = sys.argv[2]
sequence_list = [] # To keep order of sequence
sequence_dict = {}
for record in SeqIO.parse(infile, "fasta"):
tab = record.id.split(" ")
sequence = str(record.seq).replace(" ", "")
# print sequence, len(sequence)
sequence_list.append(tab[0])
sequence_dict[tab[0]] = sequence
if "U" in sequence:
print(tab[0])
sys.exit()
print("Number of sequences:", len(sequence_dict))
# Test length of the alignment:
alignment_length = 0
for gene in sequence_dict:
if (alignment_length != 0) and (len(sequence_dict[gene]) != alignment_length):
print("Error in alignment length, exit on error !!!")
sys.exit()
else:
alignment_length = len(sequence_dict[gene])
number_of_seq = len(sequence_dict)
print("Number of sequences:\t"+str(number_of_seq))
print("Alignment length:\t"+str(alignment_length))
print("Ratio =\t"+str(alignment_length/3))
if alignment_length % 3 != 0:
    print("Warning: alignment length is not a multiple of 3, so it probably does not encode complete codons")
# Length of gene id, can be changed by passing a third argument
name_length = 50
if len(sys.argv) > 3:
name_length = int(sys.argv[3])
# Write alignment in Phylip format
phyfile = open(outfile, "w")
phyfile.write(str(number_of_seq)+"\t"+str(alignment_length)+"\n")
for gene in sequence_list:
if len(gene) > name_length:
gene_name = gene[0:name_length].replace(" ", "")
if gene_name[-1] == "_":
gene_name = gene_name[0:-1]
# elif gene_name[-2] == "_":
# gene_name = gene_name[0:-2]
else:
gene_name = gene
phyfile.write(gene_name+" "+sequence_dict[gene]+"\n")
phyfile.close()
| romainstuder/evosite3d | convert_fasta2phylip.py | Python | gpl-3.0 | 1,829 | 0.001093 |
from __future__ import with_statement
import logging
import warnings
import django
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
# Our response can vary based on a number of factors, use
# the cache class to determine what we should ``Vary`` on so
# caches won't return the wrong (cached) version.
varies = getattr(self._meta.cache, "varies", [])
if varies:
patch_vary_headers(response, varies)
if self._meta.cache.cacheable(request, response):
if self._meta.cache.cache_control():
# If the request is cacheable and we have a
# ``Cache-Control`` available then patch the header.
patch_cache_control(response, **self._meta.cache.cache_control())
if request.is_ajax() and not response.has_header("Cache-Control"):
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError), e:
return http.HttpBadRequest(e.args[0])
except ValidationError, e:
return http.HttpBadRequest(', '.join(e.messages))
except Exception, e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Re-raise the error to get a proper traceback when the error
# happend during a test case
if request.META.get('SERVER_NAME') == 'testserver':
raise
# Rather than re-raising, we're going to things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
response_class = http.HttpApplicationError
response_code = 500
NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
if isinstance(exception, NOT_FOUND_EXCEPTIONS):
response_class = HttpResponseNotFound
response_code = 404
if settings.DEBUG:
data = {
"error_message": unicode(exception),
"traceback": the_trace,
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format))
# When DEBUG is False, send an error message to the admins (unless it's
# a 404, in which case we check the setting).
send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
if not response_code == 404 or send_broken_links:
log = logging.getLogger('django.request.tastypie')
log.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(), extra={'status_code': response_code, 'request':request})
if django.VERSION < (1, 3, 0):
from django.core.mail import mail_admins
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (the_trace, request_repr)
mail_admins(subject, message, fail_silently=True)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.prepend_urls()
if self.override_urls():
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urls += self.override_urls()
urls += self.base_urls()
urlpatterns = patterns('',
*urls
)
return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
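# A hedged example: with the stock ``Serializer``, a request such as
# ``GET /api/v1/note/?format=json`` or one sending an
# ``Accept: application/json`` header would typically resolve to
# 'application/json' here (the 'note' endpoint is an assumed example).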
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user
and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user
and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
self.is_authenticated(request)
self.is_authorized(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
# If what comes back isn't a ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return http.HttpNoContent()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(str.upper, allowed))
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if not request_method in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authorized(self, request, object=None):
"""
Handles checking of permissions to see if the user has authorization
to GET, POST, PUT, or DELETE this resource. If ``object`` is provided,
the authorization backend can apply additional row-level permissions
checking.
"""
auth_result = self._meta.authorization.is_authorized(request, object)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def build_bundle(self, obj=None, data=None, request=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(obj=obj, data=data, request=request)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
def get_bundle_detail_data(self, bundle):
"""
Convenience method to return the ``detail_uri_name`` attribute off
``bundle.obj``.
Usually just accesses ``bundle.obj.pk`` by default.
"""
return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
Given a ``Bundle`` or an object, it returns the extra kwargs needed to
generate a detail URI.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
``Resource.detail_uri_kwargs`` for the additional bits needed to create the detail URI.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
"""
Handles generating a resource URI.
If the ``bundle_or_obj`` argument is not provided, it builds the URI
for the list endpoint.
If the ``bundle_or_obj`` argument is provided, it builds the URI for
the detail endpoint.
Return the generated URI. If that URI can not be reversed (not found
in the URLconf), it will return an empty string.
"""
if bundle_or_obj is not None:
url_name = 'api_dispatch_detail'
try:
return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
except NoReverseMatch:
return ''
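# Sketch of the behaviour (URIs assume a hypothetical 'note' resource
# mounted under '/api/v1/'): ``get_resource_uri()`` reverses the list
# endpoint, e.g. '/api/v1/note/', while ``get_resource_uri(bundle)``
# reverses the detail endpoint, e.g. '/api/v1/note/1/'; an empty string
# is returned when reversing fails.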
def get_via_uri(self, uri, request=None):
"""
This pulls apart the salient bits of the URI and populates the
resource via an ``obj_get``.
Optionally accepts a ``request``.
If you need custom behavior based on other portions of the URI,
simply override this method.
"""
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix)-1:]
try:
view, args, kwargs = resolve(chomped_uri)
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle):
"""
Given a bundle with an object instance, extract the information from it
to populate the resource.
"""
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def full_hydrate(self, bundle):
"""
Given a populated bundle, distill it and turn it back into
a full-fledged object instance.
"""
if bundle.obj is None:
bundle.obj = self._meta.object_class()
bundle = self.hydrate(bundle)
for field_name, field_object in self.fields.items():
if field_object.readonly is True:
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
# NOTE: We only get back a bundle when it is related field.
if isinstance(value, Bundle) and value.errors.get(field_name):
bundle.errors[field_name] = value.errors[field_name]
if value is not None or field_object.null:
# We need to avoid populating M2M data here as that will
# cause things to blow up.
if not getattr(field_object, 'is_related', False):
setattr(bundle.obj, field_object.attribute, value)
elif not getattr(field_object, 'is_m2m', False):
if value is not None:
setattr(bundle.obj, field_object.attribute, value.obj)
elif field_object.blank:
continue
elif field_object.null:
setattr(bundle.obj, field_object.attribute, value)
return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
}
if field_object.dehydrated_type == 'related':
if getattr(field_object, 'is_m2m', False):
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
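# The ``schema/`` endpoint serializes this dictionary. A rough sketch of
# the shape (field names and values are illustrative, not literal output):
#
#     {
#         "default_format": "application/json",
#         "allowed_list_http_methods": ["get", "post"],
#         "fields": {"title": {"type": "string", "nullable": False, ...}},
#     }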
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
A hook for returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Allows the ``Authorization`` class to further limit the object list.
Also a hook to customize per ``Resource``.
"""
if hasattr(self._meta.authorization, 'apply_limits'):
object_list = self._meta.authorization.apply_limits(request, object_list)
return object_list
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, request=None, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(request=request, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, request=None, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
bundle = self._meta.cache.get(cache_key)
if bundle is None:
bundle = self.obj_get(request=request, **kwargs)
self._meta.cache.set(cache_key, bundle)
return bundle
def obj_create(self, bundle, request=None, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, request=None, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, request=None, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, request=None, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, errors, request):
if request:
desired_format = self.determine_format(request)
else:
desired_format = self._meta.default_format
serialized = self.serialize(request, errors, desired_format)
response = http.HttpBadRequest(content=serialized, content_type=build_content_type(desired_format))
raise ImmediateHttpResponse(response=response)
def is_valid(self, bundle, request=None):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized[self._meta.collection_name]]
to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle) for bundle in bundles]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``delete_list`` to clear out the collection then ``obj_create``
with the provided the data to create the new collection.
Return ``HttpNoContent`` (204 No Content) if
``Meta.always_return_data = False`` (default).
Return ``HttpAccepted`` (202 Accepted) if
``Meta.always_return_data = True``.
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_list_data(request, deserialized)
if not self._meta.collection_name in deserialized:
raise BadRequest("Invalid data sent.")
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized[self._meta.collection_name]:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
bundles_seen.append(bundle)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
to_be_serialized = {}
to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle) for bundle in bundles_seen]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
If an existing resource is modified and
``Meta.always_return_data = False`` (default), return ``HttpNoContent``
(204 No Content).
If an existing resource is modified and
``Meta.always_return_data = True``, return ``HttpAccepted`` (202
Accepted).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_update(bundle, request=request, **self.remove_api_resource_names(kwargs))
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
except (NotFound, MultipleObjectsReturned):
updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return http.HttpNotImplemented()
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpNoContent`` (204 No Content).
"""
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
try:
self.obj_delete(request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
def patch_list(self, request, **kwargs):
"""
Updates a collection in-place.
The exact behavior of ``PATCH`` to a list resource is still the matter of
some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
behavior this method implements (described below) is something of a
stab in the dark. It's mostly cribbed from GData, with a smattering
of ActiveResource-isms and maybe even an original idea or two.
The ``PATCH`` format is one that's similar to the response returned from
a ``GET`` on a list resource::
{
"objects": [{object}, {object}, ...],
"deleted_objects": ["URI", "URI", "URI", ...],
}
For each object in ``objects``:
* If the dict does not have a ``resource_uri`` key then the item is
considered "new" and is handled like a ``POST`` to the resource list.
* If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
to an existing resource then the item is an update; it's treated
like a ``PATCH`` to the corresponding resource detail.
* If the dict has a ``resource_uri`` but the resource *doesn't* exist,
then this is considered to be a create-via-``PUT``.
Each entry in ``deleted_objects`` refers to a resource URI of an existing
resource to be deleted; each is handled like a ``DELETE`` to the relevant
resource.
In any case:
* If there's a resource URI it *must* refer to a resource of this
type. It's an error to include a URI of a different resource.
* ``PATCH`` is all or nothing. If a single sub-operation fails, the
entire request will fail and all resources will be rolled back.
* For ``PATCH`` to work, you **must** have ``put`` in your
:ref:`detail-allowed-methods` setting.
* To delete objects via ``deleted_objects`` in a ``PATCH`` request you
**must** have ``delete`` in your :ref:`detail-allowed-methods`
setting.
Substitute appropriate names for ``objects`` and
``deleted_objects`` if ``Meta.collection_name`` is set to something
other than ``objects`` (default).
"""
request = convert_post_to_patch(request)
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
collection_name = self._meta.collection_name
deleted_collection_name = 'deleted_%s' % collection_name
if collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for data in deserialized[collection_name]:
# If there's a resource_uri then this is either an
# update-in-place or a create-via-PUT.
if "resource_uri" in data:
uri = data.pop('resource_uri')
try:
obj = self.get_via_uri(uri, request=request)
# The object does exist, so this is an update-in-place.
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
self.update_in_place(request, bundle, data)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# The object referenced by resource_uri doesn't exist,
# so this is a create-by-PUT equivalent.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle, request=request)
else:
# There's no resource URI, so this is a create call just
# like a POST to the list resource.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle, request=request)
deleted_collection = deserialized.get(deleted_collection_name, [])
if deleted_collection:
if 'delete' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for uri in deleted_collection:
obj = self.get_via_uri(uri, request=request)
self.obj_delete(request=request, _obj=obj)
return http.HttpAccepted()
def patch_detail(self, request, **kwargs):
"""
Updates a resource in-place.
Calls ``obj_update``.
If the resource is updated, return ``HttpAccepted`` (202 Accepted).
If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
"""
request = convert_post_to_patch(request)
# We want to be able to validate the update, but we can't just pass
# the partial data into the validator since all data needs to be
# present. Instead, we basically simulate a PUT by pulling out the
# original data and updating it in-place.
# So first pull out the original object. This is essentially
# ``get_detail``.
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
# Now update the bundle in-place.
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
self.update_in_place(request, bundle, deserialized)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
"""
original_bundle.data.update(**dict_strip_unicode_keys(new_data))
# Now we've got a bundle with the new data sitting in it and we're
# basically in the same spot as a PUT request. So the rest of this
# function is cribbed from put_detail.
self.alter_deserialized_detail_data(request, original_bundle.data)
kwargs = {
self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
'request': request,
}
return self.obj_update(original_bundle, **kwargs)
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
kwarg_name = '%s_list' % self._meta.detail_uri_name
obj_identifiers = kwargs.get(kwarg_name, '').split(';')
objects = []
not_found = []
for identifier in obj_identifiers:
try:
obj = self.obj_get(request, **{self._meta.detail_uri_name: identifier})
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
except ObjectDoesNotExist:
not_found.append(identifier)
object_list = {
self._meta.collection_name: objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
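# Illustrative request for a hypothetical 'note' resource:
# ``GET /api/v1/note/set/1;3;99/`` returns the serialized objects for the
# identifiers that exist under ``Meta.collection_name`` (``objects`` by
# default) and lists any missing identifiers, e.g. '99', under 'not_found'.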
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
meta = attrs.get('Meta')
if meta and hasattr(meta, 'queryset'):
setattr(meta, 'object_class', meta.queryset.model)
new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
include_fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
field_names = new_class.base_fields.keys()
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(include_fields) and not field_name in include_fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
# Add in the new fields.
new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if not 'absolute_url' in new_class.base_fields:
new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
internal_type = f.get_internal_type()
if internal_type in ('DateField', 'DateTimeField'):
result = fields.DateTimeField
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
result = fields.IntegerField
elif internal_type in ('FileField', 'ImageField'):
result = fields.FileField
elif internal_type == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif internal_type == 'ForeignKey':
# result = ForeignKey
# elif internal_type == 'ManyToManyField':
# result = ManyToManyField
return result
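# In short, a non-exhaustive summary of the mapping above:
#     DateField/DateTimeField       -> fields.DateTimeField
#     BooleanField/NullBooleanField -> fields.BooleanField
#     IntegerField & friends        -> fields.IntegerField
#     anything unrecognised         -> the ``default`` (fields.CharField)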
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.object_class:
return final_fields
for f in cls._meta.object_class._meta.fields:
# If the field name is already present, skip
if f.name in cls.base_fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if cls.should_skip_field(f):
continue
api_field_class = cls.api_field_from_django_field(f)
kwargs = {
'attribute': f.name,
'help_text': f.help_text,
}
if f.null is True:
kwargs['null'] = True
kwargs['unique'] = f.unique
if not f.null and f.blank is True:
kwargs['default'] = ''
kwargs['blank'] = True
if f.get_internal_type() == 'TextField':
kwargs['default'] = ''
if f.has_default():
kwargs['default'] = f.default
if getattr(f, 'auto_now', False):
kwargs['default'] = f.auto_now
if getattr(f, 'auto_now_add', False):
kwargs['default'] = f.auto_now_add
final_fields[f.name] = api_field_class(**kwargs)
final_fields[f.name].instance_name = f.name
return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
"""
Given a field name, an optional filter type and an optional list of
additional relations, determine if a field can be filtered on.
If a filter does not meet the needed conditions, it should raise an
``InvalidFilterError``.
If the filter meets the conditions, a list of attribute names (not
field names) will be returned.
"""
if filter_bits is None:
filter_bits = []
if not field_name in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
# Check to see if it's an allowed lookup type.
if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
# Must be an explicit whitelist.
if not filter_type in self._meta.filtering[field_name]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
if self.fields[field_name].attribute is None:
raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
# Check to see if it's a relational lookup and if that's allowed.
if len(filter_bits):
if not getattr(self.fields[field_name], 'is_related', False):
raise InvalidFilterError("The '%s' field does not support relations." % field_name)
if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
# Recursively descend through the remaining lookups in the filter,
# if any. We should ensure that all along the way, we're allowed
# to filter on that field by the related resource.
related_resource = self.fields[field_name].get_related_resource(None)
return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
return [self.fields[field_name].attribute]
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a python object.
"""
# Simple values
true_values_list = ['true', 'True', True]
false_values_list = ['false', 'False', False]
none_values_list = ('nil', 'none', 'None', None)
if value in true_values_list:
value = True
elif value in false_values_list:
value = False
elif value in none_values_list:
value = None
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
def build_filters(self, filters=None):
"""
Given a dictionary of filters, create the necessary ORM-level filters.
Keys should be resource fields, **NOT** model fields.
Valid values are either a list of Django filter types (i.e.
``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
``ALL_WITH_RELATIONS`` constant.
"""
# At the declarative level:
# filtering = {
# 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
# 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
# 'resource_field_name_3': ALL,
# 'resource_field_name_4': ALL_WITH_RELATIONS,
# ...
# }
# Accepts the filters as a dict. None by default, meaning no filters.
if filters is None:
filters = {}
qs_filters = {}
if getattr(self._meta, 'queryset', None) is not None:
# Get the possible query terms from the current QuerySet.
if hasattr(self._meta.queryset.query.query_terms, 'keys'):
# Django 1.4 & below compatibility.
query_terms = self._meta.queryset.query.query_terms.keys()
else:
# Django 1.5+.
query_terms = self._meta.queryset.query.query_terms
else:
if hasattr(QUERY_TERMS, 'keys'):
# Django 1.4 & below compatibility.
query_terms = QUERY_TERMS.keys()
else:
# Django 1.5+.
query_terms = QUERY_TERMS
for filter_expr, value in filters.items():
filter_bits = filter_expr.split(LOOKUP_SEP)
field_name = filter_bits.pop(0)
filter_type = 'exact'
if not field_name in self.fields:
# It's not a field we know about. Move along citizen.
continue
if len(filter_bits) and filter_bits[-1] in query_terms:
filter_type = filter_bits.pop()
lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
db_field_name = LOOKUP_SEP.join(lookup_bits)
qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
qs_filters[qs_filter] = value
return dict_strip_unicode_keys(qs_filters)
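# A minimal sketch, assuming a resource field 'title' whose ``attribute``
# is also 'title' and whose ``Meta.filtering`` permits 'startswith':
#
#     self.build_filters({'title__startswith': 'The'})
#     # -> {'title__startswith': 'The'}, ready to be passed to the ORM.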
def apply_sorting(self, obj_list, options=None):
"""
Given a dictionary of options, apply some ORM-level sorting to the
provided ``QuerySet``.
Looks for the ``order_by`` key and handles either ascending (just the
field name) or descending (the field name with a ``-`` in front).
The field name should be the resource field, **NOT** model field.
"""
if options is None:
options = {}
parameter_name = 'order_by'
if not 'order_by' in options:
if not 'sort_by' in options:
# Nothing to alter the order. Return what we've got.
return obj_list
else:
warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
parameter_name = 'sort_by'
order_by_args = []
if hasattr(options, 'getlist'):
order_bits = options.getlist(parameter_name)
else:
order_bits = options.get(parameter_name)
if not isinstance(order_bits, (list, tuple)):
order_bits = [order_bits]
for order_by in order_bits:
order_by_bits = order_by.split(LOOKUP_SEP)
field_name = order_by_bits[0]
order = ''
if order_by_bits[0].startswith('-'):
field_name = order_by_bits[0][1:]
order = '-'
if not field_name in self.fields:
# It's not a field we know about. Move along citizen.
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
if not field_name in self._meta.ordering:
raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
return obj_list.order_by(*order_by_args)
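# For instance, assuming 'created_at' is a resource field listed in
# ``Meta.ordering`` and backed by an attribute of the same name,
# ``?order_by=-created_at`` ends up calling
# ``obj_list.order_by('-created_at')``.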
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
return self.get_object_list(request).filter(**applicable_filters)
def get_object_list(self, request):
"""
An ORM-specific implementation of ``get_object_list``.
Returns a queryset that may have been limited by other overrides.
"""
return self._meta.queryset._clone()
def obj_get_list(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = {}
if hasattr(request, 'GET'):
# Grab a mutable copy.
filters = request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
applicable_filters = self.build_filters(filters=filters)
try:
base_object_list = self.apply_filters(request, applicable_filters)
return self.apply_authorization_limits(request, base_object_list)
except ValueError:
raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_get``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
base_object_list = self.get_object_list(request).filter(**kwargs)
object_list = self.apply_authorization_limits(request, base_object_list)
stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
if len(object_list) <= 0:
raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
elif len(object_list) > 1:
raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
return object_list[0]
except ValueError:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
bundle = self.full_hydrate(bundle)
self.is_valid(bundle, request)
if bundle.errors:
self.error_response(bundle.errors, request)
# Save FKs just in case.
self.save_related(bundle)
# Save parent
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def lookup_kwargs_with_identifiers(self, bundle, kwargs):
"""
Kwargs here represent URI identifiers, e.g. ``/repos/<user_id>/<repo_name>/``.
We need to turn those identifiers into Python objects for generating
lookup parameters that can find them in the DB.
"""
lookup_kwargs = {}
bundle.obj = self.get_object_list(bundle.request).model()
# Override data values, we rely on uri identifiers
bundle.data.update(kwargs)
# We're going to manually hydrate, as opposed to calling
# ``full_hydrate``, to ensure we don't try to flesh out related
# resources & keep things speedy.
bundle = self.hydrate(bundle)
for identifier in kwargs:
if identifier == self._meta.detail_uri_name:
lookup_kwargs[identifier] = kwargs[identifier]
continue
field_object = self.fields[identifier]
# Skip readonly or related fields.
if field_object.readonly is True or getattr(field_object, 'is_related', False):
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % identifier, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
lookup_kwargs[identifier] = value
return lookup_kwargs
def obj_update(self, bundle, request=None, skip_errors=False, **kwargs):
"""
An ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not self.get_bundle_detail_data(bundle):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
except:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine).
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle.request, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
self.is_valid(bundle, request)
if bundle.errors and not skip_errors:
self.error_response(bundle.errors, request)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def obj_delete_list(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_delete_list``.
Takes optional ``kwargs``, which can be used to narrow the query.
"""
base_object_list = self.get_object_list(request).filter(**kwargs)
authed_object_list = self.apply_authorization_limits(request, base_object_list)
if hasattr(authed_object_list, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
authed_object_list.delete()
else:
for authed_obj in authed_object_list:
authed_obj.delete()
def obj_delete(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
obj = kwargs.pop('_obj', None)
if not hasattr(obj, 'delete'):
try:
obj = self.obj_get(request, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
obj.delete()
@transaction.commit_on_success()
def patch_list(self, request, **kwargs):
"""
An ORM-specific implementation of ``patch_list``.
Necessary because PATCH should be atomic (all-success or all-fail)
and the only way to do this neatly is at the database level.
"""
return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
"""
An ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and self.get_bundle_detail_data(bundle):
bundle.obj.delete()
def save_related(self, bundle):
"""
Handles the saving of related non-M2M data.
Simply assigning ``child.parent = parent`` & then calling
``Child.save`` isn't good enough to make sure the ``parent``
is saved.
To get around this, we go through all our related fields &
call ``save`` on them if they have related, non-M2M data.
M2M data is handled by the ``ModelResource.save_m2m`` method.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_related', False):
continue
if getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
if field_object.blank and not bundle.data.has_key(field_name):
continue
# Get the object.
try:
related_obj = getattr(bundle.obj, field_object.attribute)
except ObjectDoesNotExist:
related_obj = None
# Because sometimes it's ``None`` & that's OK.
if related_obj:
if field_object.related_name:
if not self.get_bundle_detail_data(bundle):
bundle.obj.save()
setattr(related_obj, field_object.related_name, bundle.obj)
related_obj.save()
setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
"""
Handles the saving of related M2M data.
Due to the way Django works, the M2M data must be handled after the
main instance, which is why this isn't a part of the main ``save`` bits.
Currently slightly inefficient in that it will clear out the whole
relation and recreate the related data as needed.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
# Get the manager.
related_mngr = None
if isinstance(field_object.attribute, basestring):
related_mngr = getattr(bundle.obj, field_object.attribute)
elif callable(field_object.attribute):
related_mngr = field_object.attribute(bundle)
if not related_mngr:
continue
if hasattr(related_mngr, 'clear'):
# Clear it out, just to be safe.
related_mngr.clear()
related_objs = []
for related_bundle in bundle.data[field_name]:
related_bundle.obj.save()
related_objs.append(related_bundle.obj)
related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
"""
Given a ``Bundle`` or an object (typically a ``Model`` instance),
it returns the extra kwargs needed to generate a detail URI.
By default, it uses the model's ``pk`` in order to create the URI.
"""
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
else:
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
return kwargs
class NamespacedModelResource(ModelResource):
"""
A ModelResource subclass that respects Django namespaces.
"""
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
"""
Force Django to process the VERB.
"""
if request.method == verb:
if hasattr(request, '_post'):
del(request._post)
del(request._files)
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
def convert_post_to_put(request):
return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
return convert_post_to_VERB(request, verb='PATCH')
| VishvajitP/django-tastypie | tastypie/resources.py | Python | bsd-3-clause | 84,667 | 0.001937 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory
from tests.factories import UserFactory
from scripts.approve_registrations import main
class TestApproveRegistrations(OsfTestCase):
def setUp(self):
super(TestApproveRegistrations, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user)
self.registration.is_public = True
self.registration.require_approval(self.user)
def test_new_registration_should_not_be_approved(self):
assert_true(self.registration.is_pending_registration)
main(dry_run=False)
assert_false(self.registration.is_registration_approved)
def test_should_not_approve_pending_registration_less_than_48_hours_old(self):
        # RegistrationApproval#initiation_date is read only
self.registration.registration_approval._fields['initiation_date'].__set__(
self.registration.registration_approval,
(datetime.utcnow() - timedelta(hours=47)),
safe=True
)
self.registration.registration_approval.save()
assert_false(self.registration.is_registration_approved)
main(dry_run=False)
assert_false(self.registration.is_registration_approved)
def test_should_approve_pending_registration_that_is_48_hours_old(self):
        # RegistrationApproval#initiation_date is read only
self.registration.registration_approval._fields['initiation_date'].__set__(
self.registration.registration_approval,
(datetime.utcnow() - timedelta(hours=48)),
safe=True
)
self.registration.registration_approval.save()
assert_false(self.registration.is_registration_approved)
main(dry_run=False)
assert_true(self.registration.is_registration_approved)
def test_should_approve_pending_registration_more_than_48_hours_old(self):
        # RegistrationApproval#initiation_date is read only
self.registration.registration_approval._fields['initiation_date'].__set__(
self.registration.registration_approval,
(datetime.utcnow() - timedelta(days=365)),
safe=True
)
self.registration.registration_approval.save()
assert_false(self.registration.is_registration_approved)
main(dry_run=False)
assert_true(self.registration.is_registration_approved)
def test_registration_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
        # RegistrationApproval#initiation_date is read only
self.registration.registration_approval._fields['initiation_date'].__set__(
self.registration.registration_approval,
(datetime.utcnow() - timedelta(days=365)),
safe=True
)
self.registration.registration_approval.save()
assert_false(self.registration.is_registration_approved)
main(dry_run=False)
assert_true(self.registration.is_registration_approved)
assert_true(self.registration.is_public)
# Logs: Created, approval initiated, approval initiated, registered, registration complete
assert_equal(len(self.registration.registered_from.logs), initial_project_logs + 2)
| ckc6cz/osf.io | scripts/tests/test_approve_registrations.py | Python | apache-2.0 | 3,410 | 0.002053 |
import os
import re
from collections import Callable
missing = object()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
self.__module__ = func.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, missing)
if value is missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
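# Join one or more path components onto ``base``, raising ``ValueError`` if the
# resulting path would escape ``base``.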
def safe_join(base, *paths):
if not os.path.isabs(base):
raise ValueError("%r is not an absolute path." % base)
base = os.path.normpath(base)
path = os.path.normpath(os.path.join(base, *paths))
if not path.startswith(base):
raise ValueError("Path %r is outside of %r" % (path, base))
return path
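# Yield items from ``iterable`` in order, skipping any item whose ``key(item)``
# has already been seen.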
def unique(iterable, key=lambda x: x):
yielded = set()
for item in iterable:
keyitem = key(item)
if keyitem not in yielded:
yielded.add(keyitem)
yield item
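# Normalize a condition -- a callable, a regex pattern string, or a compiled
# pattern -- into a predicate that can be called with a path.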
def get_condition_func(condition):
if isinstance(condition, Callable):
return condition
if isinstance(condition, str):
condition = re.compile(condition)
return lambda path: condition.search(path)
| gears/gears | gears/utils.py | Python | isc | 1,323 | 0 |
from quex.engine.generator.languages.address import Address
from quex.blackboard import E_EngineTypes, E_AcceptanceIDs, E_StateIndices, \
E_TransitionN, E_PostContextIDs, E_PreContextIDs, \
setup as Setup
def do(txt, TheState, TheAnalyzer, DefineLabelF=True, MentionStateIndexF=True):
LanguageDB = Setup.language_db
if DefineLabelF:
txt.append(Address("$drop-out", TheState.index))
if MentionStateIndexF:
txt.append(" __quex_debug_drop_out(%i);\n" % TheState.index)
if TheAnalyzer.engine_type == E_EngineTypes.BACKWARD_PRE_CONTEXT:
txt.append(" %s\n" % LanguageDB.GOTO(E_StateIndices.END_OF_PRE_CONTEXT_CHECK))
return
elif TheAnalyzer.engine_type == E_EngineTypes.BACKWARD_INPUT_POSITION:
if TheState.drop_out.reachable_f:
# Backward input position detectors are always isolated state machines.
# => TheAnalyzer.state_machine_id = id of the backward input position detector.
txt.append(' __quex_debug("backward input position %i detected\\n");\n' % \
TheAnalyzer.state_machine_id)
txt.append(" %s\n\n" % LanguageDB.INPUT_P_INCREMENT())
txt.append(" goto %s;\n" \
% LanguageDB.LABEL_NAME_BACKWARD_INPUT_POSITION_RETURN(TheAnalyzer.state_machine_id))
return
info = TheState.drop_out.trivialize()
# (1) Trivial Solution
if info is not None:
for i, easy in enumerate(info):
positioning_str = ""
if easy[1].positioning != 0:
if easy[1].positioning == E_TransitionN.VOID: register = easy[1].position_register
else: register = E_PostContextIDs.NONE
positioning_str = "%s\n" % LanguageDB.POSITIONING(easy[1].positioning, register)
goto_terminal_str = "%s" % LanguageDB.GOTO_TERMINAL(easy[1].acceptance_id)
txt.append(LanguageDB.IF_PRE_CONTEXT(i == 0, easy[0].pre_context_id,
"%s%s" % (positioning_str, goto_terminal_str)))
return
# (2) Separate: Pre-Context Check and Routing to Terminal
# (2.1) Pre-Context Check
for i, element in enumerate(TheState.drop_out.get_acceptance_checker()):
if element.pre_context_id == E_PreContextIDs.NONE \
and element.acceptance_id == E_AcceptanceIDs.VOID:
break
txt.append(
LanguageDB.IF_PRE_CONTEXT(i == 0, element.pre_context_id,
LanguageDB.ASSIGN("last_acceptance",
LanguageDB.ACCEPTANCE(element.acceptance_id)))
)
if element.pre_context_id == E_PreContextIDs.NONE:
break # No check after the unconditional acceptance
# (2.2) Routing to Terminal
# (2.2.1) If the positioning is the same for all entries (except the FAILURE)
# then, again, the routing may be simplified:
#router = TheState.drop_out.router
#prototype = (router[0].positioning, router[0].position_register)
#simple_f = True
#for element in islice(router, 1, None):
# if element.acceptance_id == E_AcceptanceIDs.FAILURE: continue
# if prototype != (element.positioning, element.position_register):
# simple_f = False
# break
#if simple_f:
# txt.append(" %s\n %s\n" %
# (LanguageDB.POSITIONING(element.positioning, element.position_register),
# LanguageDB.GOTO_TERMINAL(E_AcceptanceIDs.VOID)))
#else:
case_list = []
for element in TheState.drop_out.get_terminal_router():
if element.positioning == E_TransitionN.VOID: register = element.position_register
else: register = None
case_list.append((LanguageDB.ACCEPTANCE(element.acceptance_id),
"%s %s" % \
(LanguageDB.POSITIONING(element.positioning, register),
LanguageDB.GOTO_TERMINAL(element.acceptance_id))))
txt.extend(LanguageDB.SELECTION("last_acceptance", case_list))
| coderjames/pascal | quex-0.63.1/quex/engine/generator/state/drop_out.py | Python | bsd-2-clause | 4,373 | 0.011891 |
import time
import plugins
import hangups
def _initialise(bot):
plugins.register_handler(on_hangout_call, type="call")
def on_hangout_call(bot, event, command):
if event.conv_event._event.hangout_event.event_type == hangups.schemas.ClientHangoutEventType.END_HANGOUT:
lastcall = bot.conversation_memory_get(event.conv_id, "lastcall")
if lastcall:
lastcaller = lastcall["caller"]
since = int(time.time() - lastcall["timestamp"])
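            # Pick a human-friendly unit: seconds under 2 minutes, minutes under
            # 2 hours, hours under 2 days, otherwise days.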
if since < 120:
humantime = "{} seconds".format(since)
elif since < 7200:
humantime = "{} minutes".format(since // 60)
elif since < 172800:
humantime = "{} hours".format(since // 3600)
else:
humantime = "{} days".format(since // 86400)
if bot.conversations.catalog[event.conv_id]["type"] == "ONE_TO_ONE":
"""subsequent calls for a ONE_TO_ONE"""
bot.send_message_parsed(event.conv_id,
_("<b>It's been {} since the last call. Lonely? I can't reply you as I don't have speech synthesis (or speech recognition either!)</b>").format(humantime))
else:
"""subsequent calls for a GROUP"""
bot.send_message_parsed(event.conv_id,
_("<b>It's been {} since the last call. The last caller was <i>{}</i>.</b>").format(humantime, lastcaller))
else:
"""first ever call for any conversation"""
bot.send_message_parsed(event.conv_id,
_("<b>No prizes for that call</b>"))
bot.conversation_memory_set(event.conv_id, "lastcall", { "caller": event.user.full_name, "timestamp": time.time() })
| ravrahn/HangoutsBot | hangupsbot/plugins/humor_hangoutcalls.py | Python | gpl-3.0 | 1,755 | 0.006838 |
#!/usr/bin/env python
"""
The scripts that compose this module contain a set of functions
needed to properly perform background subtraction for each camera
of a dataset.
"""
import cbackground
import cv2
import numpy as np
import sys
from gui import trackbar
from threedgeometry import frameretriever
| lacatus/TFM | bgsubtraction/__init__.py | Python | apache-2.0 | 305 | 0 |
#!/usr/bin/env python
"""
Undistort image.
(C) 2016-2018 1024jp
"""
import math
import os
import sys
import cv2
import numpy as np
from modules import argsparser
from modules.datafile import Data
from modules.undistortion import Undistorter
from modules.projection import Projector
# constants
SUFFIX = "_calib"
class ArgsParser(argsparser.Parser):
description = 'Undistort image based on a location file.'
datafile_name = 'image'
def init_arguments(self):
super(ArgsParser, self).init_arguments()
script = self.add_argument_group('script options')
script.add_argument('--save',
action='store_true',
default=False,
help="save result in a file instead displaying it"
" (default: %(default)s)"
)
script.add_argument('--perspective',
action='store_true',
default=False,
help="also remove perspective"
" (default: %(default)s)"
)
script.add_argument('--stats',
action='store_true',
default=False,
help="display stats"
" (default: %(default)s)"
)
def add_suffix_to_path(path, suffix):
"""Append suffix to file name before file extension.
Arguments:
path (str) -- File path.
suffix (str) -- Suffix string to append.
"""
root, extension = os.path.splitext(path)
return root + suffix + extension
def show_image(image, scale=1.0, window_title='Image'):
"""Display given image in a window.
Arguments:
image () -- Image to display.
scale (float) -- Magnification of image.
window_title (str) -- Title of window.
"""
scaled_image = scale_image(image, scale)
cv2.imshow(window_title, scaled_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def scale_image(image, scale=1.0):
"""Scale up/down given image.
Arguments:
image () -- Image to process.
scale (float) -- Magnification of image.
"""
height, width = [int(scale * length) for length in image.shape[:2]]
return cv2.resize(image, (width, height))
def plot_points(image, points, color=(0, 0, 255)):
"""Draw circles at given locations on image.
Arguments:
image -- Image to draw on.
points -- x,y pairs of points to plot.
"""
# find best radius for image
image_width = image.shape[1]
radius = int(image_width / 400)
# draw
for point in points:
point = tuple(map(int, point))
cv2.circle(image, point, color=color, radius=radius,
                   thickness=radius // 2)
def estimate_clipping_rect(projector, size):
"""
Return:
rect -- NSRect style 2d-tuple.
flipped (bool) -- Whether y-axis is flipped.
"""
# lt -> rt -> lb -> rb
image_corners = [(0, 0), (size[0], 0), (0, size[1]), size]
x_points = []
y_points = []
for corner in image_corners:
x, y = map(int, projector.project_point(*corner))
x_points.append(x)
y_points.append(y)
min_x = min(x_points)
min_y = min(y_points)
max_x = max(x_points)
max_y = max(y_points)
rect = ((min_x, min_y), (max_x - min_x, max_y - min_y))
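    # The projection has inverted the vertical axis when the projected
    # bottom-right corner lands at a negative y value; report that as flipped.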
flipped = y_points[3] < 0
return rect, flipped
def main(data, saves_file=False, removes_perspective=True, shows_stats=False):
imgpath = data.datafile.name
image = cv2.imread(imgpath)
size = image.shape[::-1][1:3]
undistorter = Undistorter.init(data.image_points, data.dest_points, size)
image = undistorter.undistort_image(image)
undistorted_points = undistorter.calibrate_points(data.image_points)
plot_points(image, undistorted_points)
if shows_stats:
print('[stats]')
print('number of points: {}'.format(len(undistorted_points)))
if removes_perspective:
projector = Projector(undistorted_points, data.dest_points)
# show stats if needed
if shows_stats:
diffs = []
for point, (dest_x, dest_y, dest_z) in zip(undistorted_points,
data.dest_points):
x, y = projector.project_point(*point)
diffs.append([x - dest_x, y - dest_y])
abs_diffs = [(abs(x), abs(y)) for x, y in diffs]
print('mean: {:.2f}, {:.2f}'.format(*np.mean(abs_diffs, axis=0)))
print(' std: {:.2f}, {:.2f}'.format(*np.std(abs_diffs, axis=0)))
print(' max: {:.2f}, {:.2f}'.format(*np.max(abs_diffs, axis=0)))
print('diff:')
for x, y in diffs:
print(' {:6.1f},{:6.1f}'.format(x, y))
# transform image by removing perspective
rect, is_flipped = estimate_clipping_rect(projector, size)
image = projector.project_image(image, rect[1], rect[0])
scale = float(size[0]) / image.shape[1]
image = scale_image(image, scale)
for point in data.dest_points:
point = point[0:2]
point = [scale * (l - origin) for l, origin in zip(point, rect[0])]
plot_points(image, [point], color=(255, 128, 0))
# flip image if needed
if is_flipped:
image = cv2.flip(image, 0)
if saves_file:
outpath = add_suffix_to_path(imgpath, SUFFIX)
cv2.imwrite(outpath, image)
else:
show_image(image, scale=1.0/2, window_title='Undistorted Image')
if __name__ == "__main__":
parser = ArgsParser()
args = parser.parse_args()
if args.test:
print("This script doesn't have test.")
sys.exit()
data = Data(args.file, in_cols=args.in_cols)
main(data, saves_file=args.save,
removes_perspective=args.perspective, shows_stats=args.stats)
| 1024jp/LensCalibrator | createimage.py | Python | mit | 6,006 | 0.000167 |
from __future__ import unicode_literals
from django.db import models
class Attendance(models.Model):
pass
| cloudartisan/dojomaster | apps/attendance/models.py | Python | mit | 113 | 0 |
#!/usr/bin/env python3
import setuptools
setuptools.setup(
name = 'rheedsim',
version = '0.1.0',
packages = ['rheedsim'],
entry_points = {
'console_scripts':[
'rheedsim = rheedsim.__main__:main'
]
},
)
| chanjr/rheedsim | src/setup.py | Python | mit | 305 | 0.036066 |
from ..braille import from_unicode
def _render(width, height, text):
# text must already be i18n-ed to Unicode.
data = []
lines = tuple(from_unicode(l) for l in text.split('\n'))
for line in lines:
data.append(line)
# pad page with empty rows
while len(data) % height:
data.append(tuple())
return tuple(data)
def render_book_help(width, height):
# TRANSLATORS: The AsciiDoc markup is used to suggest a page break
# in this string.
text = _('''\
With Canute 360 you can read files in Braille. You can move within a \
file using the three large control buttons on the front panel of \
Canute. Press the large button to the right of centre on the front \
panel labelled "forward" to move forward one page within this help \
file.
<<<
Press the large button to the left of centre, labelled "back" to move \
back one page within a file, similar to turning the pages in a \
physical book. You can move forwards or backwards five pages at a time \
by holding down the "forward" or "back" button. You can access all \
other Canute features, including the library menu, bookmarks and \
system settings by pressing the large central button labelled "menu" \
to access the main menu. Press the circular help button (labelled "H") \
at the top left of the display to return to your book.\
''')
return _render(width, height, text)
def render_home_menu_help(width, height):
text = _('''\
This is the main menu. From the main menu you can access the library, \
insert a bookmark, return to a bookmark that was previously placed \
within a file, or navigate to a page within a book. You can select an \
item from the main menu by pressing the triangular line select button \
to the left of the menu item on the display.
You can choose a new book by pressing the line select button to the \
left of "view library menu" on the display to access the library menu. \
You can navigate to a page within a file by pressing the triangular \
line select button to the left of "go to page" and following the \
instructions on the display. You can insert a bookmark by pressing the \
line select button to the left of "insert bookmark at current page". \
You can retrieve a bookmark by pressing the line select button to the \
left of "choose from existing bookmarks". To make system wide changes, \
including changing the language or Braille code, press the line select \
button to the left of "view system menu" on the display.\
''')
return _render(width, height, text)
| Bristol-Braille/canute-ui | ui/book/help.py | Python | gpl-3.0 | 2,528 | 0.000396 |
import datetime
from django.core.management.base import NoArgsCommand
from django.conf import settings as django_settings
from askbot import models
from askbot import const
from askbot.conf import settings as askbot_settings
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from askbot.utils import mail
from askbot.utils.classes import ReminderSchedule
DEBUG_THIS_COMMAND = False
class Command(NoArgsCommand):
def handle_noargs(self, **options):
if askbot_settings.ENABLE_EMAIL_ALERTS == False:
return
if askbot_settings.ENABLE_ACCEPT_ANSWER_REMINDERS == False:
return
#get questions without answers, excluding closed and deleted
#order it by descending added_at date
schedule = ReminderSchedule(
askbot_settings.DAYS_BEFORE_SENDING_ACCEPT_ANSWER_REMINDER,
askbot_settings.ACCEPT_ANSWER_REMINDER_FREQUENCY,
askbot_settings.MAX_ACCEPT_ANSWER_REMINDERS
)
questions = models.Post.objects.get_questions().exclude(
deleted = True
).added_between(
start = schedule.start_cutoff_date,
end = schedule.end_cutoff_date
).filter(
thread__answer_count__gt = 0
).filter(
thread__accepted_answer__isnull=True #answer_accepted = False
).order_by('-added_at')
#for all users, excluding blocked
#for each user, select a tag filtered subset
#format the email reminder and send it
for user in models.User.objects.exclude(status = 'b'):
user_questions = questions.filter(author = user)
final_question_list = user_questions.get_questions_needing_reminder(
activity_type = const.TYPE_ACTIVITY_ACCEPT_ANSWER_REMINDER_SENT,
user = user,
recurrence_delay = schedule.recurrence_delay
)
#todo: rewrite using query set filter
#may be a lot more efficient
question_count = len(final_question_list)
if question_count == 0:
continue
subject_line = _(
'Accept the best answer for %(question_count)d of your questions'
) % {'question_count': question_count}
#todo - make a template for these
if question_count == 1:
reminder_phrase = _('Please accept the best answer for this question:')
else:
reminder_phrase = _('Please accept the best answer for these questions:')
body_text = '<p>' + reminder_phrase + '</p>'
body_text += '<ul>'
for question in final_question_list:
body_text += '<li><a href="%s%s?sort=latest">%s</a></li>' \
% (
askbot_settings.APP_URL,
question.get_absolute_url(),
question.thread.title
)
body_text += '</ul>'
if DEBUG_THIS_COMMAND:
print "User: %s<br>\nSubject:%s<br>\nText: %s<br>\n" % \
(user.email, subject_line, body_text)
else:
mail.send_mail(
subject_line = subject_line,
body_text = body_text,
recipient_list = (user.email,)
)
| tvenkat/askbot-devel | askbot/management/commands/send_accept_answer_reminders.py | Python | gpl-3.0 | 3,704 | 0.011609 |
import sys
import os
import io
from pkg_resources import parse_version
import wx
if parse_version(wx.__version__) < parse_version('2.9'):
tmpApp = wx.PySimpleApp()
else:
tmpApp = wx.App(False)
from psychopy import experiment
from psychopy.experiment.components import getAllComponents
# usage: generate or compare all Component.param settings & options
# motivation: catch deviations introduced during refactoring
# use --out to re-generate componsTemplate.txt
# ignore attributes that are there because inherit from object
ignoreObjectAttribs = True
# should not need a wx.App with fetchIcons=False
try:
allComp = getAllComponents(fetchIcons=False)
except Exception:
import wx
if parse_version(wx.__version__) < parse_version('2.9'):
tmpApp = wx.PySimpleApp()
else:
tmpApp = wx.App(False)
try:
from psychopy.app import localization
except Exception:
pass # not needed if can't import it
allComp = getAllComponents(fetchIcons=False)
exp = experiment.Experiment()
relPath = os.path.join(os.path.split(__file__)[0], 'componsTemplate.txt')
if '--out' not in sys.argv:
with io.open(relPath, 'r', encoding='utf-8-sig') as f:
target = f.read()
targetLines = target.splitlines()
targetTag = {}
for line in targetLines:
try:
t, val = line.split(':',1)
targetTag[t] = val
except ValueError:
# need more than one value to unpack; this is a weak way to
# handle multi-line default values, eg TextComponent.text.default
targetTag[t] += '\n' + line # previous t value
else:
outfile = open(relPath,'w')
param = experiment.Param('', '') # want its namespace
ignore = ['__doc__', '__init__', '__module__', '__str__', 'next']
if '--out' not in sys.argv:
# these are for display only (cosmetic) but no harm in gathering initially:
ignore += ['hint',
'label', # comment-out to not ignore labels when checking
'categ'
]
for field in dir(param):
if field.startswith("__"):
ignore.append(field)
fields = set(dir(param)).difference(ignore)
mismatches = []
for compName in sorted(allComp):
comp = allComp[compName](parentName='x', exp=exp)
order = '%s.order:%s' % (compName, eval("comp.order"))
out = [order]
if '--out' in sys.argv:
outfile.write(order+'\n')
    elif order + '\n' not in target:
tag = order.split(':', 1)[0]
try:
err = order + ' <== ' + targetTag[tag]
        except KeyError: # missing
err = order + ' <==> NEW (no matching param in original)'
print(err)
mismatches.append(err)
for parName in sorted(comp.params):
# default is what you get from param.__str__, which returns its value
default = '%s.%s.default:%s' % (compName, parName, comp.params[parName])
out.append(default)
lineFields = []
for field in sorted(fields):
if parName == 'name' and field == 'updates':
continue
# ignore: never want to change the name *during an experiment*
# the default name.updates value varies across components
# skip private attributes
if field.startswith("_"):
continue
# get value of the field
fieldValue = str(eval("comp.params[parName].%s" % field))
# remove memory address from the string representation
if "at 0x" in fieldValue:
fieldValue = fieldValue.split(" at 0x")[0] + ">"
f = '%s.%s.%s:%s' % (compName, parName, field, fieldValue)
lineFields.append(f)
for line in [default] + lineFields:
if '--out' in sys.argv:
if not ignoreObjectAttribs:
outfile.write(line+'\n')
else:
if (not ":<built-in method __" in line and
not ":<method-wrapper '__" in line and
not ":<bound method " in line):
outfile.write(line+'\n')
            elif line + '\n' not in target:
# mismatch, so report on the tag from orig file
# match checks tag + multi-line
# because line is multi-line and target is whole file
tag = line.split(':', 1)[0]
try:
err = line + ' <== ' + targetTag[tag]
except KeyError: # missing
err = line + ' <==> NEW (no matching param in original)'
print(err)
mismatches.append(err)
# return mismatches
| psychopy/psychopy | psychopy/tests/test_experiment/needs_wx/genComponsTemplate.py | Python | gpl-3.0 | 4,702 | 0.001489 |
# -*- coding: utf-8 -*-
"""
handler base
~~~~~~~~~~~~
Presents a reasonable base class for a ``Handler`` object, which handles
responding to an arbitrary "request" for action. For example, ``Handler``
is useful for responding to HTTP requests *or* noncyclical realtime-style
requests, and acts as a base class for ``Page`` and ``ServiceHandler``.
:author: Sam Gammon <sg@samgammon.com>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# stdlib
import itertools
# canteen core, util, logic
from ..core import injection
# noinspection PyUnresolvedReferences
class Handler(object):
""" Base class structure for a ``Handler`` of some request or desired action.
Specifies basic machinery for tracking a ``request`` alongside some form
of ``response``.
Also keeps track of relevant ``environ`` (potentially from WSGI) and sets
up a jump off point for DI-provided tools like logging, config, caching,
template rendering, etc. """
# @TODO(sgammon): HTTPify, convert to decorator
config = property(lambda self: {})
__agent__ = None # current `agent` details
__status__ = 200 # it's a glass-half-full kind of day, why not
__routes__ = None # route map adapter from werkzeug
__context__ = None # holds current runtime context, if any
__logging__ = None # internal logging slot
__runtime__ = None # reference up to the runtime
__environ__ = None # original WSGI environment
__request__ = None # lazy-loaded request object
__headers__ = None # buffer HTTP header access
__response__ = None # lazy-loaded response object
__callback__ = None # callback to send data (sync or async)
__content_type__ = None # response content type
# set owner and injection side
__owner__, __metaclass__ = "Handler", injection.Compound
def __init__(self, environ=None,
start_response=None,
runtime=None,
request=None,
response=None, **context):
""" Initialize a new ``Handler`` object with proper ``environ`` details and
inform it of larger world around it.
``Handler`` objects (much like ``Runtime`` objects) are designed to be
usable independently as a WSGI-style callable. Note that the first two
position parameters of this ``__init__`` are the venerable ``environ``
and ``start_response`` - dispatching this way is totally possible, but
providing ``runtime``, ``request`` and ``response`` allow tighter
integration with the underlying runtime.
Current execution details (internal to Canteen) are passed as ``kwargs``
and compounded as new context items are added.
:param environ: WSGI environment, provided by active runtime. ``dict``
in standard WSGI format.
:param start_response: Callable to begin the response cycle. Usually a
vanilla ``function``.
:param runtime: Currently-active Canteen runtime. Always an instance of
:py:class:`canteen.core.runtime.Runtime` or a subclass thereof.
:param request: Object to use for ``self.request``. Usually an instance
of :py:class:`werkzeug.wrappers.Request`.
:param response: Object to use for ``self.response``. Usually an
instance of :py:class:`werkzeug.wrappers.Response`. """
# startup/assign internals
self.__runtime__, self.__environ__, self.__callback__ = (
runtime, # reference to the active runtime
environ, # reference to WSGI environment
start_response) # reference to WSGI callback
# setup HTTP/dispatch stuff
self.__status__, self.__headers__, self.__content_type__ = (
200, # default response status
      {},                          # default response headers
'text/html; charset=utf-8') # default content type
# request, response & context
self.__request__, self.__response__, self.__context__ = (
request, response, context)
# expose internals, but write-protect
routes = property(lambda self: self.__runtime__.routes)
status = property(lambda self: self.__status__)
headers = property(lambda self: self.__headers__)
content_type = property(lambda self: self.__content_type__)
# shortcuts & utilities
url_for = link = lambda self, end, **args: self.routes.build(end, args)
# WSGI internals
app = runtime = property(lambda self: self.__runtime__)
environment = environ = property(lambda self: self.__environ__)
start_response = callback = property(lambda self: self.__callback__)
# Context
session = property(lambda self: ( # session is tuple of (session, engine)
self.request.session[0] if self.request.session else None))
# Agent
agent = property(lambda self: (
self.__agent__ if self.__agent__ else (
setattr(self, '__agent__', self.http.agent.scan(self.request)) or (
self.__agent__))))
# Request & Response
request = property(lambda self: (
self.__request__ if self.__request__ else (
setattr(self, '__request__', self.http.new_request(self.__environ__)) or (
self.__request__))))
response = property(lambda self: (
self.__response__ if self.__response__ else (
setattr(self, '__response__', self.http.new_response()) or (
self.__response__))))
@property
def template_context(self):
""" Generate template context to be used in rendering source templates. The
``template_context`` accessor is expected to return a ``dict`` of
``name=>value`` pairs to present to the template API.
:returns: ``dict`` of template context. """
# for javascript context
from canteen.rpc import ServiceHandler
return {
# Default Context
'handler': self,
'config': getattr(self, 'config', {}),
'runtime': self.runtime,
# HTTP Context
'http': {
'agent': getattr(self, 'agent', None),
'request': self.request,
'response': self.response
},
# WSGI internals
'wsgi': {
'environ': self.environ,
'callback': self.callback,
'start_response': self.start_response
},
# Cache API
'cache': {
'get': self.cache.get,
'get_multi': self.cache.get_multi,
'set': self.cache.set,
'set_multi': self.cache.set_multi,
'delete': self.cache.delete,
'delete_multi': self.cache.delete_multi,
'clear': self.cache.clear,
'flush': self.cache.flush
},
# Assets API
'asset': {
'image': self.assets.image_url,
'style': self.assets.style_url,
'script': self.assets.script_url
},
# Service API
'services': {
'list': ServiceHandler.services,
'describe': ServiceHandler.describe
},
# Output API
'output': {
'render': self.template.render,
'environment': self.template.environment
},
# Routing
'link': self.url_for,
'route': {
'build': self.url_for,
'resolve': self.http.resolve_route
}
}
def respond(self, content=None, direct=False):
""" Respond to this ``Handler``'s request with raw ``str`` or ``unicode``
content. UTF-8 encoding happens if necessary.
:param content: Content to respond to. Must be ``str``, ``unicode``, or
a similar string buffer object.
:param direct: Flag indicating that ``self`` should be returned, rather
than ``self.response``. Bool, defaults to ``False`` as this
technically breaks WSGI.
:returns: Generated (filled-in) ``self.response`` object. """
# today is a good day
if not self.status: self.__status__ = 200
if content: self.response.response = content
# set status code and return
return setattr(self.response,
('status_code' if isinstance(self.status, int) else 'status'),
self.status) or (
(i.encode('utf-8').strip() for i in self.response.response),
self.response) if not direct else self
def render(self, template,
headers=None,
content_type='text/html; charset=utf-8',
context=None,
_direct=False, **kwargs):
""" Render a source ``template`` for the purpose of responding to this
``Handler``'s request, given ``context`` and proper ``headers`` for
return.
``kwargs`` are taken as extra template context and overlayed onto
``context`` before render.
:param template: Path to template file to serve. ``str`` or ``unicode``
file path.
:param headers: Extra headers to send with response. ``dict`` or iter of
``(name, value)`` tuples.
:param content_type: Value to send for ``Content-Type`` header. ``str``,
defaults to ``text/html; charset=utf-8``.
:param context: Extra template context to include during render.
``dict`` of items, with keys as names that values are bound to in the
resulting template context.
:param _direct: Flag indicating that ``self`` should be returned, rather
than ``self.response``. Bool, defaults to ``False`` as this
technically breaks WSGI.
:param kwargs: Additional items to add to the template context.
Overrides all other sources of context.
:returns: Rendered template content, added to ``self.response``. """
from canteen.util import config
# set mime type
if content_type: self.response.mimetype = content_type
# collapse and merge HTTP headers (base headers first)
self.response.headers.extend(itertools.chain(
iter(self.http.base_headers),
self.config.get('http', {}).get('headers', {}).iteritems(),
self.headers.iteritems(),
(headers or {}).iteritems()))
# merge template context
_merged_context = dict(itertools.chain(*(i.iteritems() for i in (
self.template.base_context,
self.template_context,
context or {},
kwargs))))
# render template and set as response data
self.response.response, self.response.direct_passthrough = (
self.template.render(
self,
getattr(self.runtime, 'config', None) or config.Config(),
template,
_merged_context)), True
return self.respond(direct=_direct)
def dispatch(self, **url_args):
""" Dispatch a WSGI request through this ``Handler``. Expected to be an
HTTP-style (cyclical) dispatch flow.
:param url_args: Arguments provided from the URI that should be passed
along to any resulting handler calls.
:returns: After filling the local response object (at ``self.response``)
returns it for inspection or reply. """
self.__response__ = (
getattr(self, self.request.method)(**url_args)) or self.__response__
return self.__response__
def __call__(self, url_args, direct=False):
""" Kick off the local response dispatch process, and run any necessary
pre/post hooks (named ``prepare`` and ``destroy``, respectively).
:param url_args: Arguments parsed from URL according to matched route.
``dict`` of ``{param: value}`` pairs.
:param direct: Flag to indicate 'direct' mode, whereby a handler is
returned instead of a response. Bool, defaults to ``False``, as this
technically breaks WSGI.
:returns: ``self.response`` if ``direct`` mode is not active, otherwise
``self`` for chainability. """
# run prepare hook, if specified
if hasattr(self, 'prepare'): self.prepare(url_args, direct=direct)
self.dispatch(**url_args) # dispatch local handler, fills `__response__`
# run destroy hook, if specified
if hasattr(self, 'destroy'): self.destroy(self.__response__)
return self.__response__ if not direct else self
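# A minimal usage sketch (hypothetical subclass and template names, not part of
# this module): a concrete handler implements methods named after HTTP verbs,
# e.g.
#
#   class Landing(Handler):
#     def GET(self):
#       return self.render('landing.html')
#
# ``dispatch`` looks up ``self.request.method`` on the handler, so a GET request
# calls ``GET`` above and the filled-in ``self.response`` is what gets returned.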
# noinspection PyUnresolvedReferences
class RealtimeHandler(Handler):
""" Provides structure for an acyclically-dispatched web handler, meant for
use in scenarios like WebSockets. Instead of handling things with
methods like ``GET`` or ``POST``, a ``RealtimeHandler`` can specify
hooks for two events - ``on_connect`` and ``on_message``.
The first, ``on_connect``, is dispatched when a realtime connection has
just been successfully negotiated. It is executed once the application
is ready to return an ``HTTP/1.1 Upgrade`` response, so that the
developer has a chance to specify subprotocols/extensions/etc.
The second hook, ``on_message``, is dispatched each time an established
connection receives a message from the client. It takes two parameters -
the ``message`` itself and whether it is ``binary`` or not. """
__socket__ = None # space for currently-active realtime socket
def dispatch(self, **url_args): # pragma: no cover
""" Adapt regular handler dispatch to support an acyclic/realtime-style
dispatch scheme. Accepts same arguments as ``super`` definition, but
dispatches *realtime*-style messages like ``on_connect`` and
``on_message``, so long as the request looks like a WebSocket upgrade.
:param url_args: Arguments provided from the URI that should be passed
along to any resulting handler calls.
:returns: After filling the local response object (at ``self.response``)
returns it for inspection or reply. """
# fallback to standard dispatch
if self.realtime.hint not in self.environ:
return super(RealtimeHandler, self).dispatch(**url_args)
try:
# websocket upgrade and session
self.__socket__ = self.realtime.on_connect(self)
self.realtime.on_message(self, self.__socket__)
except NotImplementedError:
return self.error(400) # raised when a non-websocket handler is hit
@staticmethod
def terminate(graceful=True): # pragma: no cover
""" Terminate the currently-active ``RealtimeSocket`` communication
channel.
:param graceful: ``bool`` parameter, whether to end the connection
gracefully or not.
:returns: ``TERMINATE`` sentinel, to be yielded so the connection can be
terminated. """
from canteen.logic import realtime
if graceful: return realtime.TERMINATE
raise realtime.TerminateSocket(graceful=False)
@staticmethod
def on_connect(): # pragma: no cover
""" Hook function that is dispatched upon successful handshake for a
realtime-style connection between a client and this server. Local
handler should be prepared by this point with all information necessary
to satisfy messages.
Implementors are expected to provide a method that makes use of object-
level context (i.e. not a static or classmethod).
:returns: ``NotImplemented`` by default, which simply indicates that
the implementor elects not to run code ``on_connect``. """
return NotImplemented
def on_message(self, message, binary): # pragma: no cover
""" Hook that is dispatched per message sent from a live client. Called
subsequent to a connection being properly established from a previous
call to ``on_connect``.
:param message: WebSocket message passed from the client.
:param binary: ``bool`` flag - ``True`` if ``message`` is binary,
``False`` otherwise.
:raises NotImplementedError: By default, since not many people use
WebSockets and there's no such thing as a ``400`` without HTTP. :)
:returns: Not expected to return anything. If a return is used, any
value or iterable of values will be collapsed and sent to the client.
Optionally, the developer may implement ``on_message`` as a coroutine-
style Python generator, in which case new messages will be ``sent``
in from the client and messages to the client can be yielded upwards
to be sent. """
raise NotImplementedError('Handler "%s" fails to implement hook'
' `on_message` so it does not support'
' realtime-style communications.' % repr(self))
# noinspection PyUnusedLocal
@staticmethod
def on_close(graceful): # pragma: no cover
""" Hook function that is dispatched upon closure of an existing realtime
communications session.
:param graceful: ``bool`` parameter indicating whether the connection
was closed gracefully (i.e. electively) or because of some error
condition.
:returns: ``NotImplemented`` by default, which simply indicates that
the implementor elects not to run code ``on_connect``. """
return NotImplemented
__all__ = ('Handler',)
| momentum/canteen | canteen/base/handler.py | Python | mit | 17,022 | 0.004935 |
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="utf-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="root" type="xsd:string"/>
</xsd:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('binding0116.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
import unittest
class TestTrac0116 (unittest.TestCase):
xmls = '''<?xml version="1.0" encoding="utf-8"?><root foo='boo'/>'''
def testBasic (self):
self.assertRaises(pyxb.AttributeOnSimpleTypeError, CreateFromDocument, self.xmls)
if __name__ == '__main__':
unittest.main()
| CantemoInternal/pyxb | tests/trac/test-trac-0116.py | Python | apache-2.0 | 834 | 0.013189 |
#!/usr/bin/env python
from distutils.core import setup
version='1.0.1'
setup(
name='funny-codes',
version=version,
author='Mikhail Korobov',
author_email='kmike84@gmail.com',
packages=['funny_codes'],
url='https://bitbucket.org/kmike/funny-codes/',
license = 'MIT license',
description = "Generate randoms strings of a given pattern",
long_description = open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| kmike/funny-codes | setup.py | Python | mit | 712 | 0.009831 |
import json
from google.appengine.api import users
from raceways.handler import BaseHandler, using_template
from stravalib import unithelper
class HomepageHandler(BaseHandler):
@using_template('test.html')
def get(self):
# basic user login
strava_auth_uri = None
if self.user:
# store strava auth in session (eventually want to store this
            # alongside the user!)
strava_credentials = self.strava_storage.get()
if not strava_credentials:
strava_auth_uri = self.strava_flow.step1_get_authorize_url()
else:
strava_credentials.authorize(self.http)
else:
strava_credentials = None
template = self.get_template('test.html')
strava_credentials_json = {}
if strava_credentials:
strava_credentials_json = json.loads(strava_credentials.to_json())
print json.dumps(strava_credentials_json, indent=4)
else:
athlete = None
activities = []
stream = []
print help(self.user)
print "User has: %s" % dir(self.user)
template_values = {
'strava_credentials': strava_credentials_json,
'strava_login_url': strava_auth_uri,
'logout_url': '/logout',
'login_url': '/login',
'user': self.user,
}
return template_values
| alecf/strava-raceways | raceways/handlers/homepage.py | Python | mit | 1,439 | 0.000695 |
import autocomplete_light
from association.models import Word
from django.utils.translation import ugettext as _
class WordAutocomplete(autocomplete_light.AutocompleteModelBase):
search_fields=['name']
choice_html_format = '<span class="block os" data-value="%s">%s (%s)</span>'
attrs={
'placeholder': _('Filter'),
'data-autocomplete-minimum-characters': 1,
}
widget_attrs={
'data-widget-maximum-values': 6,
'class': 'modern-style',
}
def choice_html(self, choice):
return self.choice_html_format % (self.choice_value(choice), self.choice_label(choice), choice.language)
autocomplete_light.register(Word, WordAutocomplete)
| Tima-Is-My-Association/TIMA | association/autocomplete_light_registry.py | Python | lgpl-3.0 | 694 | 0.010086 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wrapped_credentials.py."""
import datetime
import json
import httplib2
from google.auth import credentials
from google.auth import external_account
from google.auth import identity_pool
from gslib.tests import testcase
from gslib.utils.wrapped_credentials import WrappedCredentials
import oauth2client
from six import add_move, MovedModule
add_move(MovedModule("mock", "mock", "unittest.mock"))
from six.moves import mock
ACCESS_TOKEN = "foo"
CONTENT = "content"
RESPONSE = httplib2.Response({
"content-type": "text/plain",
"status": "200",
"content-length": len(CONTENT),
})
class MockCredentials(external_account.Credentials):
def __init__(self, token=None, expiry=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._audience = None
self.expiry = expiry
self.token = None
def side_effect(*args, **kwargs):
self.token = token
self.refresh = mock.Mock(side_effect=side_effect)
def retrieve_subject_token():
pass
class HeadersWithAuth(dict):
"""A utility class to use to make sure a set of headers includes specific authentication"""
def __init__(self, token):
self.token = token or ""
def __eq__(self, headers):
return headers[b"Authorization"] == bytes("Bearer " + self.token, "utf-8")
class TestWrappedCredentials(testcase.GsUtilUnitTestCase):
"""Test logic for interacting with Wrapped Credentials the way we intend to use them."""
@mock.patch.object(httplib2, "Http", autospec=True)
def testWrappedCredentialUsage(self, http):
http.return_value.request.return_value = (RESPONSE, CONTENT)
req = http.return_value.request
creds = WrappedCredentials(
MockCredentials(token=ACCESS_TOKEN,
audience="foo",
subject_token_type="bar",
token_url="baz",
credential_source="qux"))
http = oauth2client.transport.get_http_object()
creds.authorize(http)
response, content = http.request(uri="www.google.com")
self.assertEquals(content, CONTENT)
creds._base.refresh.assert_called_once_with(mock.ANY)
# Make sure the default request gets called with the correct token.
req.assert_called_once_with("www.google.com",
method="GET",
headers=HeadersWithAuth(ACCESS_TOKEN),
body=None,
connection_type=mock.ANY,
redirections=mock.ANY)
def testWrappedCredentialSerialization(self):
"""Test logic for converting Wrapped Credentials to and from JSON for serialization."""
creds = WrappedCredentials(
identity_pool.Credentials(audience="foo",
subject_token_type="bar",
token_url="baz",
credential_source={"url": "www.google.com"}))
creds.access_token = ACCESS_TOKEN
creds.token_expiry = datetime.datetime(2001, 12, 5, 0, 0)
creds_json = creds.to_json()
json_values = json.loads(creds_json)
self.assertEquals(json_values["client_id"], "foo")
self.assertEquals(json_values['access_token'], ACCESS_TOKEN)
self.assertEquals(json_values['token_expiry'], "2001-12-05T00:00:00Z")
self.assertEquals(json_values["_base"]["audience"], "foo")
self.assertEquals(json_values["_base"]["subject_token_type"], "bar")
self.assertEquals(json_values["_base"]["token_url"], "baz")
self.assertEquals(json_values["_base"]["credential_source"]["url"],
"www.google.com")
creds2 = WrappedCredentials.from_json(creds_json)
self.assertIsInstance(creds2, WrappedCredentials)
self.assertIsInstance(creds2._base, identity_pool.Credentials)
self.assertEquals(creds2.client_id, "foo")
self.assertEquals(creds2.access_token, ACCESS_TOKEN)
self.assertEquals(creds2.token_expiry, creds.token_expiry)
| GoogleCloudPlatform/gsutil | gslib/tests/test_wrapped_credentials.py | Python | apache-2.0 | 4,593 | 0.003048 |
n = int(input('Enter n: '))
es_primo = True
# Trial division: test every candidate divisor d in [2, n).
d = 2
while d < n:
    if n % d == 0:
        es_primo = False
    d = d + 1
if es_primo:
    print(n, 'is prime')
else:
    print(n, 'is composite')
| sebastiandres/iwi131 | ipynb/06-Funciones/iwi131_code/es_primo_v1.py | Python | cc0-1.0 | 200 | 0 |
"""Test zha sensor."""
from unittest import mock
import pytest
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.homeautomation as homeautomation
import zigpy.zcl.clusters.measurement as measurement
import zigpy.zcl.clusters.smartenergy as smartenergy
from homeassistant.components.sensor import DOMAIN
import homeassistant.config as config_util
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
POWER_WATT,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_PERCENTAGE,
)
from homeassistant.helpers import restore_state
from homeassistant.util import dt as dt_util
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
send_attribute_report,
send_attributes_report,
)
async def async_test_humidity(hass, cluster, entity_id):
"""Test humidity sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 100})
assert_state(hass, entity_id, "10.0", UNIT_PERCENTAGE)
async def async_test_temperature(hass, cluster, entity_id):
"""Test temperature sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 2900, 2: 100})
assert_state(hass, entity_id, "29.0", TEMP_CELSIUS)
async def async_test_pressure(hass, cluster, entity_id):
"""Test pressure sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 10000})
assert_state(hass, entity_id, "1000", "hPa")
await send_attributes_report(hass, cluster, {0: 1000, 20: -1, 16: 10000})
assert_state(hass, entity_id, "1000", "hPa")
async def async_test_illuminance(hass, cluster, entity_id):
"""Test illuminance sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 10, 2: 20})
assert_state(hass, entity_id, "1.0", "lx")
async def async_test_metering(hass, cluster, entity_id):
"""Test metering sensor."""
await send_attributes_report(hass, cluster, {1025: 1, 1024: 12345, 1026: 100})
assert_state(hass, entity_id, "12345.0", "unknown")
async def async_test_electrical_measurement(hass, cluster, entity_id):
"""Test electrical measurement sensor."""
with mock.patch(
(
"homeassistant.components.zha.core.channels.homeautomation"
".ElectricalMeasurementChannel.divisor"
),
new_callable=mock.PropertyMock,
) as divisor_mock:
divisor_mock.return_value = 1
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 1000})
assert_state(hass, entity_id, "99", POWER_WATT)
divisor_mock.return_value = 10
await send_attributes_report(hass, cluster, {0: 1, 1291: 1000, 10: 5000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 5000})
assert_state(hass, entity_id, "9.9", POWER_WATT)
@pytest.mark.parametrize(
"cluster_id, test_func, report_count",
(
(measurement.RelativeHumidity.cluster_id, async_test_humidity, 1),
(measurement.TemperatureMeasurement.cluster_id, async_test_temperature, 1),
(measurement.PressureMeasurement.cluster_id, async_test_pressure, 1),
(measurement.IlluminanceMeasurement.cluster_id, async_test_illuminance, 1),
(smartenergy.Metering.cluster_id, async_test_metering, 1),
(
homeautomation.ElectricalMeasurement.cluster_id,
async_test_electrical_measurement,
1,
),
),
)
async def test_sensor(
hass,
zigpy_device_mock,
zha_device_joined_restored,
cluster_id,
test_func,
report_count,
):
"""Test zha sensor platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
await async_enable_traffic(hass, [zha_device], enabled=False)
await hass.async_block_till_done()
# ensure the sensor entity was created
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensor now have a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
# test sensor associated logic
await test_func(hass, cluster, entity_id)
# test rejoin
await async_test_rejoin(hass, zigpy_device, [cluster], (report_count,))
def assert_state(hass, entity_id, state, unit_of_measurement):
"""Check that the state is what is expected.
This is used to ensure that the logic in each sensor class handled the
attribute report it received correctly.
"""
hass_state = hass.states.get(entity_id)
assert hass_state.state == state
assert hass_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit_of_measurement
@pytest.fixture
def hass_ms(hass):
"""Hass instance with measurement system."""
async def _hass_ms(meas_sys):
await config_util.async_process_ha_core_config(
hass, {CONF_UNIT_SYSTEM: meas_sys}
)
await hass.async_block_till_done()
return hass
return _hass_ms
@pytest.fixture
def core_rs(hass_storage):
"""Core.restore_state fixture."""
def _storage(entity_id, uom, state):
now = dt_util.utcnow().isoformat()
hass_storage[restore_state.STORAGE_KEY] = {
"version": restore_state.STORAGE_VERSION,
"key": restore_state.STORAGE_KEY,
"data": [
{
"state": {
"entity_id": entity_id,
"state": str(state),
"attributes": {ATTR_UNIT_OF_MEASUREMENT: uom},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": now,
}
],
}
return
return _storage
@pytest.mark.parametrize(
"uom, raw_temp, expected, restore",
[
(TEMP_CELSIUS, 2900, 29, False),
(TEMP_CELSIUS, 2900, 29, True),
(TEMP_FAHRENHEIT, 2900, 84, False),
(TEMP_FAHRENHEIT, 2900, 84, True),
],
)
async def test_temp_uom(
uom,
raw_temp,
expected,
restore,
hass_ms,
core_rs,
zigpy_device_mock,
zha_device_restored,
):
"""Test zha temperature sensor unit of measurement."""
entity_id = "sensor.fake1026_fakemodel1026_004f3202_temperature"
if restore:
core_rs(entity_id, uom, state=(expected - 2))
hass = await hass_ms(
CONF_UNIT_SYSTEM_METRIC if uom == TEMP_CELSIUS else CONF_UNIT_SYSTEM_IMPERIAL
)
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
measurement.TemperatureMeasurement.cluster_id,
general.Basic.cluster_id,
],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].temperature
zha_device = await zha_device_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
if not restore:
await async_enable_traffic(hass, [zha_device], enabled=False)
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensors now have a state of unknown
if not restore:
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attribute_report(hass, cluster, 0, raw_temp)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert round(float(state.state)) == expected
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == uom
async def test_electrical_measurement_init(
hass, zigpy_device_mock, zha_device_joined,
):
"""Test proper initialization of the electrical measurement cluster."""
cluster_id = homeautomation.ElectricalMeasurement.cluster_id
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensor now have a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert int(hass.states.get(entity_id).state) == 100
channel = zha_device.channels.pools[0].all_channels["1:0x0b04"]
assert channel.divisor == 1
assert channel.multiplier == 1
# update power divisor
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0403: 5, 10: 1000})
assert channel.divisor == 5
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "4.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0605: 10, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "3.0"
# update power multiplier
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0402: 6, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 6
assert hass.states.get(entity_id).state == "12.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0604: 20, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 20
assert hass.states.get(entity_id).state == "60.0"
| titilambert/home-assistant | tests/components/zha/test_sensor.py | Python | apache-2.0 | 10,606 | 0.001037 |
from settings import *
import sys
from os.path import abspath, dirname, join
MEDIA_ROOT = '/home/xiaoye/workspace/douquan/site_media'
ADMIN_MEDIA_ROOT = '/home/xiaoye/workspace/douquan/admin_media/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/home/xiaoye/workspace/douquan/templates',
)
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'douquan' # Or path to database file if using sqlite3.
DATABASE_USER = 'douquan' # Not used with sqlite3.
DATABASE_PASSWORD = 'douquan' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''
PROJECT_ROOT = '/home/xiaoye/workspace/douquan'
sys.path.insert(0, join(PROJECT_ROOT, "libs/"))
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#TEMPLATE_DEBUG = False
| masiqi/douquan | xiaoye.py | Python | mit | 1,068 | 0.005618 |
from gwpy.timeseries import TimeSeries
# ``plot`` is the Plot object created in the preceding snippet of this example
# series; here the H1 strain data is fetched and added to the same axes.
h1hoft = TimeSeries.fetch_open_data('H1', 'Sep 14 2015 09:50:29', 'Sep 14 2015 09:51:01')
ax = plot.gca()
ax.plot(h1hoft)
plot.refresh()
| gwpy/gwpy.github.io | docs/v0.5/timeseries/plot-4.py | Python | gpl-3.0 | 137 | 0.007299 |
class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
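        # Push tokens onto a stack; whenever the top two entries are both '#'
        # (two exhausted leaves), the node beneath them is a fully-described
        # subtree, so collapse all three into a single '#'.  A valid preorder
        # serialization reduces to exactly one '#'.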
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
| MingfeiPan/leetcode | stack/331.py | Python | apache-2.0 | 615 | 0.003252 |
"""
Django settings for SciMs project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%pc)w23=6r(!))ca)to%o1g^cb-f5h)nna-9%c%1zks!cgpu)3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'scims.apps.ScimsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'scims.templatetags.scims_extras',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SciMs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SciMs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'Europe/Paris'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
]
LOGIN_REDIRECT_URL = '/' | BdTNLM/SciMS | SciMs/settings.py | Python | mit | 3,325 | 0.001805 |
# Simulate data
if 0:
read("d.nex")
d = Data()
if 1:
nTax = 5
taxNames = list(string.uppercase[:nTax])
a = func.newEmptyAlignment(dataType='dna', taxNames=taxNames, length=200)
d = Data([a])
if 0:
read("t.nex")
t = var.trees[0]
#t.taxNames = taxNames
if 0:
read('(B:0.5, ((D:0.4, A:0.3), C:0.5), E:0.5);')
t = var.trees[0]
t.taxNames = taxNames
if 1:
t = func.randomTree(taxNames=taxNames)
t.data = d
t.newComp(free=0, spec='specified', val=[0.1, 0.2, 0.3])
t.newRMatrix(free=0, spec='specified', val=[2., 3., 4., 5., 6., 7.])
t.setNGammaCat(nGammaCat=4)
t.newGdasrv(free=0, val=0.5)
t.setPInvar(free=0, val=0.2)
func.reseedCRandomizer(os.getpid())
t.simulate()
if 1:
d.writeNexus('d.nex', writeDataBlock=True)
if 0:
d.alignments[0].writePhylip("d.phy")
| blaiseli/p4-phylogenetics | share/Examples/W_recipes/sSim.py | Python | gpl-2.0 | 819 | 0.002442 |
"""
WSGI config for photoboard project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photoboard.settings")
application = get_wsgi_application()
| photoboard/photoboard-django | photoboard/wsgi.py | Python | mit | 398 | 0 |
#!/usr/bin/env python
from binary_tools.constants import *
from binary_tools.binary import kicks
from binary_tools.binary.orbits import *
import matplotlib.pyplot as plt
from scipy.stats import maxwell
from scipy.integrate import quad
import random as rd
import numpy as np
__author__ = "Kaliroe Pappas"
__credits__ = ["Kaliroe Pappas"]
__license__ = "GPL"
__version__ = "3.0"
__maintainer__ = "Kaliroe Pappas"
__email__ = "kmpappa2@illinois.edu"
"""Test the functions in the kicks.py module
"""
def test_rand_phi(num_sample=10000, nbins=20, tolerance = 1e-3, seed="Jean", plot=False, save=True):
"""Test that phi is sampled as flat from zero to pi
Arguments:
- num_sample: number of random phi generated
- nbins: random sampled numbers will be binned to compute
probabilities and compare to expected values. This variable
specifies the number of bins used.
- tolerance: tolerance for the test
- seed: the seed used for the random number generator
- plot: if true, plot results
- save: if true, saves the plot
    Returns: True if the test is successful, False otherwise
"""
rd.seed(seed)
phi_array = np.zeros(num_sample)
for i in range(0,len(phi_array)):
phi_array[i] = kicks.rand_phi()
#do a histogram
    # TODO: use numpy histogram to avoid plotting if it's not necessary
vals_phi, bins_phi = np.histogram(phi_array, bins=np.linspace(0,2*np.pi,nbins))
prob_test = np.zeros(len(vals_phi))
for k in range(0,len(vals_phi)):
prob_test[k] = (1/(2*np.pi))*(bins_phi[k+1]-bins_phi[k])
test_array = []
for j in range(0,len(prob_test)):
test_array = np.append(test_array, np.ones(int(round(prob_test[j]*num_sample)))*bins_phi[j])
if plot:
plt.hist(phi_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("phi distribution")
plt.xlabel("phi value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.show()
plt.close()
if save:
plt.hist(phi_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("phi distribution")
plt.xlabel("phi value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.savefig("phi_distribution.png")
plt.close()
#check if the probability computed for each bin is within the tolerance
success = True
tolerance = max(vals_phi)*tolerance
for k in range(0,len(vals_phi)):
prob_hist = vals_phi[k]/(sum(vals_phi))
if abs(prob_test[k]-prob_hist)>tolerance:
success = False
break
#re-seed the random number generator
rd.seed()
return success
def test_rand_theta(num_sample=10000, nbins=20, tolerance = 1e-3, seed="Jubilee", plot=False, save=True):
"""Test that theta is sampled as a sign graph from zero to pi
Arguments:
- num_sample: number of random theta generated
- nbins: random sampled numbers will be binned to compute
probabilities and compare to expected values. This variable
specifies the number of bins used.
- tolerance: tolerance for the test
- seed: the seed used for the random number generator
- plot: if true, plot results
- save: if true, saves the plot
    Returns: True if the test is successful, False otherwise
"""
rd.seed(seed)
theta_array = np.zeros(num_sample)
for i in range(0,len(theta_array)):
theta_array[i] = kicks.rand_theta()
#do a histogram
    # TODO: use numpy histogram to avoid plotting if it's not necessary
vals_theta, bins_theta = np.histogram(theta_array, bins=np.linspace(0,np.pi,nbins))
prob_test = np.zeros(len(vals_theta))
for k in range(0,len(vals_theta)):
prob_test[k] = -(np.cos(bins_theta[k+1])-np.cos(bins_theta[k]))/2
test_array = []
for j in range(0,len(prob_test)):
test_array = np.append(test_array, np.ones(int(round(prob_test[j]*num_sample)))*bins_theta[j])
if plot:
plt.hist(theta_array, bins=np.linspace(0,np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("theta distribution")
plt.xlabel("theta value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.show()
plt.close()
if save:
plt.hist(theta_array, bins=np.linspace(0,np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("theta distribution")
plt.xlabel("theta value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.savefig("theta_distribution.png")
plt.close()
#check if the probability computed for each bin is within the tolerance
success = True
tolerance = max(vals_theta)*tolerance
for k in range(0,len(vals_theta)):
prob_hist = vals_theta[k]/(sum(vals_theta))
if abs(prob_test[k]-prob_hist)>tolerance:
success = False
break
#re-seed the random number generator
rd.seed()
return success
def test_rand_velocity(sigma, num_sample=10000, nbins=20, tolerance=1e-3, seed="Dimitris", plot=False, save=True):
"""Test that the velocity output is sampled as a maxwellian
Arguments:
- sigma: argument needed to run rand_velocity
    - num_sample: number of random velocities generated
- nbins: random sampled numbers will be binned to compute
probabilities and compare to expected values. This variable
specifies the number of bins used.
- tolerance: tolerance for the test
- seed: the seed used for the random number generator
- plot: if true, plot results
- save: if true, saves the plot
    Returns: True if the test is successful, False otherwise
"""
rd.seed(seed)
velocity_array = np.zeros(num_sample)
for k in range(0,len(velocity_array)):
velocity_array[k] = kicks.rand_velocity(sigma)
#do a histogram
vals_velocity, bins_velocity = np.histogram(velocity_array, bins=np.linspace(0,3*sigma,nbins))
prob_test = np.zeros(len(vals_velocity))
for k in range(0,len(vals_velocity)):
prob_test[k] = maxwell.cdf(bins_velocity[k+1],0,sigma) - maxwell.cdf(bins_velocity[k],0,sigma)
test_array = []
for j in range(0,len(prob_test)):
test_array = np.append(test_array, np.ones(int(round(prob_test[j]*num_sample)))*bins_velocity[j])
if plot:
plt.hist(velocity_array, bins=np.linspace(0,3*sigma,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,3*sigma,nbins), alpha = 0.5, label = "expected value")
plt.title("velocity distribution")
plt.xlabel("velocity value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.show()
plt.close()
if save:
plt.hist(velocity_array, bins=np.linspace(0,3*sigma,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,3*sigma,nbins), alpha = 0.5, label = "expected value")
plt.title("velocity distribution")
plt.xlabel("velocity value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.savefig("velocity_distribution.png")
plt.close()
#check if the probability computed from each bin is within the tolerance
success = True
tolerance = max(vals_velocity)*tolerance
for k in range(0,len(vals_velocity)):
prob_hist = vals_velocity[k]/(sum(vals_velocity))
if abs(prob_test[k]-prob_hist)>tolerance:
success = False
break
#re-seed the random number generator
rd.seed()
return success
def test_rand_true_anomaly(e,num_sample=10000, nbins=20, tolerance = 1e-3, seed="Rhysand", plot=False, save=True):
"""Test that phi is sampled as flat from zero to pi
Arguments:
- e: eccentricity of the orbit
    - num_sample: number of random true anomaly values generated
- nbins: random sampled numbers will be binned to compute
probabilities and compare to expected values. This variable
specifies the number of bins used.
- tolerance: tolerance for the test
- seed: the seed used for the random number generator
- plot: if true, plot results
- save: if true, saves the plot
    Returns: True if the test is successful, False otherwise
"""
rd.seed(seed)
true_anomaly_array = np.zeros(num_sample)
for i in range(0,len(true_anomaly_array)):
true_anomaly_array[i] = kicks.rand_true_anomaly(e)
#do a histogram
    # TODO: use numpy histogram to avoid plotting if it's not necessary
vals_true_anomaly, bins_true_anomaly = np.histogram(true_anomaly_array, bins=np.linspace(0,2*np.pi,nbins))
prob_test = np.zeros(len(vals_true_anomaly))
def func(x):
return np.sqrt(1/(4*np.pi))*(1-e**2)**(1.5)/((1+e*np.cos(x))**2)
normalize = quad(func,bins_true_anomaly[0],bins_true_anomaly[nbins-1])[0]
for k in range(0,len(vals_true_anomaly)):
prob_test[k] = quad(func,bins_true_anomaly[k],bins_true_anomaly[k+1])[0]
prob_test = prob_test/normalize
test_array = []
for j in range(0,len(prob_test)):
test_array = np.append(test_array, np.ones(int(round(prob_test[j]*num_sample)))*bins_true_anomaly[j])
if plot:
plt.hist(true_anomaly_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("true anomaly distribution")
plt.xlabel("true anomaly value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.show()
plt.close()
if save:
plt.hist(true_anomaly_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "function output")
plt.hist(test_array, bins=np.linspace(0,2*np.pi,nbins), alpha = 0.5, label = "expected value")
plt.title("true anomaly distribution")
plt.xlabel("true anomaly value")
plt.ylabel("distribution")
plt.legend(loc='upper right')
plt.savefig("true_anomaly_distribution.png")
plt.close()
#check if the probability computed for each bin is within the tolerance
success = True
tolerance = max(vals_true_anomaly)*tolerance
for k in range(0,len(vals_true_anomaly)):
prob_hist = vals_true_anomaly[k]/(sum(vals_true_anomaly))
if abs(prob_test[k]-prob_hist)>tolerance:
success = False
break
#re-seed the random number generator
rd.seed()
return success
def testing_circular_function_momentum(ai=133, m1=5.5, m2=55, m1f=1.4, test_sigma=100, num_sample=1000, seed = "Lela", tolerance=1e-3):
"""Test that the post_explosion_params_circular function produces
a correct angular momentum against a calculated angular momentum. This
    angular momentum is calculated by first finding the angular velocity, and
    the individual relative velocities in the center of mass frame pre-
    explosion. The components of the kick velocity were then added to the
    velocity of mass 1, which is the supernova mass. The separation in the
final center of mass frame accounting for the loss of mass of mass 1 is
crossed with the velocities to get the components of angular momentum. The
total angular momentum is calculated by taking the absolute value of these
components.
Arguments:
- ai: the initial semi-major axis of the system
pre-explosion in Rsun
- m1: mass of the pre-explosion star in Msun
        - m2: mass of the companion in Msun
- m1f: post explosion mass of the exploding star in Msun
- test_sigma: a sample sigma for the rand_velocity function in km/s
- num_sample: number of points sampled
- seed: the seed used for the random number generator
- tolerance: tolerance for the test
Returns: True or False as to whether the test was successful
"""
rd.seed(seed)
for i in range(num_sample):
#establishing random parameters
Vk = kicks.rand_velocity(test_sigma)*1e5
theta = kicks.rand_theta()
phi = kicks.rand_phi()
#getting values from the post_explosion_params_circular function
semi_major, e, angle, boolean = kicks.post_explosion_params_circular(ai, m1, m2, m1f,theta,phi,Vk)
#calculating the momentum using the results from the function
Momentum_function = orbital_angular_momentum(semi_major,m1f,m2,e)
#Calculating the momentum without using the results of the function
#establishing angular velocity
omega = np.sqrt(cgrav*Msun*(m1+m2)/(ai*Rsun)**3) #rad/second
#velocities of the masses before the kick
V1_initial = m2/(m1+m2)*omega*ai*Rsun #cm/second
V2 = m1/(m1+m2)*omega*ai*Rsun #cm/second,-y direction
#velocities after the kick, V2 is unaffected by the kick
V1x_final = Vk*1e5*np.sin(phi)*np.cos(theta)
V1y_final = V1_initial + Vk*1e5*np.cos(theta)
V1z_final = Vk*1e5*np.sin(phi)*np.sin(theta)
#separations from the center of mass in the center of mass frame post-explosion
R2 = ai*Rsun*m1f/(m1f+m2) #cm
R1 = ai*Rsun - R2 #cm
#calculating the components of the angular momentum using the cross product
Momentum_1y = -R1*V1z_final*m1f*Msun
Momentum_1z = R1*V1y_final*m1f*Msun
Momentum_2 = R2*V2*m2*Msun #z direction
#absolute value of the components of the angular momentum
Momentum_calculated = np.sqrt(Momentum_1y**2 + Momentum_1z**2 + Momentum_2**2)
#checking that the two momentums are relatively equal
if abs(Momentum_function - Momentum_calculated)/Momentum_function>tolerance:
return False
rd.seed()
return True
def testing_circular_function_graph(test_sigma = 100, test_m1 = 5.5, test_m2 = 55, test_ai = 133, test_m1f = 1.4, seed="Flay",sample_velocity = 100, npoints =10000, plot=False, save =True):
"""Test that the graph of the eccentricity vs the period looks correct
Arguments:
- test_sigma: a sample sigma for the rand_velocity function in km/s
- test_m1: mass of the pre-explosion star in Msun
        - test_m2: mass of the companion in Msun
- test_ai: the initial semi-major axis of the system
pre-explosion in Rsun
- test_m1f: post explosion mass of the exploding star in Msun
- seed: the seed used for the random number generator
- sample_velocity: a constant velocity over which a line is
drawn on the graph
- npoints: number of points sampled
- plot: if true, plot results
- save: if true, saves the plot
    Returns: 'True'; inspect the generated graph and compare it to the reference paper
"""
rd.seed(seed)
testing_function = np.zeros([npoints,2])
constant_velocity = np.zeros([npoints,2])
for i in range(len(testing_function)):
semi_major, e, angle, boolean = kicks.post_explosion_params_circular(test_ai, test_m1, test_m2, test_m1f, kicks.rand_theta(),kicks.rand_phi(),kicks.rand_velocity(test_sigma))
if semi_major > 0:
testing_function[i][0] = semi_major
testing_function[i][1] = e
theta = np.linspace(0,3.14,npoints)
velocity = np.linspace(0,400,npoints)
for j in range(len(constant_velocity)):
semi_major, e, angle, boolean = kicks.post_explosion_params_circular(test_ai, test_m1, test_m2, test_m1f,theta[j],0,sample_velocity)
if semi_major > 0:
constant_velocity[j][0] = semi_major
constant_velocity[j][1] = e
"""changing the semi-major axis to period values in days"""
for k in range(len(testing_function)):
testing_function[k][0] = kepler3_P(testing_function[k][0],test_m2,test_m1f)
constant_velocity[k][0] = kepler3_P(constant_velocity[k][0],test_m2,test_m1f)
if plot:
plt.plot(testing_function[:,0], testing_function[:,1], "o")
plt.xlim(0,50)
plt.ylim(0,1)
plt.plot(constant_velocity[:,0], constant_velocity[:,1], 'k-')
plt.title("post-explosion results")
plt.xlabel("Period in days")
plt.ylabel("Eccentricity")
plt.show()
plt.close()
if save:
plt.plot(testing_function[:,0], testing_function[:,1], "o")
plt.xlim(0,50)
plt.ylim(0,1)
plt.plot(constant_velocity[:,0], constant_velocity[:,1], 'k-')
plt.title("post-explosion results")
plt.xlabel("Period in days")
plt.ylabel("Eccentricity")
plt.savefig("post_explosion_circular_graph.png")
plt.close()
rd.seed()
return "True"
def testing_eccentric_function_graph(test_sigma = 100, test_M1 = 5.5, test_M2 = 55, test_Ai = 133, test_Mns = 1.4, seed="David Berne",sample_velocity = 100,npoints=10000, plot=True, save =False):
"""Test that the graph of the eccentricity vs the period looks correct
Arguments:
- test_sigma: a sample sigma for the rand_velocity function
- test_M1: solar mass of the first mass pre-explosion
- test_M2: solar mass of the second mass
- test_Ai: the initial semi-major axis of the system
- test_Mns: solar mass of the first mass post-explosion
- seed: the seed used for the random number generator
- sample_velocity: a constant velocity over which a line is
drawn on the graph
- npoints: number of points sampled
- plot: if true, plot results
- save: if true, saves the plot
    Returns: 'True'; inspect the generated graph and compare it to the reference paper
"""
rd.seed(seed)
testing_function = np.zeros([npoints,2])
constant_velocity = np.zeros([npoints,2])
for i in range(len(testing_function)):
semi_major, e, boolean = kicks.post_explosion_params_general(test_Ai,\
test_M1, test_M2, test_Mns, 0, kicks.rand_theta(), kicks.rand_phi(),\
kicks.rand_velocity(test_sigma),kicks.rand_true_anomaly(0))
if semi_major > 0:
testing_function[i][0] = semi_major
testing_function[i][1] = e
theta = np.linspace(0,3.14,npoints)
velocity = np.linspace(0,400,npoints)
for j in range(len(constant_velocity)):
semi_major, e, boolean = kicks.post_explosion_params_general(test_Ai, test_M1, test_M2, test_Mns,0,theta[j],0,sample_velocity,0)
if semi_major > 0:
constant_velocity[j][0] = semi_major
constant_velocity[j][1] = e
"""changing the semi-major axis to period values in days"""
for k in range(len(testing_function)):
testing_function[k][0] = keplers_third_law(testing_function[k][0],test_M2,test_Mns)
constant_velocity[k][0] = keplers_third_law(constant_velocity[k][0],test_M2,test_Mns)
if plot:
plt.plot(testing_function[:,0], testing_function[:,1], "o")
plt.xlim(0,50)
plt.ylim(0,1)
plt.plot(constant_velocity[:,0], constant_velocity[:,1], 'k-')
plt.title("post-explosion results")
plt.xlabel("Period in days")
plt.ylabel("Eccentricity")
plt.show()
plt.close()
if save:
plt.plot(testing_function[:,0], testing_function[:,1], "o")
plt.xlim(0,50)
plt.ylim(0,1)
plt.plot(constant_velocity[:,0], constant_velocity[:,1], 'k-')
plt.title("post-explosion results")
plt.xlabel("Period in days")
plt.ylabel("Eccentricity")
plt.savefig("post_explosion_circular_graph.png")
plt.close()
rd.seed()
return "True"
def testing_eccentric_function_momentum(Ai=133, M1=5.5, M2=55, Mns=1.4, test_sigma=100, num_sample=10000, seed = "Clara", tolerance=1e-3):
"""Test that the post_explosion_params_general function produces
a correct angular momentum against a calculated angular momentum using an
    eccentricity of zero. This angular momentum is calculated by first finding
    the angular velocity, and the individual relative velocities in the center
    of mass frame pre-explosion. The components of the kick velocity were then
    added to the velocity of mass 1, which is the supernova mass. The
separation in the final center of mass frame accounting for the loss of
mass in mass 1 is crossed with the velocities to get the components of
angular momentum. The total angular momentum is calculated by taking the
absolute value of these components.
Arguments:
- Ai: the initial semi-major axis of the system
- M1: solar mass of the first mass pre-explosion
- M2: solar mass of the second mass
- Mns: solar mass of the first mass post-explosion
- test_sigma: a sample sigma for the rand_velocity function
- num_sample: number of points sampled
- seed: the seed used for the random number generator
- tolerance: tolerance for the test
Returns: True or False as to whether the test was successful
"""
rd.seed(seed)
for i in range(num_sample):
#establishing random parameters
Vk = kicks.rand_velocity(test_sigma)*1e5
theta = kicks.rand_theta()
true_anomaly = kicks.rand_true_anomaly(0)
phi = kicks.rand_phi()
#getting values from the post_explosion_params_general function
semi_major, e, boolean = kicks.post_explosion_params_general(Ai, M1, M2, Mns,0,theta,phi,Vk,true_anomaly)
#calculating the momentum using the results from the function
Momentum_function = angular_momentum(semi_major,Mns,M2,e)
#Calculating the momentum without using the results of the function
#establishing angular velocity
omega = np.sqrt(cgrav*Msun*(M1+M2)/(Ai*Rsun)**3) #rad/second
#velocities of the masses before the kick
V1_initial = M2/(M1+M2)*omega*Ai*Rsun #cm/second
V2 = M1/(M1+M2)*omega*Ai*Rsun #cm/second,-y direction
#velocities after the kick, V2 is unaffected by the kick
V1x_final = Vk*1e5*np.sin(phi)*np.cos(theta)
V1y_final = V1_initial + Vk*1e5*np.cos(theta)
V1z_final = Vk*1e5*np.sin(phi)*np.sin(theta)
#separations from the center of mass in the center of mass frame post-explosion
R2 = Ai*Rsun*Mns/(Mns+M2) #cm
R1 = Ai*Rsun - R2 #cm
#calculating the components of the angular momentum using the cross product
Momentum_1y = -R1*V1z_final*Mns*Msun
Momentum_1z = R1*V1y_final*Mns*Msun
Momentum_2 = R2*V2*M2*Msun #z direction
#absolute value of the components of the angular momentum
Momentum_calculated = np.sqrt(Momentum_1y**2 + Momentum_1z**2 + Momentum_2**2)
#checking that the two momentums are relatively equal
if abs(Momentum_function - Momentum_calculated)/Momentum_function>tolerance:
print(Vk, theta, phi, true_anomaly, Momentum_calculated, Momentum_function, i)
return False
rd.seed()
return True
def testing_eccentric_kick(Ai=133, M1=5.5, M2=55, Mns=1.4, num_sample=100, seed = "Guarnaschelli",tolerance=1e-4):
"""Test for the posta-explosion_params_general that calulates
the necessary kick at perigee and appgee to cause a circular
orbital, then plug that result into the function to ensure it
returns an eccentricity of zero.
Arguments:
- Ai: the initial semi-major axis of the system
- M1: solar mass of the first mass pre-explosion
- M2: solar mass of the second mass
- Mns: solar mass of the first mass post-explosion
- num_sample: number of points sampled
- seed: the seed used for the random number generator
- tolerance: tolerance for the test
Returns: True or False as to whether the test was successful
"""
rd.seed(seed)
M1 = M1*Msun
M2 = M2*Msun
Mns = Mns*Msun
Ai = Ai*Rsun
e_samples = np.linspace(0,.99,num_sample)
for e in e_samples:
V_apogee = np.sqrt(cgrav*(M1+M2)*(1-e)/(Ai*(1+e)))
V_perigee = np.sqrt(cgrav*(M1+M2)*(1+e)/(Ai*(1-e)))
V_circular_apogee = np.sqrt(cgrav*(Mns+M2)/(Ai*(1+e)))
V_circular_perigee = np.sqrt(cgrav*(Mns+M2)/(Ai*(1-e)))
V_kick_apogee = np.absolute((V_apogee - V_circular_apogee)*1e-5)
V_kick_perigee = np.absolute((V_circular_perigee - V_perigee)*1e-5)
theta_apogee = np.pi
theta_perigee = np.pi
if V_circular_apogee > V_apogee:
theta_apogee = 0
if V_circular_perigee > V_perigee:
theta_perigee = 0
semi_major_a, e_a, boulean_a = kicks.post_explosion_params_general(Ai/Rsun,M1/Msun,M2/Msun,Mns/Msun,e,theta_apogee,0,V_kick_apogee,np.pi)
semi_major_p, e_p, boulean_p = kicks.post_explosion_params_general(Ai/Rsun,M1/Msun,M2/Msun,Mns/Msun,e,theta_perigee,0,V_kick_perigee,0)
if e_a > tolerance or e_p > tolerance:
return False
rd.seed()
return True
def testing_inverse_kick(Ai=133, M1=5.5, M2=55, Mns=1.4, test_sigma=1000, num_sample=100, seed="Tamlin",tolerance=1e-4):
"""Test for the post_explosions_params_general function that kicks
a circular system with mass loss, then reverses that with mass
gain back into a circular orbit. There are four different possible
ways to reverse the kick that are dependent on the true anomaly
and theta. 1) the inital kick sends the masses into an eccentric
orbit in the same initial direction, with a true anomaly between
0 and pi. 2) the inital kick sends the masses into an eccentric
orbit in the same initial direction, with a true anomaly between
pi and 2pi. 3)the inital kick sends the masses into an eccentric
orbit in the opposit direction, with a true anomaly between
0 and pi. 4) the inital kick sends the masses into an eccentric
orbit in the opposit direction, with a true anomaly between
pi and 2pi.
Arguments:
- Ai: the initial semi-major axis of the system
- M1: solar mass of the first mass pre-explosion
- M2: solar mass of the second mass
- Mns: solar mass of the first mass post-explosion
- test_sigma: a sample sigma for the rand_velocity function
- num_sample: number of points sampled
- seed: the seed used for the random number generator
- tolerance: tolerance for the test
Returns: True or False as to whether the test was successful
"""
rd.seed(seed)
for i in range(num_sample):
theta = kicks.rand_theta()
V_kick = kicks.rand_velocity(test_sigma)
semi_major_i, e_i, boulean_i = kicks.post_explosion_params_general(Ai,M1,M2,Mns,0,theta,0,V_kick,0)
k = semi_major_i*(1-e_i**2)/(e_i*Ai) - 1/e_i
true_anomaly = np.arccos(k)
semi_major_f, e_f, boulean_f = kicks.post_explosion_params_general(semi_major_i,Mns,M2,M1,e_i,np.pi-theta,np.pi,V_kick,true_anomaly)
Worked = True
if e_f > tolerance:
true_anomaly = 2*np.pi - true_anomaly
semi_major_f, e_f, boulean_f = kicks.post_explosion_params_general(semi_major_i,Mns,M2,M1,e_i,np.pi-theta,np.pi,V_kick,true_anomaly)
if e_f > tolerance:
semi_major_f, e_f, boulean_f = kicks.post_explosion_params_general(semi_major_i,Mns,M2,M1,e_i,theta,np.pi,V_kick,true_anomaly)
if e_f > tolerance:
true_anomaly = 2*np.pi - true_anomaly
semi_major_f, e_f, boulean_f = kicks.post_explosion_params_general(semi_major_i,Mns,M2,M1,e_i,theta,np.pi,V_kick,true_anomaly)
if e_f > tolerance:
Worked = False
rd.seed()
return Worked
def testing_momentum_full_eccentric(Ai=133, M1=5.5, M2=55, Mns=1.4, test_sigma=15, num_sample=100, seed="Lucien",tolerance=1e-4):
"""Test that the post_explosion_params_general function produces
a correct angular momentum against a calculated angular momentum. This
angular momentum is calculated by first finding the velocity of M1 pre-
explosion, then adding to that the components of the kick to get a velocity
in the theta, phi, and radial direction. This velocity is then changed to
    x and y components. Next the center of mass position and velocity are
    calculated, and using those values the relative velocities and position are
calculated. From there, the angular momentum is calculated using the cross-
product method, and adding the resulting values.
Arguments:
- Ai: the initial semi-major axis of the system
- M1: solar mass of the first mass pre-explosion
- M2: solar mass of the second mass
- Mns: solar mass of the first mass post-explosion
- test_sigma: a sample sigma for the rand_velocity function
- num_sample: number of points sampled
- seed: the seed used for the random number generator
- tolerance: tolerance for the test
Returns: True or False as to whether the test was successful
"""
rd.seed(seed)
e_samples = np.linspace(0,.99,num_sample)
for e in e_samples:
#establishing random parameters
Vk = kicks.rand_velocity(test_sigma)*1e5
theta = kicks.rand_theta()
phi = kicks.rand_phi()
true_anomaly = kicks.rand_true_anomaly(e)
separation_i = Rsun*separation_function(Ai,e,true_anomaly)
#getting values from the post_explosion_params_general function
semi_major_f, e_f, boolean = kicks.post_explosion_params_general(Ai, M1, M2, Mns,e,theta,phi,Vk*1e-5,true_anomaly)
#calculating the momentum using the results from the function
Momentum_function = angular_momentum(semi_major_f,Mns,M2,e_f)
V_theta_i = np.sqrt(cgrav*(M1+M2)*Msun*Rsun*Ai*(1-e**2))/separation_i
V_radius_i = np.sqrt(cgrav*(M1+M2)*Msun*(2/separation_i-1/(Rsun*Ai)-Ai*Rsun*(1-e**2)/(separation_i**2)))
V_radius = V_radius_i + Vk*np.sin(theta)*np.cos(phi)
V_theta = V_theta_i + Vk*np.cos(theta)
V_phi = Vk*np.sin(theta)*np.sin(phi)
V1x = V_radius
V1y = np.sqrt(V_theta**2+V_phi**2)
R_cm = Mns*separation_i/(Mns+M2) #x direction
V_cm_x = Mns*V1x/(Mns+M2) #x dirrection
V_cm_y = Mns*V1y/(Mns+M2) #y dirrection
Vx1_prime = V1x - V_cm_x
Vy1_prime = V1y - V_cm_y
Rx1_prime = separation_i - R_cm #+x direction
Ry1_prime = 0
Vx2_prime = -V_cm_x
Vy2_prime = -V_cm_y
Rx2_prime = 0 - R_cm #-x direction
Ry2_prime = 0
momentum1x = Vx1_prime*Mns*Msun
momentum1y = Vy1_prime*Mns*Msun
momentum2x = Vx2_prime*M2*Msun
momentum2y = Vy2_prime*M2*Msun
angular1 = Rx1_prime*momentum1y - Ry1_prime*momentum1x #z direction
angular2 = Rx2_prime*momentum2y - Ry2_prime*momentum2x #z direction
Momentum_calculated = (angular1 + angular2)
if abs(Momentum_function - Momentum_calculated)/Momentum_function>tolerance:
print(Vk, theta, phi, true_anomaly, Momentum_calculated, Momentum_function, e)
return False
rd.seed()
return True
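
# Illustrative driver (added for convenience; not part of the original module).
# It assumes the binary_tools package used in the imports above is installed.
if __name__ == "__main__":
    print("rand_phi uniform: {}".format(test_rand_phi(plot=False, save=False)))
    print("rand_theta sine: {}".format(test_rand_theta(plot=False, save=False)))
    print("rand_velocity maxwellian: {}".format(test_rand_velocity(265, plot=False, save=False)))
    print("rand_true_anomaly (e=0.5): {}".format(test_rand_true_anomaly(0.5, plot=False, save=False)))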
| orlox/binary_tools | binary/tests/test_kicks.py | Python | gpl-3.0 | 32,399 | 0.019815 |
"""Test iterating through a vtkCollection with the standard python syntax"""
import vtk
from vtk.test import Testing
class TestIterateCollection(Testing.vtkTest):
def setUp(self):
self.vtkObjs = [vtk.vtkObject() for _ in range(30)]
self.collection = vtk.vtkCollection()
for obj in self.vtkObjs:
self.collection.AddItem(obj)
self.emptyCollection = vtk.vtkCollection()
def testIterateCollection(self):
newObjsList = [obj for obj in self.collection]
self.assertEqual(self.vtkObjs, newObjsList)
counter = 0
for _ in self.collection:
counter += 1
self.assertEqual(counter, 30)
counter = 0
for _ in self.emptyCollection:
counter += 1
self.assertEqual(counter, 0)
def testCollectionChild(self):
#check that iteration is being inherited correctly
dataArray = vtk.vtkIntArray()
dataArrayCollection = vtk.vtkDataArrayCollection()
dataArrayCollection.AddItem(dataArray)
self.assertEqual([obj for obj in dataArrayCollection],
[dataArray])
def testOperators(self):
self.assertTrue(self.vtkObjs[0] in self.collection)
self.assertEqual(list(self.collection), self.vtkObjs)
def testReferenceCounting(self):
initialReferenceCount = self.collection.GetReferenceCount()
list(self.collection)
self.assertEqual(self.collection.GetReferenceCount(), initialReferenceCount)
if __name__ == "__main__":
Testing.main([(TestIterateCollection, 'test')])
| keithroe/vtkoptix | Common/Core/Testing/Python/TestIterateCollection.py | Python | bsd-3-clause | 1,600 | 0.001875 |
# -*- coding: utf-8 -*-
'''
script.screensaver.football.panel - A Football Panel for Kodi
RSS Feeds, Livescores and League tables as a screensaver or
program addon
Copyright (C) 2016 enen92
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmc
import xbmcaddon
import xbmcvfs
import os
import pytz
addon = xbmcaddon.Addon(id='script.screensaver.football.panel')
addon_path = addon.getAddonInfo('path')
addon_userdata = xbmc.translatePath(addon.getAddonInfo('profile')).decode('utf-8')
addon_name = addon.getAddonInfo('name')
addon_userdata_cached_leagues = os.path.join(addon_userdata,"leagues")
addon_userdata_cached_leagueteams = os.path.join(addon_userdata,"leagueteams")
addon_userdata_cached_teams = os.path.join(addon_userdata,"teams")
ignored_league_list_file = os.path.join(addon_userdata,"ignored.txt")
livescores_update_time = int(addon.getSetting("livescores-update-time"))
tables_update_time = int(addon.getSetting("tables-update-time"))
rss_update_time = int(addon.getSetting("rss-update-time"))
my_timezone = addon.getSetting("timezone")
my_location = pytz.timezone(pytz.all_timezones[int(my_timezone)])
hide_notstarted = addon.getSetting("hide-notstarted")
hide_finished = addon.getSetting("hide-finished")
show_alternative = addon.getSetting("use-alternative-name")
LIVESCORES_PANEL_CONTROL_1 = 32500
LIVESCORES_PANEL_CONTROL_2 = 32501
LEAGUETABLES_LIST_CONTROL = 32552
LEAGUETABLES_CLEARART = 32503
OPTIONS_PANEL = 6
OPTIONS_OK = 5
OPTIONS_CANCEL = 7
RSS_FEEDS = 32504
NO_GAMES = 32505
ACTION_LEFT = 1
ACTION_BACK1 = 10
ACTION_BACK2 = 92
def removeNonAscii(s):
return "".join(filter(lambda x: ord(x)<128, s))
def translate(text):
return addon.getLocalizedString(text).encode('utf-8')
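# Illustrative usage (added; real call sites live in the addon's other modules):
# heading = translate(NO_GAMES)              # localized "no games" heading
# clean = removeNonAscii(u'Bayern München')  # -> 'Bayern Mnchen' (non-ASCII dropped)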
| enen92/script.screensaver.football.panel | resources/lib/common_addon.py | Python | gpl-2.0 | 2,346 | 0.005968 |
from collections import defaultdict
import logging
import math
logger = logging.getLogger('lobster.algo')
class Algo(object):
"""A task creation algorithm
Attempts to be fair when creating tasks by making sure that tasks are
created evenly for every category and every workflow in each category
based on the remaining work per workflow and cores used.
Parameters
----------
config : Configuration
The Lobster configuration to use.
"""
def __init__(self, config):
self.__config = config
def run(self, total_cores, queued, remaining):
"""Run the task creation algorithm.
If not enough tasks can be created for a workflow, the available
tasks are scaled down in size/runtime ("tapered") to ensure that
the available resources are used most efficiently.
Steps
-----
1. Calculate remaining workload, weighed by cores, per category
2. Determine how many cores need to be filled
3. Go through workflows:
1. Determine the fraction of category workload versus the total
workload
2. Do the same for the workflow workload versus the category
workload
3. Use the first fraction to calculate how many tasks should be
created for the category
            4. Adjust for minimum queued and maximum total task requirements
5. Subtract already queued tasks
6. Calculate how many tasks should be created for the current
workflow based on the previously calculated fraction
7. Adjust task size taper based on available tasks and needed
tasks
Parameters
----------
total_cores : int
The number of cores that `WorkQueue` currently is in
control of.
queued : dict
A dictionary containing information about the queue on a
per category basis. Keys are category names, values are
dictionaries with the keys `running` and `queued`, denoting
how many category tasks fall into each bin.
remaining : dict
A dictionary with workflows as keys, and a tuple containing
the following as value:
* if all units for the workflow are available
* how many units are left to process
* how many tasks can still be created with the default size
Returns
-------
data : list
A list containing workflow label, how many tasks to create,
and the task taper adjustment.
"""
# Remaining workload
workloads = defaultdict(int)
for wflow, (complete, units, tasks) in remaining.items():
if not complete and tasks < 1.:
logger.debug("workflow {} has not enough units available to form new tasks".format(wflow.label))
continue
elif units == 0:
continue
task_cores = wflow.category.cores or 1
workloads[wflow.category.name] += task_cores * tasks
# How many cores we need to occupy: have at least 10% of the
# available cores provisioned with waiting work
fill_cores = total_cores + max(int(0.1 * total_cores), self.__config.advanced.payload)
total_workload = sum(workloads.values())
if total_workload == 0:
return []
# contains (workflow label, tasks, taper)
data = []
for wflow, (complete, units, tasks) in remaining.items():
if not complete and tasks < 1. or units == 0:
continue
task_cores = wflow.category.cores or 1
category_fraction = workloads[wflow.category.name] / float(total_workload)
workflow_fraction = task_cores * tasks / float(workloads[wflow.category.name])
needed_category_tasks = category_fraction * fill_cores / task_cores
if wflow.category.tasks_max:
allowed = wflow.category.tasks_max - sum(queued[wflow.category.name].values())
needed_category_tasks = min(allowed, needed_category_tasks)
if wflow.category.tasks_min:
required = wflow.category.tasks_min - queued[wflow.category.name]['queued']
needed_category_tasks = max(required, needed_category_tasks)
needed_category_tasks -= queued[wflow.category.name]['queued']
needed_workflow_tasks = max(0, int(math.ceil(needed_category_tasks * workflow_fraction)))
if needed_category_tasks <= 0:
continue
taper = 1.
if tasks < needed_workflow_tasks and complete:
taper = min(1., tasks / float(needed_workflow_tasks))
logger.debug(("creating tasks for {w.label} (category: {w.category.name}):\n" +
"\tcategory task limit: ({w.category.tasks_min}, {w.category.tasks_max})\n" +
"\tcategory tasks needed: {0}\n" +
"\tworkflow tasks needed: {1}\n" +
"\tworkflow tasks available: {2} (complete: {4})\n" +
"\ttask taper: {3}").format(needed_category_tasks, needed_workflow_tasks, tasks, taper, complete, w=wflow))
data.append((wflow.label, needed_workflow_tasks, taper))
# adjust accounting for next workflow
queued[wflow.category.name]['queued'] += needed_workflow_tasks
return data
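
# Illustrative driver (added for clarity; not part of lobster). It exercises
# Algo.run() with minimal stand-in objects shaped like the structures the
# docstring above describes; every name below is a stub, not real configuration.
if __name__ == "__main__":
    class _Stub(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    category = _Stub(name="analysis", cores=2, tasks_max=None, tasks_min=None)
    workflow = _Stub(label="wf0", category=category)
    config = _Stub(advanced=_Stub(payload=10))
    created = Algo(config).run(
        total_cores=100,
        queued={"analysis": {"running": 10, "queued": 5}},
        remaining={workflow: (True, 500, 50.0)},  # (complete, units, default-size tasks)
    )
    print(created)  # -> [('wf0', 50, 1.0)] with these inputs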
| matz-e/lobster | lobster/core/create.py | Python | mit | 5,617 | 0.00178 |
import re
import datetime
import dateutil.parser
from django.conf import settings
from django.utils import feedgenerator
from django.utils.html import linebreaks
from apps.social.models import MSocialServices
from apps.reader.models import UserSubscription
from utils import log as logging
from vendor.facebook import GraphAPIError
class FacebookFetcher:
def __init__(self, feed, options=None):
self.feed = feed
self.options = options or {}
def fetch(self):
page_name = self.extract_page_name()
if not page_name:
return
facebook_user = self.facebook_user()
if not facebook_user:
return
# If 'video', use video API to get embed:
# f.get_object('tastyvegetarian', fields='posts')
# f.get_object('1992797300790726', fields='embed_html')
feed = self.fetch_page_feed(facebook_user, page_name, 'name,about,posts,videos,photos')
data = {}
data['title'] = feed.get('name', "%s on Facebook" % page_name)
data['link'] = feed.get('link', "https://facebook.com/%s" % page_name)
data['description'] = feed.get('about', "%s on Facebook" % page_name)
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur Facebook API Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = self.feed.feed_address
rss = feedgenerator.Atom1Feed(**data)
merged_data = []
posts = feed.get('posts', {}).get('data', None)
if posts:
for post in posts:
story_data = self.page_posts_story(facebook_user, post)
if not story_data:
continue
merged_data.append(story_data)
videos = feed.get('videos', {}).get('data', None)
if videos:
for video in videos:
story_data = self.page_video_story(facebook_user, video)
if not story_data:
continue
for seen_data in merged_data:
if story_data['link'] == seen_data['link']:
# Video wins over posts (and attachments)
seen_data['description'] = story_data['description']
seen_data['title'] = story_data['title']
break
for story_data in merged_data:
rss.add_item(**story_data)
return rss.writeString('utf-8')
def extract_page_name(self):
page = None
try:
page_groups = re.search('facebook.com/(\w+)/?', self.feed.feed_address)
if not page_groups:
return
page = page_groups.group(1)
except IndexError:
return
return page
def facebook_user(self):
facebook_api = None
social_services = None
if self.options.get('requesting_user_id', None):
social_services = MSocialServices.get_user(self.options.get('requesting_user_id'))
facebook_api = social_services.facebook_api()
if not facebook_api:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No facebook API for %s' %
(self.feed.log_title[:30], self.feed.feed_address, self.options))
return
else:
usersubs = UserSubscription.objects.filter(feed=self.feed)
if not usersubs:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No subscriptions' %
(self.feed.log_title[:30], self.feed.feed_address))
return
for sub in usersubs:
social_services = MSocialServices.get_user(sub.user_id)
if not social_services.facebook_uid:
continue
facebook_api = social_services.facebook_api()
if not facebook_api:
continue
else:
break
if not facebook_api:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No facebook API for %s' %
(self.feed.log_title[:30], self.feed.feed_address, usersubs[0].user.username))
return
return facebook_api
def fetch_page_feed(self, facebook_user, page, fields):
try:
stories = facebook_user.get_object(page, fields=fields)
except GraphAPIError, e:
message = str(e).lower()
if 'session has expired' in message:
logging.debug(u' ***> [%-30s] ~FRFacebook page failed/expired, disconnecting facebook: %s: %s' %
(self.feed.log_title[:30], self.feed.feed_address, e))
self.feed.save_feed_history(560, "Facebook Error: Expired token")
return {}
if not stories:
return {}
return stories
def page_posts_story(self, facebook_user, page_story):
categories = set()
if 'message' not in page_story:
# Probably a story shared on the page's timeline, not a published story
return
message = linebreaks(page_story['message'])
created_date = page_story['created_time']
if isinstance(created_date, unicode):
created_date = dateutil.parser.parse(created_date)
fields = facebook_user.get_object(page_story['id'], fields='permalink_url,link,attachments')
permalink = fields.get('link', fields['permalink_url'])
attachments_html = ""
if fields.get('attachments', None) and fields['attachments']['data']:
for attachment in fields['attachments']['data']:
if 'media' in attachment:
attachments_html += "<img src=\"%s\" />" % attachment['media']['image']['src']
if attachment.get('subattachments', None):
for subattachment in attachment['subattachments']['data']:
attachments_html += "<img src=\"%s\" />" % subattachment['media']['image']['src']
content = """<div class="NB-facebook-rss">
<div class="NB-facebook-rss-message">%s</div>
<div class="NB-facebook-rss-picture">%s</div>
</div>""" % (
message,
attachments_html
)
story = {
'title': message,
'link': permalink,
'description': content,
'categories': list(categories),
'unique_id': "fb_post:%s" % page_story['id'],
'pubdate': created_date,
}
return story
def page_video_story(self, facebook_user, page_story):
categories = set()
if 'description' not in page_story:
return
message = linebreaks(page_story['description'])
created_date = page_story['updated_time']
if isinstance(created_date, unicode):
created_date = dateutil.parser.parse(created_date)
permalink = facebook_user.get_object(page_story['id'], fields='permalink_url')['permalink_url']
embed_html = facebook_user.get_object(page_story['id'], fields='embed_html')
if permalink.startswith('/'):
permalink = "https://www.facebook.com%s" % permalink
content = """<div class="NB-facebook-rss">
<div class="NB-facebook-rss-message">%s</div>
<div class="NB-facebook-rss-embed">%s</div>
</div>""" % (
message,
embed_html.get('embed_html', '')
)
story = {
'title': page_story.get('story', message),
'link': permalink,
'description': content,
'categories': list(categories),
'unique_id': "fb_post:%s" % page_story['id'],
'pubdate': created_date,
}
return story
def favicon_url(self):
page_name = self.extract_page_name()
facebook_user = self.facebook_user()
if not facebook_user:
logging.debug(u' ***> [%-30s] ~FRFacebook icon failed, disconnecting facebook: %s' %
(self.feed.log_title[:30], self.feed.feed_address))
return
try:
picture_data = facebook_user.get_object(page_name, fields='picture')
except GraphAPIError, e:
message = str(e).lower()
if 'session has expired' in message:
logging.debug(u' ***> [%-30s] ~FRFacebook icon failed/expired, disconnecting facebook: %s: %s' %
(self.feed.log_title[:30], self.feed.feed_address, e))
return
if 'picture' in picture_data:
return picture_data['picture']['data']['url']
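
# Hypothetical usage sketch (added for illustration; the shapes of `feed` and
# `options` are assumptions based on the methods above):
# fetcher = FacebookFetcher(feed, options={'requesting_user_id': user.pk})
# rss_xml = fetcher.fetch()        # Atom XML string, or None if the lookup fails
# icon_url = fetcher.favicon_url() # page picture URL, or None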
| AlphaCluster/NewsBlur | utils/facebook_fetcher.py | Python | mit | 9,072 | 0.006944 |
#!/usr/bin/python
# write an experiment that raises an exception
import sys
import os
BOREALISPATH = os.environ['BOREALISPATH']
sys.path.append(BOREALISPATH)
import experiments.superdarn_common_fields as scf
from experiment_prototype.experiment_prototype import ExperimentPrototype
from experiment_prototype.decimation_scheme.decimation_scheme import \
DecimationScheme, DecimationStage, create_firwin_filter_by_attenuation
class TestExperiment(ExperimentPrototype):
def __init__(self):
cpid = 1
# Filter_taps is not a list
rates = [5.0e6, 500.0e3, 100.0e3, 50.0e3/3]
dm_rates = [10, 5, 6, 5]
transition_widths = [150.0e3, 40.0e3, 15.0e3, 1.0e3]
cutoffs = [20.0e3, 10.0e3, 10.0e3, 5.0e3]
ripple_dbs = [150.0, 80.0, 35.0, 9.0]
scaling_factors = [10.0, 100.0, 100.0, 100.0]
all_stages = []
for stage in range(0, len(rates)):
filter_taps = list(
scaling_factors[stage] * create_firwin_filter_by_attenuation(
rates[stage], transition_widths[stage], cutoffs[stage],
ripple_dbs[stage]))
all_stages.append(DecimationStage(stage, rates[stage],
dm_rates[stage], set(filter_taps))) # filter_taps is not a list, should fail
# changed from 10e3/3->10e3
decimation_scheme = (DecimationScheme(rates[0], rates[-1]/dm_rates[-1], stages=all_stages))
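        # Added note: the cascade decimates by 10*5*6*5 = 1500 overall, i.e.
        # 5 MHz -> 500 kHz -> 100 kHz -> 16.667 kHz -> ~3.333 kHz output rate.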
super(TestExperiment, self).__init__(
cpid, output_rx_rate=decimation_scheme.output_sample_rate,
decimation_scheme=decimation_scheme)
if scf.IS_FORWARD_RADAR:
beams_to_use = scf.STD_16_FORWARD_BEAM_ORDER
else:
beams_to_use = scf.STD_16_REVERSE_BEAM_ORDER
if scf.opts.site_id in ["cly", "rkn", "inv"]:
num_ranges = scf.POLARDARN_NUM_RANGES
if scf.opts.site_id in ["sas", "pgr"]:
num_ranges = scf.STD_NUM_RANGES
slice_1 = { # slice_id = 0, there is only one slice.
"pulse_sequence": scf.SEQUENCE_7P,
"tau_spacing": scf.TAU_SPACING_7P,
"pulse_len": scf.PULSE_LEN_45KM,
"num_ranges": num_ranges,
"first_range": scf.STD_FIRST_RANGE,
"intt": 3500, # duration of an integration, in ms
"beam_angle": scf.STD_16_BEAM_ANGLE,
"beam_order": beams_to_use,
"scanbound": [i * 3.5 for i in range(len(beams_to_use))], #1 min scan
"txfreq" : scf.COMMON_MODE_FREQ_1, #kHz
"acf": True,
"xcf": True, # cross-correlation processing
"acfint": True, # interferometer acfs
}
self.add_slice(slice_1)
| SuperDARNCanada/borealis | experiments/testing_archive/test_taps_not_list.py | Python | gpl-3.0 | 2,729 | 0.004397 |
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from datetime import datetime
from hexwick.forms import UserForm
from hexwick.forms import NewAccountForm
from hexwick.user import User
from hexwick.models import PlaidUser
def index(request):
request.session.set_test_cookie()
context = RequestContext(request)
response = render_to_response('index.html', context)
visits = int(request.COOKIES.get('visits', '0'))
if 'last_visit' in request.COOKIES:
last_visit = request.COOKIES['last_visit']
last_visit_time = datetime.strptime(
last_visit[:-7],
"%Y-%m-%d %H:%M:%S"
)
if (datetime.now() - last_visit_time).days > 0:
response.set_cookie('visits', visits + 1)
response.set_cookie('last_visit', datetime.now())
else:
response.set_cookie('last_visit', datetime.now())
return render_to_response('index.html', context)
@login_required
def restricted(request):
user = PlaidUser.objects.get(user=request.user)
if request.method == 'POST':
form = NewAccountForm(request.POST)
emp1 = User(request.user)
if form.is_valid():
emp1.new_plaid_user(
request.POST['username'],
request.POST['password'],
request.POST['accountType'],
user,
request
)
return HttpResponseRedirect('/restricted/')
context = RequestContext(request)
if request.user.is_authenticated():
emp1 = User(request.user)
plaidinformation = []
try:
plaidinformation = emp1.get_info(request)
except Exception, e:
raise e
return render_to_response('site.html', plaidinformation, context)
@login_required
def refresh(request):
if request.user.is_authenticated():
emp1 = User(request.user)
emp1.refresh(request)
return HttpResponse(status=201)
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect('/')
def register(request):
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
context = RequestContext(request)
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
registered = True
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
login(request, user)
PlaidUser.objects.create(
user=request.user
).save()
return HttpResponseRedirect("/restricted/")
else:
user_form = UserForm()
return render_to_response(
'register.html',
{
'user_form': user_form,
'registered': registered
},
context
)
def user_login(request):
context = RequestContext(request)
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect('../restricted/')
else:
return HttpResponse("Your Rango account is disabled.")
else:
return HttpResponse("Invalid login details supplied.")
else:
return render_to_response('login.html', {}, context)
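
# Hypothetical URL wiring for the views above (an assumption; the project's
# actual urls.py is not part of this file):
# from django.conf.urls import url
# from hexwick import views
# urlpatterns = [
#     url(r'^$', views.index),
#     url(r'^restricted/$', views.restricted),
#     url(r'^refresh/$', views.refresh),
#     url(r'^register/$', views.register),
#     url(r'^login/$', views.user_login),
#     url(r'^logout/$', views.user_logout),
# ]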
| dperconti/hexwick_python | hexwick/views.py | Python | agpl-3.0 | 3,890 | 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, copy
import os
import json
from frappe.utils import cstr, flt, getdate
from frappe import _
from frappe.utils.file_manager import save_file
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.doctype.account.account import RootNotEditable
from frappe.core.doctype.communication.comment import add_info_comment
from erpnext.setup.setup_wizard.domainify import setup_domain
def setup_complete(args=None):
if frappe.db.sql("select name from tabCompany"):
frappe.throw(_("Setup Already Complete!!"))
install_fixtures.install(args.get("country"))
create_price_lists(args)
create_fiscal_year_and_company(args)
create_sales_tax(args)
create_users(args)
set_defaults(args)
create_territories()
create_feed_and_todo()
create_email_digest()
create_letter_head(args)
create_taxes(args)
create_items(args)
create_customers(args)
create_suppliers(args)
if args.get('setup_website'):
website_maker(args)
create_logo(args)
frappe.local.message_log = []
setup_domain(args.get('domain'))
frappe.db.commit()
login_as_first_user(args)
frappe.db.commit()
frappe.clear_cache()
if args.get("add_sample_data"):
try:
make_sample_data(args)
frappe.clear_cache()
except:
# clear message
if frappe.message_log:
frappe.message_log.pop()
pass
def create_fiscal_year_and_company(args):
if (args.get('fy_start_date')):
curr_fiscal_year = get_fy_details(args.get('fy_start_date'), args.get('fy_end_date'))
frappe.get_doc({
"doctype":"Fiscal Year",
'year': curr_fiscal_year,
'year_start_date': args.get('fy_start_date'),
'year_end_date': args.get('fy_end_date'),
}).insert()
args["curr_fiscal_year"] = curr_fiscal_year
# Company
if (args.get('company_name')):
frappe.get_doc({
"doctype":"Company",
'company_name':args.get('company_name').strip(),
'abbr':args.get('company_abbr'),
'default_currency':args.get('currency'),
'country': args.get('country'),
'create_chart_of_accounts_based_on': 'Standard Template',
'chart_of_accounts': args.get('chart_of_accounts'),
'domain': args.get('domain')
}).insert()
#Enable shopping cart
enable_shopping_cart(args)
# Bank Account
create_bank_account(args)
def enable_shopping_cart(args):
frappe.get_doc({
"doctype": "Shopping Cart Settings",
"enabled": 1,
'company': args.get('company_name').strip(),
'price_list': frappe.db.get_value("Price List", {"selling": 1}),
'default_customer_group': _("Individual"),
'quotation_series': "QTN-",
}).insert()
def create_bank_account(args):
if args.get("bank_account"):
company_name = args.get('company_name').strip()
bank_account_group = frappe.db.get_value("Account",
{"account_type": "Bank", "is_group": 1, "root_type": "Asset",
"company": company_name})
if bank_account_group:
bank_account = frappe.get_doc({
"doctype": "Account",
'account_name': args.get("bank_account"),
'parent_account': bank_account_group,
'is_group':0,
'company': company_name,
"account_type": "Bank",
})
try:
return bank_account.insert()
except RootNotEditable:
frappe.throw(_("Bank account cannot be named as {0}").format(args.get("bank_account")))
except frappe.DuplicateEntryError:
# bank account same as a CoA entry
pass
def create_price_lists(args):
for pl_type, pl_name in (("Selling", _("Standard Selling")), ("Buying", _("Standard Buying"))):
frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": 1 if pl_type == "Buying" else 0,
"selling": 1 if pl_type == "Selling" else 0,
"currency": args["currency"]
}).insert()
def set_defaults(args):
# enable default currency
frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.update({
'current_fiscal_year': args.curr_fiscal_year,
'default_currency': args.get('currency'),
'default_company':args.get('company_name').strip(),
"country": args.get("country"),
})
global_defaults.save()
frappe.db.set_value("System Settings", None, "email_footer_address", args.get("company"))
accounts_settings = frappe.get_doc("Accounts Settings")
accounts_settings.auto_accounting_for_stock = 1
accounts_settings.save()
stock_settings = frappe.get_doc("Stock Settings")
stock_settings.item_naming_by = "Item Code"
stock_settings.valuation_method = "FIFO"
stock_settings.default_warehouse = frappe.db.get_value('Warehouse', {'warehouse_name': _('Stores')})
stock_settings.stock_uom = _("Nos")
stock_settings.auto_indent = 1
stock_settings.auto_insert_price_list_rate_if_missing = 1
stock_settings.automatically_set_serial_nos_based_on_fifo = 1
stock_settings.save()
selling_settings = frappe.get_doc("Selling Settings")
selling_settings.cust_master_name = "Customer Name"
selling_settings.so_required = "No"
selling_settings.dn_required = "No"
selling_settings.allow_multiple_items = 1
selling_settings.save()
buying_settings = frappe.get_doc("Buying Settings")
buying_settings.supp_master_name = "Supplier Name"
buying_settings.po_required = "No"
buying_settings.pr_required = "No"
buying_settings.maintain_same_rate = 1
buying_settings.allow_multiple_items = 1
buying_settings.save()
notification_control = frappe.get_doc("Notification Control")
notification_control.quotation = 1
notification_control.sales_invoice = 1
notification_control.purchase_order = 1
notification_control.save()
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.save()
def create_feed_and_todo():
"""update Activity feed and create todo for creation of item, customer, vendor"""
add_info_comment(**{
"subject": _("ERPNext Setup Complete!")
})
def create_email_digest():
from frappe.utils.user import get_system_managers
system_managers = get_system_managers(only_name=True)
if not system_managers:
return
companies = frappe.db.sql_list("select name FROM `tabCompany`")
for company in companies:
if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
edigest = frappe.get_doc({
"doctype": "Email Digest",
"name": "Default Weekly Digest - " + company,
"company": company,
"frequency": "Weekly",
"recipient_list": "\n".join(system_managers)
})
for df in edigest.meta.get("fields", {"fieldtype": "Check"}):
if df.fieldname != "scheduler_errors":
edigest.set(df.fieldname, 1)
edigest.insert()
# scheduler errors digest
if companies:
edigest = frappe.new_doc("Email Digest")
edigest.update({
"name": "Scheduler Errors",
"company": companies[0],
"frequency": "Daily",
"recipient_list": "\n".join(system_managers),
"scheduler_errors": 1,
"enabled": 1
})
edigest.insert()
def get_fy_details(fy_start_date, fy_end_date):
start_year = getdate(fy_start_date).year
if start_year == getdate(fy_end_date).year:
fy = cstr(start_year)
else:
fy = cstr(start_year) + '-' + cstr(start_year + 1)
return fy
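# Country specific sales taxes are read from data/country_wise_tax.json and turned
# into tax accounts plus Sales and Purchase Taxes and Charges Templates.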
def create_sales_tax(args):
country_wise_tax = get_country_wise_tax(args.get("country"))
if country_wise_tax and len(country_wise_tax) > 0:
for sales_tax, tax_data in country_wise_tax.items():
make_tax_account_and_template(args.get("company_name").strip(),
tax_data.get('account_name'), tax_data.get('tax_rate'), sales_tax)
def get_country_wise_tax(country):
data = {}
with open (os.path.join(os.path.dirname(__file__), "data", "country_wise_tax.json")) as countrywise_tax:
data = json.load(countrywise_tax).get(country)
return data
def create_taxes(args):
for i in xrange(1,6):
if args.get("tax_" + str(i)):
# replace % in case someone also enters the % symbol
tax_rate = cstr(args.get("tax_rate_" + str(i)) or "").replace("%", "")
account_name = args.get("tax_" + str(i))
make_tax_account_and_template(args.get("company_name").strip(), account_name, tax_rate)
def make_tax_account_and_template(company, account_name, tax_rate, template_name=None):
try:
account = make_tax_account(company, account_name, tax_rate)
if account:
make_sales_and_purchase_tax_templates(account, template_name)
except frappe.NameError, e:
if e.args[2][0]==1062:
pass
else:
raise
except RootNotEditable, e:
pass
def get_tax_account_group(company):
tax_group = frappe.db.get_value("Account",
{"account_name": "Duties and Taxes", "is_group": 1, "company": company})
if not tax_group:
tax_group = frappe.db.get_value("Account", {"is_group": 1, "root_type": "Liability",
"account_type": "Tax", "company": company})
return tax_group
def make_tax_account(company, account_name, tax_rate):
tax_group = get_tax_account_group(company)
if tax_group:
return frappe.get_doc({
"doctype":"Account",
"company": company,
"parent_account": tax_group,
"account_name": account_name,
"is_group": 0,
"report_type": "Balance Sheet",
"root_type": "Liability",
"account_type": "Tax",
"tax_rate": flt(tax_rate) if tax_rate else None
}).insert(ignore_permissions=True)
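# Build a Sales Taxes and Charges Template for the given tax account, then clone the
# same rows into a Purchase Taxes and Charges Template.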
def make_sales_and_purchase_tax_templates(account, template_name=None):
if not template_name:
template_name = account.name
sales_tax_template = {
"doctype": "Sales Taxes and Charges Template",
"title": template_name,
"company": account.company,
"taxes": [{
"category": "Valuation and Total",
"charge_type": "On Net Total",
"account_head": account.name,
"description": "{0} @ {1}".format(account.account_name, account.tax_rate),
"rate": account.tax_rate
}]
}
# Sales
frappe.get_doc(copy.deepcopy(sales_tax_template)).insert(ignore_permissions=True)
# Purchase
purchase_tax_template = copy.deepcopy(sales_tax_template)
purchase_tax_template["doctype"] = "Purchase Taxes and Charges Template"
frappe.get_doc(purchase_tax_template).insert(ignore_permissions=True)
def create_items(args):
for i in xrange(1,6):
item = args.get("item_" + str(i))
if item:
item_group = args.get("item_group_" + str(i))
is_sales_item = args.get("is_sales_item_" + str(i))
is_purchase_item = args.get("is_purchase_item_" + str(i))
is_stock_item = item_group!=_("Services")
default_warehouse = ""
if is_stock_item:
default_warehouse = frappe.db.get_value("Warehouse", filters={
"warehouse_name": _("Finished Goods") if is_sales_item else _("Stores"),
"company": args.get("company_name").strip()
})
try:
frappe.get_doc({
"doctype":"Item",
"item_code": item,
"item_name": item,
"description": item,
"show_in_website": 1,
"is_sales_item": is_sales_item,
"is_purchase_item": is_purchase_item,
"is_stock_item": is_stock_item and 1 or 0,
"item_group": item_group,
"stock_uom": args.get("item_uom_" + str(i)),
"default_warehouse": default_warehouse
}).insert()
if args.get("item_img_" + str(i)):
item_image = args.get("item_img_" + str(i)).split(",")
if len(item_image)==3:
filename, filetype, content = item_image
fileurl = save_file(filename, content, "Item", item, decode=True).file_url
frappe.db.set_value("Item", item, "image", fileurl)
if args.get("item_price_" + str(i)):
item_price = flt(args.get("item_price_" + str(i)))
if is_sales_item:
price_list_name = frappe.db.get_value("Price List", {"selling": 1})
make_item_price(item, price_list_name, item_price)
if is_purchase_item:
price_list_name = frappe.db.get_value("Price List", {"buying": 1})
make_item_price(item, price_list_name, item_price)
except frappe.NameError:
pass
def make_item_price(item, price_list_name, item_price):
frappe.get_doc({
"doctype": "Item Price",
"price_list": price_list_name,
"item_code": item,
"price_list_rate": item_price
}).insert()
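# Up to five customers and five suppliers can come from the wizard; each optional
# contact name is split into first/last name and linked to the new party.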
def create_customers(args):
for i in xrange(1,6):
customer = args.get("customer_" + str(i))
if customer:
try:
doc = frappe.get_doc({
"doctype":"Customer",
"customer_name": customer,
"customer_type": "Company",
"customer_group": _("Commercial"),
"territory": args.get("country"),
"company": args.get("company_name").strip()
}).insert()
if args.get("customer_contact_" + str(i)):
create_contact(args.get("customer_contact_" + str(i)),
"Customer", doc.name)
except frappe.NameError:
pass
def create_suppliers(args):
for i in xrange(1,6):
supplier = args.get("supplier_" + str(i))
if supplier:
try:
doc = frappe.get_doc({
"doctype":"Supplier",
"supplier_name": supplier,
"supplier_type": _("Local"),
"company": args.get("company_name").strip()
}).insert()
if args.get("supplier_contact_" + str(i)):
create_contact(args.get("supplier_contact_" + str(i)),
"Supplier", doc.name)
except frappe.NameError:
pass
def create_contact(contact, party_type, party):
"""Create contact based on given contact name"""
contact = contact.strip().split(" ")
contact = frappe.get_doc({
"doctype":"Contact",
"first_name":contact[0],
"last_name": len(contact) > 1 and contact[1] or ""
})
contact.append('links', dict(link_doctype=party_type, link_name=party))
contact.insert()
def create_letter_head(args):
if args.get("attach_letterhead"):
frappe.get_doc({
"doctype":"Letter Head",
"letter_head_name": _("Standard"),
"is_default": 1
}).insert()
attach_letterhead = args.get("attach_letterhead").split(",")
if len(attach_letterhead)==3:
filename, filetype, content = attach_letterhead
fileurl = save_file(filename, content, "Letter Head", _("Standard"), decode=True).file_url
frappe.db.set_value("Letter Head", _("Standard"), "content", "<img src='%s' style='max-width: 100%%;'>" % fileurl)
def create_logo(args):
if args.get("attach_logo"):
attach_logo = args.get("attach_logo").split(",")
if len(attach_logo)==3:
filename, filetype, content = attach_logo
fileurl = save_file(filename, content, "Website Settings", "Website Settings",
decode=True).file_url
frappe.db.set_value("Website Settings", "Website Settings", "brand_html",
"<img src='{0}' style='max-width: 40px; max-height: 25px;'> {1}".format(fileurl, args.get("company_name").strip()))
def create_territories():
"""create two default territories, one for home country and one named Rest of the World"""
from frappe.utils.nestedset import get_root_of
country = frappe.db.get_default("country")
root_territory = get_root_of("Territory")
for name in (country, _("Rest Of The World")):
if name and not frappe.db.exists("Territory", name):
frappe.get_doc({
"doctype": "Territory",
"territory_name": name.replace("'", ""),
"parent_territory": root_territory,
"is_group": "No"
}).insert()
def login_as_first_user(args):
if args.get("email") and hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.login_as(args.get("email"))
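# Create an Employee record for the logged-in user and, for every extra user entered
# in the wizard, a System User with the selected roles plus a matching Employee record.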
def create_users(args):
if frappe.session.user == 'Administrator':
return
# create employee for self
emp = frappe.get_doc({
"doctype": "Employee",
"employee_name": " ".join(filter(None, [args.get("first_name"), args.get("last_name")])),
"user_id": frappe.session.user,
"status": "Active",
"company": args.get("company_name")
})
emp.flags.ignore_mandatory = True
emp.insert(ignore_permissions = True)
for i in xrange(1,5):
email = args.get("user_email_" + str(i))
fullname = args.get("user_fullname_" + str(i))
if email:
if not fullname:
fullname = email.split("@")[0]
parts = fullname.split(" ", 1)
user = frappe.get_doc({
"doctype": "User",
"email": email,
"first_name": parts[0],
"last_name": parts[1] if len(parts) > 1 else "",
"enabled": 1,
"user_type": "System User"
})
# default roles
user.append_roles("Projects Manager", "Stock User", "Support Team")
if args.get("user_sales_" + str(i)):
user.append_roles("Sales User", "Sales Manager", "Accounts User")
if args.get("user_purchaser_" + str(i)):
user.append_roles("Purchase User", "Purchase Manager", "Accounts User")
if args.get("user_accountant_" + str(i)):
user.append_roles("Accounts Manager", "Accounts User")
user.flags.delay_emails = True
if not frappe.db.get_value("User", email):
user.insert(ignore_permissions=True)
# create employee
emp = frappe.get_doc({
"doctype": "Employee",
"employee_name": fullname,
"user_id": email,
"status": "Active",
"company": args.get("company_name")
})
emp.flags.ignore_mandatory = True
emp.insert(ignore_permissions = True)
def create_academic_term():
at = ["Semester 1", "Semester 2", "Semester 3"]
ay = ["2013-14", "2014-15", "2015-16", "2016-17", "2017-18"]
for y in ay:
for t in at:
academic_term = frappe.new_doc("Academic Term")
academic_term.academic_year = y
academic_term.term_name = t
try:
academic_term.save()
except frappe.DuplicateEntryError:
pass
def create_academic_year():
ac = ["2013-14", "2014-15", "2015-16", "2016-17", "2017-18"]
for d in ac:
academic_year = frappe.new_doc("Academic Year")
academic_year.academic_year_name = d
try:
academic_year.save()
except frappe.DuplicateEntryError:
pass
def create_program(args):
for i in xrange(1,6):
if args.get("program_" + str(i)):
program = frappe.new_doc("Program")
program.program_code = args.get("program_" + str(i))
program.program_name = args.get("program_" + str(i))
try:
program.save()
except frappe.DuplicateEntryError:
pass
def create_course(args):
for i in xrange(1,6):
if args.get("course_" + str(i)):
course = frappe.new_doc("Course")
course.course_code = args.get("course_" + str(i))
course.course_name = args.get("course_" + str(i))
try:
course.save()
except frappe.DuplicateEntryError:
pass
def create_instructor(args):
for i in xrange(1,6):
if args.get("instructor_" + str(i)):
instructor = frappe.new_doc("Instructor")
instructor.instructor_name = args.get("instructor_" + str(i))
try:
instructor.save()
except frappe.DuplicateEntryError:
pass
def create_room(args):
for i in xrange(1,6):
if args.get("room_" + str(i)):
room = frappe.new_doc("Room")
room.room_name = args.get("room_" + str(i))
room.seating_capacity = args.get("room_capacity_" + str(i))
try:
room.save()
except frappe.DuplicateEntryError:
pass
| bpshetty/erpnext | erpnext/setup/setup_wizard/setup_wizard.py | Python | gpl-3.0 | 18,648 | 0.030566 |
#!/usr/bin/env python
import arrow
import nest
# Examples of use
def set_duration(duration=15):
    if nest.get_variable('is_online') == True:
        if nest.get_variable('has_fan') == True:
            # set_variable is assumed here (mirroring fan_on); a getter call cannot apply the new duration
            nest.set_variable('fan_timer_duration', duration)
            return True
    return False
def fan_on():
if nest.get_variable('is_online') == True:
if nest.get_variable('has_fan') == True:
print nest.set_variable('fan_timer_active', True)
return True
return False
def is_running():
if nest.get_variable('is_online') == True:
if nest.get_variable('hvac_state') != 'off':
return True
return False
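# too_damn_hot() relies on a get_timeout() helper that is not defined in this file;
# once that timestamp has passed and the HVAC is idle, the fan is switched on for the
# default duration.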
def too_damn_hot():
if arrow.get(get_timeout()) < arrow.now():
if not is_running():
set_duration()
fan_on()
if __name__ == '__main__':
print is_running()
| gb1035/simple_nest | examples.py | Python | gpl-3.0 | 855 | 0.014035 |
from django.template import Library, Node, resolve_variable, TemplateSyntaxError
from django.core.urlresolvers import reverse
register = Library()
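# Usage in a template: {% active request pattern %} - returns the CSS class "active"
# when the current request path matches the given regex, otherwise an empty string.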
@register.simple_tag
def active(request, pattern):
import re
if re.search(pattern, request.get_full_path()):
return 'active'
return '' | Kami/munin_exchange | munin_exchange/apps/core/templatetags/navclass.py | Python | bsd-3-clause | 307 | 0.013029 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameRequest(Model):
"""CheckNameRequest.
:param name: Workspace collection name
:type name: str
:param type: Resource type. Default value:
"Microsoft.PowerBI/workspaceCollections" .
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, name=None, type="Microsoft.PowerBI/workspaceCollections"):
self.name = name
self.type = type
| rjschwei/azure-sdk-for-python | azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/check_name_request.py | Python | mit | 1,018 | 0.001965 |
from __future__ import unicode_literals
import os.path
import optparse
import shlex
import sys
from .downloader.external import list_external_downloaders
from .compat import (
compat_expanduser,
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
)
from .utils import (
preferredencoding,
write_string,
)
from .version import __version__
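# parseOpts builds the optparse parser for all youtube-dl options. Unless
# overrideArguments is supplied (or --ignore-config is used), options are read from
# /etc/youtube-dl.conf, then the user config, then the command line, with later
# sources taking precedence.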
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
res = []
for l in optionf:
res += shlex.split(l, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
''' ('-o', '--option') -> -o, --format METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return "".join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
def _hide_login_info(opts):
opts = list(opts)
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
try:
i = opts.index(private_opt)
opts[i + 1] = 'PRIVATE'
except ValueError:
pass
return opts
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'-v', '--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to (experimental)',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4 (experimental)',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6 (experimental)',
)
network.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some Chinese sites. '
'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading. (experimental)'
)
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
        help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (regex or caseless sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (regex or caseless sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='Download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter (experimental). '
'Specify any key (see help for -o for a list of available keys) to'
' match if the key is present, '
            '!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, and '
'& to require multiple matches. '
'Values which are not known are excluded unless you'
            ' put a question mark (?) after the operator. '
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor auth code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, smotri)')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatic subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected filesize (experimental)')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true',
help='Use the native HLS downloader instead of ffmpeg (experimental)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help='Number of seconds to sleep before each download.')
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See --output for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help='File containing URLs to download (\'-\' for stdin)')
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template. Use %(title)s to get the title, '
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
'%(autonumber)s to get an automatically incremented number, '
'%(ext)s for the filename extension, '
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
'%(format_id)s for the unique id of the format (like YouTube\'s itags: "137"), '
'%(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id, '
'%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist. '
'%(height)s and %(width)s for the width and height of the video format. '
'%(resolution)s for a textual description of the resolution of the video format. '
'%% for a literal percent. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help='[deprecated] Use title in file name (default)')
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help='[deprecated] Alias of --title')
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail images')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mkv and mp4 videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output, '
'the parsed parameters replace existing values. '
'Additional templates: %(album)s, %(artist)s. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subtitles', '--convert-subs',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
if '--ignore-config' in command_line_conf:
system_conf = []
user_conf = []
else:
system_conf = compat_conf(_readOptions('/etc/youtube-dl.conf'))
if '--ignore-config' in system_conf:
user_conf = []
else:
user_conf = compat_conf(_readUserConf())
argv = system_conf + user_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
write_string('[debug] System config: ' + repr(_hide_login_info(system_conf)) + '\n')
write_string('[debug] User config: ' + repr(_hide_login_info(user_conf)) + '\n')
write_string('[debug] Command-line args: ' + repr(_hide_login_info(command_line_conf)) + '\n')
return parser, opts, args
| lulufei/youtube-dl | youtube_dl/options.py | Python | unlicense | 36,996 | 0.002054 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for metadata service."""
import base64
from copy import copy
import json
import re
import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova import block_device
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
from nova.network import api as network_api
from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_network
CONF = cfg.CONF
USER_DATA_STRING = ("This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
INSTANCES = (
{'id': 1,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'name': 'fake',
'project_id': 'test',
'key_name': "mykey",
'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
'host': 'test',
'launch_index': 1,
'instance_type': {'name': 'm1.tiny'},
'reservation_id': 'r-xxxxxxxx',
'user_data': ENCODE_USER_DATA_STRING,
'image_ref': 7,
'vcpus': 1,
'fixed_ips': [],
'root_device_name': '/dev/sda1',
'info_cache': {'network_info': []},
'hostname': 'test.novadomain',
'display_name': 'my_displayname',
},
)
def return_non_existing_address(*args, **kwarg):
raise exception.NotFound()
def fake_InstanceMetadata(stubs, inst_data, address=None,
sgroups=None, content=[], extra_md={}):
if sgroups is None:
sgroups = [{'name': 'default'}]
def sg_get(*args, **kwargs):
return sgroups
stubs.Set(api, 'security_group_get_by_instance', sg_get)
return base.InstanceMetadata(inst_data, address=address,
content=content, extra_md=extra_md)
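# fake_request pushes a blank webob request for `relpath` through MetadataRequestHandler,
# optionally stubbing the lookup-by-remote-address and lookup-by-instance-id methods.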
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
fake_get_metadata=None, headers=None,
fake_get_metadata_by_instance_id=None):
def get_metadata_by_remote_address(address):
return mdinst
app = handler.MetadataRequestHandler()
if fake_get_metadata is None:
fake_get_metadata = get_metadata_by_remote_address
if stubs:
stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
if fake_get_metadata_by_instance_id:
stubs.Set(app, 'get_metadata_by_instance_id',
fake_get_metadata_by_instance_id)
request = webob.Request.blank(relpath)
request.remote_addr = address
if headers is not None:
request.headers.update(headers)
response = request.get_response(app)
return response
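# The test cases below cover the EC2-style metadata tree, the OpenStack metadata tree
# and the WSGI handler (including X-Forwarded-For and the Quantum proxy instance-id path).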
class MetadataTestCase(test.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.instance = INSTANCES[0]
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
def test_user_data(self):
inst = copy(self.instance)
inst['user_data'] = base64.b64encode("happy")
md = fake_InstanceMetadata(self.stubs, inst)
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
def test_no_user_data(self):
inst = copy(self.instance)
del inst['user_data']
md = fake_InstanceMetadata(self.stubs, inst)
obj = object()
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
obj)
def test_security_groups(self):
inst = copy(self.instance)
sgroups = [{'name': 'default'}, {'name': 'other'}]
expected = ['default', 'other']
md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['security-groups'], expected)
def test_local_hostname_fqdn(self):
md = fake_InstanceMetadata(self.stubs, copy(self.instance))
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
"""Make sure that _format_instance_mappings works"""
ctxt = None
instance_ref0 = {'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None}
instance_ref1 = {'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1'}
def fake_bdm_get(ctxt, uuid):
return [{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral0',
'delete_on_termination': None,
'device_name': '/dev/sdb'}]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdm_get)
expected = {'ami': 'sda1',
'root': '/dev/sda1',
'ephemeral0': '/dev/sdb',
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
self.assertEqual(base._format_instance_mapping(ctxt, instance_ref0),
block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(ctxt, instance_ref1),
expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self.stubs, copy(self.instance))
pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
self.assertEqual(base.ec2_md_print(pubkey_ent),
"0=%s" % self.instance['key_name'])
self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
self.instance['key_data'])
def test_image_type_ramdisk(self):
inst = copy(self.instance)
inst['ramdisk_id'] = 'ari-853667c0'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/latest/meta-data/ramdisk-id")
self.assertTrue(data is not None)
self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
def test_image_type_kernel(self):
inst = copy(self.instance)
inst['kernel_id'] = 'aki-c2e26ff2'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/2009-04-04/meta-data/kernel-id")
self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
self.assertEqual(
md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
del inst['kernel_id']
md = fake_InstanceMetadata(self.stubs, inst)
self.assertRaises(base.InvalidMetadataPath,
md.lookup, "/2009-04-04/meta-data/kernel-id")
def test_check_version(self):
inst = copy(self.instance)
md = fake_InstanceMetadata(self.stubs, inst)
self.assertTrue(md._check_version('1.0', '2009-04-04'))
self.assertFalse(md._check_version('2009-04-04', '1.0'))
self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
class OpenStackMetadataTestCase(test.TestCase):
def setUp(self):
super(OpenStackMetadataTestCase, self).setUp()
self.instance = INSTANCES[0]
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
def test_top_level_listing(self):
# request for /openstack/<version>/ should show metadata.json
inst = copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
listing = mdinst.lookup("/openstack/")
result = mdinst.lookup("/openstack")
# trailing / should not affect anything
self.assertEqual(result, mdinst.lookup("/openstack"))
# the 'content' should not show up in directory listing
self.assertTrue(base.CONTENT_DIR not in result)
self.assertTrue('2012-08-10' in result)
self.assertTrue('latest' in result)
def test_version_content_listing(self):
# request for /openstack/<version>/ should show metadata.json
inst = copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
listing = mdinst.lookup("/openstack/2012-08-10")
self.assertTrue("meta_data.json" in listing)
def test_metadata_json(self):
inst = copy(self.instance)
content = [
('/etc/my.conf', "content of my.conf"),
('/root/hello', "content of /root/hello"),
]
mdinst = fake_InstanceMetadata(self.stubs, inst,
content=content)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = json.loads(mdjson)
self.assertEqual(mddict['uuid'], self.instance['uuid'])
self.assertTrue('files' in mddict)
self.assertTrue('public_keys' in mddict)
self.assertEqual(mddict['public_keys'][self.instance['key_name']],
self.instance['key_data'])
self.assertTrue('launch_index' in mddict)
self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
# verify that each of the things we put in content
# resulted in an entry in 'files', that their content
# there is as expected, and that /content lists them.
for (path, content) in content:
fent = [f for f in mddict['files'] if f['path'] == path]
self.assertTrue((len(fent) == 1))
fent = fent[0]
found = mdinst.lookup("/openstack%s" % fent['content_path'])
self.assertEqual(found, content)
def test_extra_md(self):
# make sure extra_md makes it through to metadata
inst = copy(self.instance)
extra = {'foo': 'bar', 'mylist': [1, 2, 3],
'mydict': {"one": 1, "two": 2}}
mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = json.loads(mdjson)
for key, val in extra.iteritems():
self.assertEqual(mddict[key], val)
def test_userdata(self):
inst = copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
self.assertEqual(USER_DATA_STRING, userdata_found)
# since we had user-data in this instance, it should be in listing
self.assertTrue('user_data' in mdinst.lookup("/openstack/2012-08-10"))
del inst['user_data']
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
def setUp(self):
super(MetadataHandlerTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
self.instance = INSTANCES[0]
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
def test_root(self):
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self.stubs, self.mdinst, "/")
self.assertEqual(response.body, expected)
response = fake_request(self.stubs, self.mdinst, "/foo/../")
self.assertEqual(response.body, expected)
def test_version_root(self):
response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
self.assertEqual(response.body, 'meta-data/\nuser-data')
response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
self.assertEqual(response.status_int, 404)
def test_user_data_non_existing_fixed_address(self):
self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
return_non_existing_address)
response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
"127.1.1.1")
self.assertEqual(response.status_int, 404)
def test_fixed_address_none(self):
response = fake_request(None, self.mdinst,
relpath="/2009-04-04/user-data", address=None)
self.assertEqual(response.status_int, 500)
def test_invalid_path_is_404(self):
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data-invalid")
self.assertEqual(response.status_int, 404)
def test_user_data_with_use_forwarded_header(self):
expected_addr = "192.192.192.2"
def fake_get_metadata(address):
if address == expected_addr:
return self.mdinst
else:
raise Exception("Expected addr of %s, got %s" %
(expected_addr, address))
self.flags(use_forwarded_for=True)
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers={'X-Forwarded-For': expected_addr})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers=None)
self.assertEqual(response.status_int, 500)
def test_user_data_with_quantum_instance_id(self):
expected_instance_id = 'a-b-c-d'
def fake_get_metadata(instance_id, remote_address):
if instance_id == expected_instance_id:
return self.mdinst
else:
# raise the exception to aid with 500 response code test
raise Exception("Expected instance_id of %s, got %s" %
(expected_instance_id, instance_id))
signed = ('d98d0dd53b026a24df2c06b464ffa5da'
'db922ae41af7bd3ecc3cae75aef65771')
# try a request with service disabled
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
headers={'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
# now enable the service
self.flags(service_quantum_metadata_proxy=True)
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
headers={'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
headers={'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': ''})
self.assertEqual(response.status_int, 403)
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
headers={'X-Instance-ID': 'z-z-z-z',
'X-Instance-ID-Signature': '81f42e3fc77ba3a3e8d83142746e0'
'8387b96cbc5bd2474665192d2ec28'
'8ffb67'})
self.assertEqual(response.status_int, 500)
| aristanetworks/arista-ovs-nova | nova/tests/test_metadata.py | Python | apache-2.0 | 17,823 | 0.000673 |
import os
from scytale import create_app
from scytale.ciphers import MixedAlphabet, Playfair, Fleissner, Trifid, Myszkowski
from scytale.models import db, Group, Message
from scytale.forms import MessageForm
def create_group(name):
print("Creating admin group ({})".format(name))
group = Group()
group.name = name
group.set_password(os.environ["ADMIN_PASSWORD"])
return group
def create_message(group, cipher, key, plaintext, ciphertext):
print("Creating message:")
print(" Group:", group.name)
print(" Cipher:", cipher)
print(" Key:", key)
print(" Plaintext:", plaintext)
print(" Ciphertext:", ciphertext)
form = MessageForm(cipher=cipher, key=key, plaintext=plaintext, ciphertext=ciphertext, csrf_enabled=False)
if form.validate():
m = Message()
m.group = group
m.cipher = cipher
m.key = key
m.plaintext = plaintext
m.ciphertext = ciphertext
return m
raise Exception('Invalid message: ' + str(form.errors))
def create_admin_messages(group):
# yield create_message(
# group,"Checkerboard", "RAIN OTS EQWYUPDFGHJKLZXCVBM_.",
# "WELCOME TO VILLIERS PARK",
# "419818458798865888528181290788441080")
# yield create_message(
# group, "Checkerboard", "RAIN OTS EQWYUPDFGHJKLZXCVBM_.",
# "I THINK YOU MEAN DRAUGHTS BOARD",
# "419818458798865888528181290788441080")
# yield create_message(
# group, "Mixed Alphabet", "QWERTYUIOPASDFG_HJKLZXCVBNM",
# "WELCOME TO VILLIERS PARK",
# "CTSEGDTMLGMXOSSOTJKM QJA")
# yield create_message(
# group, "Mixed Alphabet", "QWERTYUIOPASDFG_HJKLZXCVBNM",
# "BETTER THAN CAESAR",
# "WTLLTJMLIQFMEQTKQJ")
yield create_message(
group, "Playfair", "ILKENCRYPTOABDFGHMQSUVWXZ",
"WELCOME TO VILLIERS PARK",
"XKIRBGNPAULKKLLPQTHAEW")
yield create_message(
group, "Playfair", "ILKENCRYPTOABDFGHMQSUVWXZ",
"YOU SHOULD ALWAYS PLAYFAIR",
"CBZGGAVIFBKVBRQTRHTBOLPV")
yield create_message(
group, "Trifid", "QWERTYUIOPASDFGHJKLZXCVBNM_",
"WELCOME TO VILLIERS PARK",
"QBZMUILSEOLXXQVTCZMMRCNY")
yield create_message(
group, "Trifid", "QWERTYUIOPASDFGHJKLZXCVBNM_",
"THE DAY OF THE TRIFIDS",
"CHBVOGVWZYSPUPFXSMHMAY")
yield create_message(
group, "Fleissner",
"XooXooooooXoXoooXoooXXoXoooooooooXoXoooXooooXoooXoXoooXXoooooooo",
"WELCOME TO VILLIERS PARK",
"WEXEXRXSXXL CXXXOPXXMEA XXXRXXXKXTXOXXX XXXXVXXXIXLXXXLIXXXXXXXX")
yield create_message(
group, "Fleissner",
"XooXooooooXoXoooXoooXXoXoooooooooXoXoooXooooXoooXoXoooXXoooooooo",
"FLEISSNER IS A FUNNY NAME",
"FUXLXNXNXXEYIXXXS XXSNNEXXXAXXXMXRE XXXIXXXXSXXX XAXXX FXXXXXXXX")
# yield create_message(
# group, "Rail Fence", "5",
# "WELCOME TO VILLIERS PARK",
# "WTEE OIRKLE LSRCMVL AOIP")
# yield create_message(
# group, "Rail Fence", "5",
# "RAIL FENCE CIPHERS RULE",
# "RCRANEESIE H ELFCPRL IU")
yield create_message(
group, "Myszkowski", "VILLIERS",
"WELCOME TO VILLIERS PARK",
"MLAEOOIRPLC VS ELR IKWTE")
yield create_message(
group, "Myszkowski", "VILLIERS",
"HOW DO YOU SPELL MYSZKOWSKI",
"OEK ODUPMZK W SYSI LO YLW HO S")
# yield create_message(
# group, "Permutation", "VILLIERS",
# "WELCOME TO VILLIERS PARK",
# "MEOLCE WLOI VLITARPS RKE")
# yield create_message(
# group, "Permutation", "VILLIERS",
# "ISNT THAT A HAIRSTYLE",
# "TS NTHAIA HA IRT TEYL S")
def create_h4x0r_messages(group):
# Short messages that can be brute forced
messages = [
"BLACK", "BLUE", "BROWN", "GREEN", "ORANGE", "PINK", "PURPLE", "RED",
"WHITE", "YELLOW"
]
cipher = MixedAlphabet(key="FNGZPOXKT_HDLWEMQJRVCSYIBUA")
for message in messages:
yield create_message(
group,
"Mixed Alphabet",
cipher.key,
message,
cipher.encrypt(message))
# Long message that can be frequency analysed
cipher = MixedAlphabet(key="IGLKWSREJDCANUFBZYP_THMVXQO")
message = "ALICE WAS BEGINNING TO GET VERY TIRED OF SITTING BY HER SISTER ON THE BANK AND OF HAVING NOTHING TO DO ONCE OR TWICE SHE HAD PEEPED INTO THE BOOK HER SISTER WAS READING BUT IT HAD NO PICTURES OR CONVERSATIONS IN IT AND WHAT IS THE USE OF A BOOK THOUGHT ALICE WITHOUT PICTURES OR CONVERSATION"
yield create_message(
group,
"Mixed Alphabet",
cipher.key,
message,
cipher.encrypt(message))
# Messages that build on each other to construct the key
messages = [
"FIRST A LONG MESSAGE THAT HAS MOST LETTERS IN IT NOT ALL BUT MOST",
"THEN A VERY SHORT MESSAGE",
"FOLLOWED BY AN EXCEPTIONAL ONE"
]
cipher = MixedAlphabet(key="AXT UWVCFGIMBOSNHZRYKEDJLPQ")
for message in messages:
yield create_message(
group,
"Mixed Alphabet",
cipher.key,
message,
cipher.encrypt(message))
def create_activity_messages(group, ciphers, trifid, messages):
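    # Builds the puzzle chain for one group: the first six plaintexts are split
    # across the three ciphers (each used twice), the first two ciphers then each
    # encrypt a hint revealing the next cipher's key, the three rows of the Trifid
    # key are revealed one per cipher, and the final Trifid message is created but
    # deliberately not yielded.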
for c, pt in zip(ciphers * 2, messages):
yield create_message(
group,
cipher=c.name,
key=c.key,
plaintext=pt,
ciphertext=c.encrypt(pt))
pt = 'The {} key is {}'.format(ciphers[1].name, ciphers[1].key)
yield create_message(
group,
cipher=ciphers[0].name,
key=ciphers[0].key,
plaintext=pt,
ciphertext=ciphers[0].encrypt(pt))
pt = 'The {} key is {}'.format(ciphers[2].name, ciphers[2].key)
yield create_message(
group,
cipher=ciphers[1].name,
key=ciphers[1].key,
plaintext=pt,
ciphertext=ciphers[1].encrypt(pt))
pt = 'Line one of the Trifid key is {}'.format(trifid.key[0:9])
yield create_message(
group,
cipher=ciphers[0].name,
key=ciphers[0].key,
plaintext=pt,
ciphertext=ciphers[0].encrypt(pt))
pt = 'Line two of the Trifid key is {}'.format(trifid.key[9:18])
yield create_message(
group,
cipher=ciphers[1].name,
key=ciphers[1].key,
plaintext=pt,
ciphertext=ciphers[1].encrypt(pt))
pt = 'Line three of the Trifid key is {}'.format(trifid.key[18:27])
yield create_message(
group,
cipher=ciphers[2].name,
key=ciphers[2].key,
plaintext=pt,
ciphertext=ciphers[2].encrypt(pt))
pt = 'CORRECT HORSE BATTERY STAPLE'
# Don't return this one, don't want it on the site, where known plaintext could be used.
create_message(
group,
cipher='Trifid',
key=trifid.key,
plaintext=pt,
ciphertext=trifid.encrypt(pt))
def create_babbage_messages(group):
playfair = Playfair(key='SCQOGUERPBKVWYZFDAHIMTLNX')
myszkowski = Myszkowski(key='ENGINE')
fleissner = Fleissner(key='ooooooXoooooXooXooooooooXoXoXoXoooooooooXXXoXooXooXoooXoXoooXooo')
trifid = Trifid(key='HFERKDGNOQVJTMYP_AXSUBZWLIC')
messages = [
'CHARLES BABBAGE WAS BORN IN THE YEAR SEVENTEEN HUNDRED AND NINETY ONE',
'BABBAGE INVENTED THE IDEA OF A DIGITAL PROGRAMMABLE COMPUTER',
'CB IS CONSIDERED TO BE ONE OF THE FATHERS OF THE COMPUTER',
'HIS MOST FAMOUS INVENTION IS THE DIFFERENCE ENGINE',
'ON THE ECONOMY OF MACHINERY AND MANUFACTURES',
'BABBAGE CRACKED THE UNCRACKABLE VIGENERE CIPHER',
]
yield from create_activity_messages(group, [playfair, myszkowski, fleissner], trifid, messages)
def create_friedman_messages(group):
playfair = Playfair(key='ADUQYKOMRETGVFHNWLXZCIPBS')
myszkowski = Myszkowski(key='INDIANA')
fleissner = Fleissner(key='oooXooooXXoXXooXXXooooXooooooooooooXoXoXXoXoXooooooooooooooooooX')
trifid = Trifid(key='V_WLNJDOGRMHIPXKYQSETBZUAFC')
messages = [
'AMERICAS FIRST FEMALE CRYPTANALYST',
'FRIEDMAN WAS BORN IN EIGHTEEN NINETY TWO',
'CRACKED CODES AT RIVERBANK DURING WORLD WAR ONE',
'USED CRYPTANALYSIS TO STOP SMUGGLING AND BOOTLEGGING',
'SHE WORKED FOR THE US NAVY THE TREASURY DEPARTMENT AND THE COAST GUARD',
'THE SHAKESPEAREAN CIPHERS EXAMINED'
]
yield from create_activity_messages(group, [myszkowski, fleissner, playfair], trifid, messages)
def create_driscoll_messages(group):
playfair = Playfair(key='CTOEFMUHYISAWNGQRZXLDKBVP')
myszkowski = Myszkowski(key='ILLINOIS')
fleissner = Fleissner(key='oooooooXXoooXoooXoXooooXXooooXoXooooXXooooooooXoXXooXXoooooooooo')
trifid = Trifid(key='VAMZWXKSYONFTDUCHIBERGPJL_Q')
messages = [
'AGNES MEYER DRISCOLL WAS BORN IN EIGHTEEN EIGHTY NINE',
'SHE WAS ALSO KNOWN AS MADAME X',
'SHE WAS WITHOUT PEER AS A CRYPTANALYST',
'DRISCOLL WORKED FOR THE US NAVY IN WORLD WAR ONE AND TWO',
'SHE IS IN THE NATIONAL SECURITY AGENCYS HALL OF HONOR',
'AGNES CRACKED JAPANESE NAVAL CODES INCLUDING THE RED AND BLUE BOOK CODES'
]
yield from create_activity_messages(group, [fleissner, playfair, myszkowski], trifid, messages)
def create_tutte_messages(group):
playfair = Playfair(key='KQSGLRYTWEUXBFPVDHMNZOCAI')
myszkowski = Myszkowski(key='TUNNY')
fleissner = Fleissner(key='oXXXooXXooXoXXooooooXooooooXoooooXooooooooooXXoXoXoooooooooXoooo')
trifid = Trifid(key='_AWCDPYSEKQORNHBTLGJMVFIZXU')
messages = [
'WILLIAM THOMAS TUTTE WORKED AT BLETCHLEY PARK CRACKING GERMAN CIPHERS',
'BILL WAS BORN IN SUFFOLK IN NINETEEN SEVENTEEN',
'TUTTE WAS INSTRUMENTAL IN BREAKING THE GERMAN LORENZ CIPHER',
'AN ALGEBRAIC THEORY OF GRAPHS',
'TUTTE PERFORMED ONE OF THE GREATEST INTELLECTUAL FEATS OF WORLD WAR TWO',
'UNLIKE IN THE MOVIE TUTTE DID NOT WORK DIRECTLY WITH TURING'
]
yield from create_activity_messages(group, [myszkowski, fleissner, playfair], trifid, messages)
def create_rivest_messages(group):
playfair = Playfair(key='PCGDAVESFOHMUWZLYNBRXKIQT')
myszkowski = Myszkowski(key='CLIFFORD')
fleissner = Fleissner(key='oXXXooXoooXoXoooXooooXooXooooXXooooXoooooooXooXoooooooXooooooooX')
trifid = Trifid(key='AOGMPWEDZRCIBH_XTLVUQSNKYJF')
messages = [
'RONALD LINN RIVEST WAS BORN IN NINETEEN FOURTY SEVEN',
'RIVEST IS ONE OF THE INVENTORS OF THE RSA ALGORITHM',
        'RON ALSO AUTHORED MANY OTHER ENCRYPTION ALGORITHMS',
'RONALD WORKS AS A CRYPTOGRAPHER AND INSTITUTE PROFESSOR AT MIT',
'RIVEST WAS GIVEN A TURING AWARD IN TWO THOUSAND AND TWO',
'RSA IS ONE OF THE FIRST PRACTICAL PUBLIC KEY CIPHERS IT IS USED EVERYWHERE'
]
yield from create_activity_messages(group, [playfair, myszkowski, fleissner], trifid, messages)
def create_diffie_messages(group):
playfair = Playfair(key='LSCZGADKUORFXTHQNBEPYMWIV')
myszkowski = Myszkowski(key='HELLMAN')
fleissner = Fleissner(key='ooooooooooooXoooXoooXoXXXoooXXXoooooooooooXooooooXXooooXooooXoXX')
trifid = Trifid(key='CSQY_UBNVDWKPGERLTZJOHFAXMI')
messages = [
'BAILEY WHITFIELD DIFFIE WAS BORN IN NINETEEN FOURTY FOUR',
'WHIT WAS THE COCREATOR OF DIFFIE HELLMAN KEY EXCHANGE',
'DIFFIE GRADUATED FROM MIT IN NINETEEN SIXTY FIVE',
'WITHOUT BAILEYS WORK THE INTERNET WOULD NOT BE POSSIBLE',
'NEW DIRECTIONS IN CRYPTOGRAPHY',
'HE HELPED DEVELOP THE FUNDAMENTAL IDEAS BEHIND PUBLIC KEY CIPHERS'
]
yield from create_activity_messages(group, [fleissner, myszkowski, playfair], trifid, messages)
if __name__ == '__main__':
with create_app().app_context():
a = create_group("Billy")
db.session.add(a)
for m in create_admin_messages(a):
db.session.add(m)
a.give_point(1, "Sent Message", m)
h = create_group("L33t H4x0r")
db.session.add(h)
for m in create_h4x0r_messages(h):
db.session.add(m)
h.give_point(1, "Sent Message", m)
g = create_group("Babbage")
for m in create_babbage_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
g = create_group("Friedman")
for m in create_friedman_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
g = create_group("Driscoll")
for m in create_driscoll_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
g = create_group("Tutte")
for m in create_tutte_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
g = create_group("Rivest")
for m in create_rivest_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
g = create_group("Diffie")
for m in create_diffie_messages(g):
g.give_point(1, "Sent Message", m)
db.session.add(m)
db.session.commit()
| WilliamMayor/scytale.xyz | scripts/seed/seeder.py | Python | mit | 13,201 | 0.001667 |
# mesa - toolkit for building dynamic python apps with zero downtime
# basis: the package is inspected for all instances of the specified abc and each is added to an internal mesa list
# Casa is a mesa obj instantiated as the holder of the dynamic obj list, one for each abc type in the specified package
# m = mesa.Casa(hideExceptions=False) - the parameter instructs whether to raise an exception on the existence check of methods to run against the abc method list
# Mesa.run('method name') = for methods; executes the named method against each concrete class in the package, with a check to ensure the method name exists in the abc
# Mesa.generate('method name') = for functions; a generator that emits the results from calls to the specified function name in each concrete class; performs the same check
#
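# rough usage sketch (assumed API, illustrative only; the method names below are hypothetical):
#   import mesa
#   m = mesa.Casa(hideExceptions=False)   # inspect a package for concrete subclasses of the abc
#   m.run('setup')                        # call setup() on every discovered concrete class
#   for result in m.generate('handle'):   # yield handle() results from each concrete class
#       print(result)
#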
# house recipes
# event-driven message-passing based app framework - each casa contains a specific route or flow
# wsgi based simple mvc web framework using 2bit as templating language. single casa for all pages
# DOTO: decide best way to test, some scenarios require file io but no clicks required - simple unit tests
# DOTO: generate is a generator yielding a dictionary of results
# DOTO: check flickoutr and how to dynamically create classes with parameters
# DOTO: auth - way to supply callback for required input fields collection from ui
# DOTO: base.Casa appears to pass its own instance as self to the called module. Unsure what the side effects are?
# DOTO: utility interface to implement by client app to take care of input for each specific data type
# DOTO: accompanying Method utility so that, once required args are declared, they are handled elegantly
#       ie no passing from interface to host back to interface like it is in the unit test right now
# TODO: meta methods that build on the basic iterating methods to abstract away iteration from caller
# TODO: check for abc type conformance
# TODO: at the minute the convention is that a dynamic module contains one class of the same name. Change to support all/others
# TODO: mesa test suit scenarios:
# build a casa, add class, rebuild casa
# build casa, call method not in abc
# build casa with concrete class not implementing an abc method
| rutherford/mesa | TODO.py | Python | bsd-2-clause | 2,149 | 0.008841 |
import os
import re
import nltk
from nltk.tag import tnt
from modules import cleaner
class Location:
def __init__(self):
train_data = []
with open(os.path.join(os.path.dirname(__file__), 'tagged_locations.txt'), 'r') as f:
for line in f:
train_data.append([nltk.tag.str2tuple(t) for t in line.split()])
self.tnt_pos_tagger = tnt.TnT()
self.tnt_pos_tagger.train(train_data)
grammar = r"""
LOC: {(<PRFX><PRFX>*<B-LOC><I-LOC>*)|(<B-LOC><I-LOC>*)}
"""
self.cp = nltk.RegexpParser(grammar)
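        # The chunk rule above groups a LOC as one or more PRFX tokens followed by
        # a B-LOC and any number of I-LOC tokens, or as a bare B-LOC/I-LOC run,
        # mirroring the BIO-style tags used in tagged_locations.txt.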
def get_locations(self, tweet):
tweet = cleaner.clean(tweet)
tagged_chunked_tweet = self.cp.parse(self.tnt_pos_tagger.tag(nltk.word_tokenize(tweet)))
locations = []
for subtree in tagged_chunked_tweet.subtrees():
if subtree.label() == 'LOC':
location = []
for leave in subtree.leaves():
location.append(leave[0])
locations.append(' '.join(location))
return locations
def is_first_loc_similar(self, text1, text2):
try:
loc1 = self.get_locations(text1)[0]
except IndexError as e:
loc1 = ''
try:
loc2 = self.get_locations(text2)[0]
except IndexError as e:
loc2 = ''
        return loc1 == loc2
 | dwiajik/twit-macet-mining-v2 | modules/location.py | Python | mit | 1,393 | 0.003589 |
#
# Author: Endre Karlson <endre.karlson@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cliff.command import Command as CliffCommand
from cliff.lister import Lister
from cliff.show import ShowOne
from fakturo.core import utils
class Command(CliffCommand):
api = None
action = None
@property
def name(self):
"""
The name of the command
api-action like account-create
"""
if self.api is None or self.action is None:
return None
return self.api + '-' + self.action
@property
def method_name(self):
return self.name.replace('-', '_') if self.name else None
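    # e.g. api='account', action='create' -> name 'account-create' and
    # method_name 'account_create'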
def get_parser(self, prog_name):
"""
        Override get_parser in order to get the equivalent from the Provider
        and extend its options
"""
parser = super(Command, self).get_parser(prog_name)
self.app.provider_manager.extend_parser(self.method_name, parser)
return parser
def execute(self, parsed_args):
"""
        Execute the call; this exists because we overload self.take_action()
        in order to format the data
:param parsed_args: The parsed args that are given by take_action()
"""
return self.app.provider_manager.execute(
self.method_name,
parsed_args,
self)
def post_execute(self, data):
"""
Format the results locally if needed, by default we just return data
:param data: Whatever is returned by self.execute()
"""
return data
def take_action(self, parsed_args):
"""
        Call self.execute to get data and then format it a bit with post_execute
"""
# TODO: Common Exception Handling Here
results = self.execute(parsed_args)
return self.post_execute(results)
class ListCommand(Command, Lister):
action = 'list'
def post_execute(self, results):
if len(results) > 0:
columns = utils.get_columns(results)
data = [utils.get_item_properties(i, columns) for i in results]
return columns, data
else:
return [], ()
class GetCommand(Command, ShowOne):
action = 'get'
def post_execute(self, results):
return results.keys(), results.values()
class CreateCommand(Command, ShowOne):
action = 'create'
def post_execute(self, results):
return results.keys(), results.values()
class UpdateCommand(Command, ShowOne):
action = 'update'
def post_execute(self, results):
return results.keys(), results.values()
class DeleteCommand(Command):
action = 'delete'
__all__ = ["Command", "ListCommand", "GetCommand", "CreateCommand",
"UpdateCommand", "DeleteCommand"]
| billingstack/python-fakturo | fakturo/core/cli/base.py | Python | apache-2.0 | 3,279 | 0 |
{
'name': "Tag Project/Task View",
'version': "1.0",
'author': 'TAG Small Biz Community',
'website': 'http://www.smallbiz.community',
'category': "Tools",
'data': ['task_view.xml','project_view.xml'
],
'demo': [],
'depends': ['project'],
'installable': True,
}
 | smartforceplus/SmartForceplus | openerp/addons/tag_project_tasks/__openerp__.py | Python | agpl-3.0 | 311 | 0.009646 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pkgsetcomp
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
'sphinxcontrib.programoutput']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pkgsetcomp'
copyright = u'2014, Wes Turner'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pkgsetcomp.__version__
# The full version, including alpha/beta/rc tags.
release = pkgsetcomp.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pkgsetcompdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pkgsetcomp.tex',
u'pkgsetcomp Documentation',
u'Wes Turner', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pkgsetcomp',
u'pkgsetcomp Documentation',
[u'Wes Turner'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pkgsetcomp',
u'pkgsetcomp Documentation',
u'Wes Turner',
'pkgsetcomp',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| westurner/pkgsetcomp | docs/conf.py | Python | bsd-3-clause | 8,492 | 0.005299 |
# coding:utf-8
import os
from db.MysqlHelper import MySqlConnection
from common.JsonHelper import loadJsonConfig
MYSQLCONNECTION = None
def initMysql():
global MYSQLCONNECTION
config = loadJsonConfig(os.path.abspath(os.path.join(os.getcwd(), "../config/db.json")))
MYSQLCONNECTION = MySqlConnection(config['host'], config['port'], config['user'], config['password'],
config['database'])
def execute(sql, parameter=None):
global MYSQLCONNECTION
MYSQLCONNECTION.execute(sql, parameter)
def select(sql, fetchall=True):
global MYSQLCONNECTION
return MYSQLCONNECTION.select(sql, fetchall)
def batchInsert(sql, parameters):
global MYSQLCONNECTION
MYSQLCONNECTION.batchInsert(sql, parameters)
def disconnect():
global MYSQLCONNECTION
MYSQLCONNECTION.disconnect()
| zwffff2015/stock | db/MysqlUtil.py | Python | mit | 850 | 0.002353 |
# coding: utf-8
from __future__ import unicode_literals
from ...strings import StringStore
import pytest
def test_string_hash(stringstore):
'''Test that string hashing is stable across platforms'''
ss = stringstore
assert ss.add('apple') == 8566208034543834098
heart = '\U0001f499'
print(heart)
h = ss.add(heart)
assert h == 11841826740069053588
def test_stringstore_from_api_docs(stringstore):
apple_hash = stringstore.add('apple')
assert apple_hash == 8566208034543834098
assert stringstore[apple_hash] == u'apple'
assert u'apple' in stringstore
assert u'cherry' not in stringstore
orange_hash = stringstore.add('orange')
all_strings = [s for s in stringstore]
assert all_strings == [u'apple', u'orange']
banana_hash = stringstore.add('banana')
assert len(stringstore) == 3
assert banana_hash == 2525716904149915114
assert stringstore[banana_hash] == u'banana'
assert stringstore[u'banana'] == banana_hash
@pytest.mark.parametrize('text1,text2,text3', [(b'Hello', b'goodbye', b'hello')])
def test_stringstore_save_bytes(stringstore, text1, text2, text3):
key = stringstore.add(text1)
assert stringstore[text1] == key
assert stringstore[text2] != key
assert stringstore[text3] != key
@pytest.mark.parametrize('text1,text2,text3', [('Hello', 'goodbye', 'hello')])
def test_stringstore_save_unicode(stringstore, text1, text2, text3):
key = stringstore.add(text1)
assert stringstore[text1] == key
assert stringstore[text2] != key
assert stringstore[text3] != key
@pytest.mark.parametrize('text', [b'A'])
def test_stringstore_retrieve_id(stringstore, text):
key = stringstore.add(text)
assert len(stringstore) == 1
assert stringstore[key] == text.decode('utf8')
with pytest.raises(KeyError):
stringstore[20000]
@pytest.mark.parametrize('text1,text2', [(b'0123456789', b'A')])
def test_stringstore_med_string(stringstore, text1, text2):
store = stringstore.add(text1)
assert stringstore[store] == text1.decode('utf8')
dummy = stringstore.add(text2)
assert stringstore[text1] == store
def test_stringstore_long_string(stringstore):
text = "INFORMATIVE](http://www.google.com/search?as_q=RedditMonkey&hl=en&num=50&btnG=Google+Search&as_epq=&as_oq=&as_eq=&lr=&as_ft=i&as_filetype=&as_qdr=all&as_nlo=&as_nhi=&as_occt=any&as_dt=i&as_sitesearch=&as_rights=&safe=off"
store = stringstore.add(text)
assert stringstore[store] == text
@pytest.mark.parametrize('factor', [254, 255, 256])
def test_stringstore_multiply(stringstore, factor):
text = 'a' * factor
store = stringstore.add(text)
assert stringstore[store] == text
def test_stringstore_massive_strings(stringstore):
text = 'a' * 511
store = stringstore.add(text)
assert stringstore[store] == text
text2 = 'z' * 512
store = stringstore.add(text2)
assert stringstore[store] == text2
text3 = '1' * 513
store = stringstore.add(text3)
assert stringstore[store] == text3
@pytest.mark.parametrize('text', ["qqqqq"])
def test_stringstore_to_bytes(stringstore, text):
store = stringstore.add(text)
serialized = stringstore.to_bytes()
new_stringstore = StringStore().from_bytes(serialized)
assert new_stringstore[store] == text
| aikramer2/spaCy | spacy/tests/stringstore/test_stringstore.py | Python | mit | 3,384 | 0.000887 |
#! /usr/bin/env python
# Copyright (C) 2012 Club Capra - capra.etsmtl.ca
#
# This file is part of CapraVision.
#
# CapraVision is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cv2
import cv2.cv as cv
import numpy as np
from CapraVision.server.filters.parameter import Parameter
from CapraVision.server.filters.dataextract import DataExtractor
class MTI880(DataExtractor):
def __init__(self):
DataExtractor.__init__(self)
self.hue_min = 113
self.hue_max = 255
self.area_min = 600
self.normal_hand = 0
self.extended_hand = 0
self.closed_hand = 0
self.amplitude = 0
self._capture_normal_hand = False
self._capture_extended_hand = False
self._capture_closed_hand = False
self._calibrate_hue = False
self.accumulate = []
self.observers = []
def add_observer(self, observer):
self.observers.append(observer)
def remove_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for obs in self.observers:
obs()
def execute(self, image):
image = cv2.cvtColor(image, cv2.cv.CV_BGR2HSV)
h, _, _ = cv2.split(image)
image[h < self.hue_min] *= 0
image[h > self.hue_max] *= 0
#image[image > 0] = 255
gray = cv2.cvtColor(image, cv.CV_BGR2GRAY)
cnt, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image *= 0
area, c = self.detect_biggest_area(gray)
if self._calibrate_hue and c is not None and area > self.area_min:
self.hue_min += 1
#self.notify_observers()
elif self._calibrate_hue and self.hue_min > 0:
print self.hue_min
self.notify_observers()
self._calibrate_hue = False
self.calibrate_closed_hand(area)
self.calibrate_extended_hand(area)
self.calibrate_normal_hand(area)
if c is not None and area >= self.area_min:
hull = cv2.convexHull(c)
cv2.drawContours(image, [hull],-1, (255,255,255), -1)
self.notify_output_observers(str(self.calc_return_value(area)) + "\n")
else:
self.notify_output_observers('0\n')
return image
def detect_biggest_area(self, gray):
cnt, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxarea = 0
maxcnt = None
for c in cnt:
approx = cv2.approxPolyDP(c, 0, False)
area = np.abs(cv2.contourArea(c))
if area > maxarea:
maxarea = area
maxcnt = c
return (maxarea, maxcnt)
def calibrate_normal_hand(self, area):
if self._capture_normal_hand:
self.accumulate.append(area)
if len(self.accumulate) == 10:
total = 0
for val in self.accumulate:
total += val
self._capture_normal_hand = False
self.normal_hand = total / 10
self.accumulate = []
self.notify_observers()
def calibrate_extended_hand(self, area):
if self._capture_extended_hand:
self.accumulate.append(area)
if len(self.accumulate) == 10:
total = 0
for val in self.accumulate:
total += val
self._capture_extended_hand = False
self.extended_hand = total / 10
self.accumulate = []
self.notify_observers()
def calibrate_closed_hand(self, area):
if self._capture_closed_hand:
self.accumulate.append(area)
if len(self.accumulate) == 10:
total = 0
for val in self.accumulate:
total += val
self._capture_closed_hand = False
self.closed_hand = total / 10
self.accumulate = []
self.notify_observers()
def calc_return_value(self, area):
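        # Returns +area when the blob grows past the calibrated normal hand by more
        # than amplitude% of the normal-to-extended range, -area when it shrinks
        # below normal by more than amplitude% of the closed-to-normal range, and 0
        # inside the dead band.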
if area > self.normal_hand:
diff = (self.extended_hand - self.normal_hand) * (self.amplitude / 100.0)
if area > (self.normal_hand + diff):
return area
else:
return 0
else:
diff = (self.normal_hand - self.closed_hand) * (self.amplitude / 100.0)
if area < (self.normal_hand - diff):
return -area
else:
return 0
| clubcapra/Ibex | src/seagoatvision_ros/scripts/CapraVision/server/filters/implementation/mti880.py | Python | gpl-3.0 | 5,241 | 0.006487 |
#!/usr/bin/env python
#
# SearchLIR.py
#
# Searches a local LIR file for RNA fragments that fits.
#
# http://iimcb.genesilico.pl/moderna/
#
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
import os
from numpy import array
# from rna_tools.tools.mini_moderna3.moderna.fragment_library.LIR import Lir, LirRecord
from rna_tools.tools.mini_moderna3.moderna.ModernaStructure import ModernaStructure
from rna_tools.tools.mini_moderna3.moderna.ModernaFragment import keep_first_last
from rna_tools.tools.mini_moderna3.moderna.FragmentInsertion import FragmentInserter
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaSequence import Sequence
from rna_tools.tools.mini_moderna3.moderna.fragment_library.StructureLibrary import library
from rna_tools.tools.mini_moderna3.moderna.util.Errors import LirError, SearchLirError
from rna_tools.tools.mini_moderna3.moderna.Constants import PATH_TO_LIR_STRUCTURES, \
LIR_DATABASE_PATH, MAX_DIST_STEM, \
NUMBER_OF_FRAGMENT_CANDIDATES
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
struc_cache = {}
DIST_MASK = array([0, 1, 1, 1, 1, 0, 0, 1, 1])
class LirScoringOption(object):
"""
    Class for gathering all Lir scoring values together.
    Influences the way in which fragment candidates are chosen.
    Enables distinguishing between fast scoring functions and advanced scoring functions.
"""
def __init__(self, scoring_mode = 'fast'):
if scoring_mode == 'fast': self.set_fast_scoring()
elif scoring_mode == 'advanced': self.set_advanced_scoring()
def set_fast_scoring(self, atom_distance = 10.0, seq_sim=0.0, secstruc=100.0):
# previously tried: omega 0.0
"""
        Enables changing the score for goodness and sequence similarity.
RMSD, clashes and HBonds are always 0 for fast scoring.
"""
self.distance = atom_distance
self.seq_similarity = seq_sim
self.rmsd = 0
self.clashes = 0
self.secstruc = secstruc
def set_advanced_scoring(self, atom_distance = 0.0000, seq_sim=1.0, rmsd=10.0, clash=2.00, secstruc=100.0):
# previously: omega 5.0, good 2.0 rms 10.0
"""
        Enables changing all scoring options
        (goodness, sequence similarity, RMSD, clashes, H bonds).
"""
self.distance = atom_distance
self.seq_similarity = seq_sim
self.rmsd = rmsd
self.clashes = clash
self.secstruc = secstruc
class LirRecord():
"""empty class @mmagnus"""
pass
class LirQuery(LirRecord):
"""
    Special LirRecord that enables searching for fragments.
"""
def __init__(self, res5, res3, sequence, model_structure, lir_path, secstruc=None):
LirRecord.__init__(self, fr_length = len(sequence), structure = None, chain = None, preceding_resi = None, following_resi=None, \
sequence=sequence, sequence_anchor=None, secstruc=secstruc, x=None, y=None, dist_anchor=None, beta=None, gamma=None, omega5=None, omega3=None, \
P_dist=None, O5p_dist=None, C5p_dist=None, C4p_dist=None, C3p_dist=None, O3p_dist=None, O2p_dist=None, C1p_dist=None, N_dist=None)
self.anchor5 = res5 # ModernaResiude instance
self.anchor3 = res3 # ModernaResidues instance
self.sequence = sequence # ModernaSequence instance/string ???
self.model_instance = model_structure # RNAModel instance
self.set_record_values()
self.lir_path = lir_path
if secstruc and len(sequence)+2!=len(secstruc):
            raise SearchLirError("Secondary structure '%s' must be 2 resis shorter than the sequence '%s'."%(secstruc, str(sequence)))
# Whether function get_query_record in LIR should exist?
def set_record_values(self):
l=Lir(self.anchor5, self.anchor3)
self.fr_length = len(self.sequence)
self.x=l.x
self.y=l.y
self.dist_anchor = l.dist_anchor
self.beta = l.beta
self.gamma = l.gamma
self.omega5 =l.omega5
self.omega3 = l.omega3
self.distances = array([l.P_dist, l.O5p_dist, l.C5p_dist, l.C4p_dist, l.C3p_dist, l.O3p_dist, l.O2p_dist, l.C1p_dist, l.N_dist])
class LirHit(LirRecord):
"""
Special LIRRecord that has a goodness, rmsd and sequence identity score.
"""
def __init__(self, query, fr_length, structure, chain, preceding_resi, following_resi, \
sequence, sequence_anchor, secstruc, x, y, dist_anchor, beta, gamma, omega5, omega3, \
P_dist, O5p_dist, C5p_dist, C4p_dist, C3p_dist, O3p_dist, O2p_dist, C1p_dist, N_dist):
LirRecord.__init__(self, fr_length, structure, chain, preceding_resi,following_resi, \
sequence, sequence_anchor, secstruc, x, y, dist_anchor, beta, gamma, omega5, omega3,
P_dist, O5p_dist, C5p_dist, C4p_dist, C3p_dist, O3p_dist, O2p_dist, C1p_dist, N_dist)
self.d_dist_score = 0
self.rmsd = 0
self.seq_similarity = 0
self.clash = 0 #score depends on the number of clashing residues
self.score = 0
self.d_secstruc = 0
self.fragment_instance = None
self.query = query
def __cmp__(self, hit):
"""Allow lists of LirHits to be sorted."""
return cmp(self.score, hit.score)
def __str__(self):
"""Returns one-line summary."""
hit_str="%16s [%s]\t%4s\t%s\t%s\t%3i\t%7.4f\t%7.4f\t%7.4f\t%7.4f"\
% (self.structure,self.chain,self.preceding_residue, self.sequence, self.secstruc, self.fr_length,self.d_dist_score, self.rmsd, self.clash, self.score)
return hit_str
def __repr__(self):
return str(self.score)
    def calculate_dist_score(self):
        """Calculates the sum of squared differences over all (masked) distances."""
self.d_dist_score = sum( (self.distances*DIST_MASK - self.query.distances*DIST_MASK)**2)
def calculate_seq_similarity(self):
"""
        The higher the score, the less similar the sequences are.
Two identical sequences will get score 0.
"""
#KR: used local variables to save time
hit_seq = self.sequence
query_seq = self.query.sequence
assert len(hit_seq) == len(query_seq)
# calculate similarity
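        # per-position penalty: 0 for identical entries, 0.1 for the same original
        # base, 0.5 for purine<->purine or pyrimidine<->pyrimidine mismatches,
        # 1 otherwise; the total is normalised by the sequence length below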
similarity_score = 0.0
for hit_ae, query_ae in zip(hit_seq, query_seq):
if hit_ae == query_ae: continue
hit_orig = hit_ae.original_base
query_orig = query_ae.original_base
if hit_orig == query_orig: similarity_score += 0.1
elif hit_orig in 'AG' and query_orig in 'AG': similarity_score += 0.5
elif hit_orig in 'CU' and query_orig in 'CU': similarity_score += 0.5
else: similarity_score +=1
if len(self.sequence): self.seq_similarity = similarity_score/len(self.sequence)
else: self.seq_similarity = 0
def match_secstruc(self):
"""Gives a BIG penalty if secstruc does not match."""
if self.query.secstruc:
self.d_secstruc = 100.0
if self.fragment_instance:
if self.query.secstruc == self.fragment_instance.struc.get_secstruc():
self.d_secstruc = 0.0
elif self.query.secstruc == self.secstruc:
self.d_secstruc = 0.0
def get_fragment(self):
"""
        Allows getting a small structure (as ModernaFragment) for the candidate fragment (fragment + anchor residues).
Returns ModernaFragment. Fragment is also stored in fragment_instance attribute.
Argument:
- anchor residue from 5' end (from structure to which fragment will be added)
- anchor residue from 3' end (from structure to which fragment will be added)
- sequence
"""
if not self.fragment_instance:
lir_path = self.query.lir_path
frag = library.get_fragment_part(\
lir_path+self.structure, self.chain, \
self.preceding_residue, self.following_residue, \
self.query.anchor5, self.query.anchor3, \
self.query.sequence, keep=keep_first_last, \
seq=self.sequence_anchor \
)
finsert = FragmentInserter()
finsert.prepare_fragment(frag, self.query.model_instance)
self.fragment_instance = frag
return self.fragment_instance
def calculate_rmsd(self, res5=None, res3=None, seq=None):
"""
        The fragment is superimposed according to the coordinates of the given anchor residues (taken from the query) and the RMSD is calculated.
        Returns RMSD. RMSD is also stored in the rmsd attribute of the fragment hit.
"""
self.get_fragment()
self.rmsd = self.fragment_instance.rmsd
return self.rmsd
def find_clash(self):
"""
        Checks whether a fragment candidate clashes with a given list of residues or not.
Returns list with tuples of clashing residues.
Arguments:
- list with residues from structure for which fragment will be added
- anchor residue from 5' end (from structure to which fragment will be added)
- anchor residue from 3' end (from structure to which fragment will be added)
- fragment sequence (from structure to which fragment will be added)
"""
self.get_fragment()
model_residues = self.query.model_instance.find_residues_not_in_range(self.query.anchor5.identifier, self.query.anchor3.identifier)
clashes = self.fragment_instance.has_clashes(model_residues)
self.clash = clashes
return clashes
def score_clash(self):
"""
        Finds clashes and gives a score according to the number of clashes.
If there is no clash the score is 0.
"""
clashes = self.find_clash()
self.clash = len(clashes)
return self.clash
def check_backbone_continouity(self):
pass
def calculate_score(self, scoring):
"""
        Calculates the hit score according to the provided scoring_option.
        The scoring_option instance is responsible for distinguishing fast scoring from advanced scoring.
        The lower the score, the better the candidate.
        Arguments:
- LirScoringOption instance
"""
if scoring.distance: self.calculate_dist_score()
if scoring.seq_similarity: self.calculate_seq_similarity()
if scoring.rmsd: self.calculate_rmsd()
if scoring.clashes: self.score_clash()
if scoring.secstruc: self.match_secstruc()
self.score = scoring.distance * self.d_dist_score + \
scoring.seq_similarity * self.seq_similarity + \
scoring.rmsd * self.rmsd + \
scoring.clashes * self.clash + \
scoring.secstruc * self.d_secstruc
def write_hit_structure(self, file_name='LirHit.pdb', with_anchor_residues=False, with_model=False, write_anchors_to_file=True):
"""
Writes structure of hit to a pdb file.
        Optionally it can write the hit with anchor residues or with the whole model.
Arguments:
- with_anchor_residues - True/False (by default False)
- with_model - True/False (by default False)
"""
if not self.fragment_instance: self.get_fragment()
if not self.rmsd: self.fragment_instance.superimpose()
if write_anchors_to_file: self.write_anchors_to_separate_file(file_name)
if with_anchor_residues and not with_model:
resis = list(self.fragment_instance.struc)
elif not with_anchor_residues and not with_model:
#with model or without model and without anchor residues
resis = self.fragment_instance.nonanchor_residues
if with_model:
# get a variable with the class because it cant be imported; looks ugly but works.
rm_class = self.query.model_instance.__class__
m = rm_class(None, None, self.query.model_instance.chain_name, 'residues', self.query.model_instance)
m.insert_fragment(self.fragment_instance)
m.fix_backbone()
resis = m.moderna_residues.values() # MM: or resi = m.moderna_residues
m = ModernaStructure('residues', resis, 'A')
m.sort_residues()
m.write_pdb_file(file_name)
class FragmentCandidates(object):
"""
    Takes care of fragment candidates.
    Prepares the initial candidate list and runs different scorings on it.
Arguments:
- query (LirQuery instance)
- path to file with LIR data
"""
lir_cache = ["", []] # filename, records
def __init__(self, query, lir_db_filename=LIR_DATABASE_PATH):
self.query=query
self.lir_database_filename = lir_db_filename
self.accepted_fragments=[]
self.index = 0
def __getitem__(self, args):
return self.accepted_fragments[args]
def __len__(self):
return len(self.accepted_fragments)
def __iter__(self):
return self.accepted_fragments.__iter__()
def __str__(self):
return "Present number of accepted fragment candidates: %s" %str(len(self.accepted_fragments))
def parse_lir_database(self, separator='\t'): # WAS: ' - '
"""
Reads fragments from the LIR database file and generates
LirRecords from the columns of the file.
        To learn more see also the LirRecord documentation.
"""
# check if a different file is read
if self.lir_cache[0] != self.lir_database_filename or self.lir_cache[1]==[]:
# empty old lir
self.lir_cache[1] = []
# read new lir
self.lir_cache[0] = self.lir_database_filename
for line in open(self.lir_database_filename):
if line[0] == 'l': continue #.startswith('fr length'): continue
line = line.strip().split(separator)
if len(line) == 17:
self.lir_cache[1].append(line)
def create_hit_from_line(self, line):
"""Creates a LirHit object."""
lir_hit = LirHit(
query = self.query,
fr_length=int(line[0]),
structure=line[1],
chain=line[2],
preceding_resi=line[3],
following_resi=line[4],
sequence = Sequence(line[5]),
sequence_anchor=Sequence(line[6]),
secstruc = line[7],
x=0.0, #float(line[7]),
y=0.0 ,#float(line[8]),
dist_anchor =0.0, #float(line[9]),
beta =0.0, #float(line[10]),
gamma=0.0, #float(line[11]),
omega5=0.0,#float(line[12]),
omega3=0.0,#float(line[13])
P_dist = float(line[8]),
O5p_dist = float(line[9]),
C5p_dist = float(line[10]),
C4p_dist = float(line[11]),
C3p_dist = float(line[12]),
O3p_dist = float(line[13]),
O2p_dist = float(line[14]),
C1p_dist = float(line[15]),
N_dist = float(line[16])
)
return lir_hit
def create_initial_fragment_set(self, max_dist_anchor=MAX_DIST_STEM, separator='\t'):
"""
        Collects all records from the LIR database that fulfil the length and anchor distance conditions.
"""
self.parse_lir_database(separator)
#Creates a list of LirHit objects for all entries in the pre-parsed LIR DB.
length = self.query.fr_length # SPEEDUP
self.accepted_fragments = [self.create_hit_from_line(line) for line in self.lir_cache[1] if int(line[0])==length]
def make_fast_scoring(self, scoring_option, number_of_candidates):
"""
Prepares scoring for all candidates based on values that are already present in LIR database.
        Takes into account goodness and sequence similarity.
        Returns a small number of candidates (indicated by number_of_candidates)
        which can undergo more advanced (longer) scoring.
"""
#for hit in self.accepted_fragments: hit.calculate_score(scoring_option)
[hit.calculate_score(scoring_option) for hit in self.accepted_fragments]
self.accepted_fragments.sort()
self.accepted_fragments = self.accepted_fragments[:number_of_candidates]
def make_advanced_scoring(self, scoring_option):
"""
        Prepares scoring according to RMSD, clashes and Hbonds.
Should be called when number of self.accepted_fragments is small
so as to avoid long calculations.
"""
#for hit in self.accepted_fragments: hit.calculate_score(scoring_option)
[hit.calculate_score(scoring_option) for hit in self.accepted_fragments]
self.accepted_fragments.sort()
if self.accepted_fragments != []:
if not self.accepted_fragments[0].fragment_instance:
for hit in self.accepted_fragments: hit.get_fragment()
#TODO: could be merged with method above.
def write_fragment_candidates(self, directory_name='LIR_candidates', with_anchor_residues=False, with_model=False, write_anchors_to_file=False, log=True):
"""
Writes pdb files with all fragment candidates.
        Candidates can be easily checked manually by calling 'pymol *' in the candidates directory
Arguments:
- directory name
"""
# MM: What when there is no hit? Exception or nothing?
if not os.path.isdir(directory_name):
os.mkdir(directory_name)
if directory_name[-1] != os.sep: directory_name += os.sep
if log: f=open(directory_name+'candidates.log', 'w')
for x, hit in enumerate(self.accepted_fragments):
if log: f.write(str(hit)+'\n'+50*'_'+'\n')
path=directory_name + 'candidate' + str(x) + '.pdb'
hit.write_hit_structure(path, with_anchor_residues, with_model, write_anchors_to_file)
class FragmentFinder(object):
"""
    Looks for the fragment that fits best."""
def __init__(self, anchor5, anchor3, sequence, model_structure, candidates_number=NUMBER_OF_FRAGMENT_CANDIDATES, lir_path=PATH_TO_LIR_STRUCTURES, secstruc=None):
self.anchor5 = anchor5 # ModernaResidue instance, anchor residue from the model from 5' (fragment start)
self.anchor3 = anchor3 # ModernaResidue instance, anchor residue from the model from 3' (fragment end)
self.sequence = sequence # Sequence instance. The sequence that fragment should have.
self.model_structure = model_structure #RNAModel instance
self.candidates_number = candidates_number
self.secstruc = self._get_secstruc(secstruc)
self.scoring = LirScoringOption()
self.lir_path=lir_path
self.query = self.get_query()
#TODO: could check whether parameters have the right type (?)
#MM: but type(anchor5) gives only 'instance'
def _get_secstruc(self, secstruc):
"""Adds pairing of anchors to secstruc query"""
if secstruc == None:
return None
basepair = self.anchor5.get_bp(self.anchor3)
if basepair and (basepair.canonical or basepair.wobble):
return '('+secstruc+')'
else:
return '.'+secstruc+'.'
    def get_query(self):
        """Creates a LirQuery object that can be used for searching the LIR db."""
return LirQuery(self.anchor5, self.anchor3, self.sequence, self.model_structure, self.lir_path, self.secstruc)
def log_candidates(self, candidates):
log.write_message('\nFragment candidates:\n')
log.write_message("""
Fragment candidates:
structure [chain]\t5'-resi\tsequence\tsecstruc\tlength\tdist\tRMSD\tclashes\tscore""")
for l in candidates: log.write_message(str(l))
def find_fragment_candidates(self):
"""Returns a FragmentCandidates object."""
candidates = FragmentCandidates(self.query)
candidates.create_initial_fragment_set()
self.scoring.set_fast_scoring()
candidates.make_fast_scoring(self.scoring, self.candidates_number)
self.scoring.set_advanced_scoring()
candidates.make_advanced_scoring(self.scoring)
self.log_candidates(candidates)
if len(candidates)>0 and candidates[0].score > 10000:
            log.write_message("\nNo fragment candidate with appropriate secondary structure was found.\nThe next best fragment is inserted.\n")
return candidates
def find_fragment(self):
"""
        Searches for fragment candidates and returns the fragment that belongs to the best candidate.
"""
candidates = self.find_fragment_candidates() # FragmentCandidates instance
if len(candidates) == 0: raise LirError('No fragment candidates found')
return candidates[0].fragment_instance
| mmagnus/rna-pdb-tools | rna_tools/tools/mini_moderna3/moderna/fragment_library/SearchLIR.py | Python | gpl-3.0 | 21,344 | 0.014618 |
"""
Batch processors
These commands implement the 'batch-command' and 'batch-code'
processors, using the functionality in src.utils.batchprocessors.
They allow for offline world-building.
Batch-command is the simpler system. This reads a file (*.ev)
containing a list of in-game commands and executes them in sequence as
if they had been entered in the game (including permission checks
etc).
Example batch-command file: game/gamesrc/commands/examples/batch_cmds.ev
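A rough illustration of what such a file might look like (hypothetical
commands, not taken from the example file; comment lines starting with
'#' act as separators between command entries):
    # build a small test location
    @dig Test Room
    #
    @teleport Test Room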
Batch-code is a full-fledged python code interpreter that reads blocks
of python code (*.py) and executes them in sequence. This allows for
much more power than Batch-command, but requires knowing Python and
the Evennia API. It is also a severe security risk and should
therefore always be limited to superusers only.
Example batch-code file: game/gamesrc/commands/examples/batch_code.py
"""
from traceback import format_exc
from django.conf import settings
from src.utils.batchprocessors import BATCHCMD, BATCHCODE
from src.commands.cmdset import CmdSet
from src.commands.default.muxcommand import MuxCommand
from src.utils import utils
# limit symbols for API inclusion
__all__ = ("CmdBatchCommands", "CmdBatchCode")
_HEADER_WIDTH = 70
_UTF8_ERROR = \
"""
{rDecode error in '%s'.{n
This file contains non-ascii character(s). This is common if you
wrote some input in a language that has more letters and special
symbols than English; such as accents or umlauts. This is usually
fine and fully supported! But for Evennia to know how to decode such
characters in a universal way, the batchfile must be saved with the
international 'UTF-8' encoding. This file is not.
Please re-save the batchfile with the UTF-8 encoding (refer to the
documentation of your text editor on how to do this, or switch to a
better featured one) and try again.
The (first) error was found with a character on line %s in the file.
"""
_PROCPOOL_BATCHCMD_SOURCE = """
from src.commands.default.batchprocess import batch_cmd_exec, step_pointer, BatchSafeCmdSet
caller.ndb.batch_stack = commands
caller.ndb.batch_stackptr = 0
caller.ndb.batch_batchmode = "batch_commands"
caller.cmdset.add(BatchSafeCmdSet)
for inum in range(len(commands)):
print "command:", inum
caller.cmdset.add(BatchSafeCmdSet)
if not batch_cmd_exec(caller):
break
step_pointer(caller, 1)
print "leaving run ..."
"""
_PROCPOOL_BATCHCODE_SOURCE = """
from src.commands.default.batchprocess import batch_code_exec, step_pointer, BatchSafeCmdSet
caller.ndb.batch_stack = codes
caller.ndb.batch_stackptr = 0
caller.ndb.batch_batchmode = "batch_code"
caller.cmdset.add(BatchSafeCmdSet)
for inum in range(len(codes)):
print "code:", inum
caller.cmdset.add(BatchSafeCmdSet)
if not batch_code_exec(caller):
break
step_pointer(caller, 1)
print "leaving run ..."
"""
#------------------------------------------------------------
# Helper functions
#------------------------------------------------------------
def format_header(caller, entry):
"""
Formats a header
"""
width = _HEADER_WIDTH - 10
entry = entry.strip()
header = utils.crop(entry, width=width)
ptr = caller.ndb.batch_stackptr + 1
stacklen = len(caller.ndb.batch_stack)
header = "{w%02i/%02i{G: %s{n" % (ptr, stacklen, header)
# add extra space to the side for padding.
header = "%s%s" % (header, " "*(width - len(header)))
header = header.replace('\n', '\\n')
return header
def format_code(entry):
"""
Formats the viewing of code and errors
"""
code = ""
for line in entry.split('\n'):
code += "\n{G>>>{n %s" % line
return code.strip()
def batch_cmd_exec(caller):
"""
Helper function for executing a single batch-command entry
"""
ptr = caller.ndb.batch_stackptr
stack = caller.ndb.batch_stack
command = stack[ptr]
caller.msg(format_header(caller, command))
try:
caller.execute_cmd(command)
except Exception:
caller.msg(format_code(format_exc()))
return False
return True
def batch_code_exec(caller):
"""
Helper function for executing a single batch-code entry
"""
ptr = caller.ndb.batch_stackptr
stack = caller.ndb.batch_stack
debug = caller.ndb.batch_debug
codedict = stack[ptr]
caller.msg(format_header(caller, codedict['code']))
err = BATCHCODE.code_exec(codedict,
extra_environ={"caller":caller}, debug=debug)
if err:
caller.msg(format_code(err))
return False
return True
def step_pointer(caller, step=1):
"""
Step in stack, returning the item located.
stackptr - current position in stack
stack - the stack of units
step - how many steps to move from stackptr
"""
ptr = caller.ndb.batch_stackptr
stack = caller.ndb.batch_stack
nstack = len(stack)
if ptr + step <= 0:
caller.msg("{RBeginning of batch file.")
if ptr + step >= nstack:
caller.msg("{REnd of batch file.")
caller.ndb.batch_stackptr = max(0, min(nstack-1, ptr + step))
def show_curr(caller, showall=False):
"""
Show the current position in stack
"""
stackptr = caller.ndb.batch_stackptr
stack = caller.ndb.batch_stack
if stackptr >= len(stack):
caller.ndb.batch_stackptr = len(stack) - 1
show_curr(caller, showall)
return
entry = stack[stackptr]
if type(entry) == dict:
# this is a batch-code entry
string = format_header(caller, entry['code'])
codeall = entry['code'].strip()
else:
# this is a batch-cmd entry
string = format_header(caller, entry)
codeall = entry.strip()
string += "{G(hh for help)"
if showall:
for line in codeall.split('\n'):
string += "\n{G|{n %s" % line
caller.msg(string)
def purge_processor(caller):
"""
This purges all effects running
on the caller.
"""
try:
del caller.ndb.batch_stack
del caller.ndb.batch_stackptr
del caller.ndb.batch_pythonpath
del caller.ndb.batch_batchmode
except:
pass
# clear everything but the default cmdset.
caller.cmdset.delete(BatchSafeCmdSet)
caller.cmdset.clear()
caller.scripts.validate() # this will purge interactive mode
#------------------------------------------------------------
# main access commands
#------------------------------------------------------------
class CmdBatchCommands(MuxCommand):
"""
Build from batch-command file
Usage:
@batchcommands[/interactive] <python.path.to.file>
Switch:
interactive - this mode will offer more control when
executing the batch file, like stepping,
skipping, reloading etc.
Runs batches of commands from a batch-cmd text file (*.ev).
"""
key = "@batchcommands"
aliases = ["@batchcommand", "@batchcmd"]
locks = "cmd:perm(batchcommands) or superuser()"
help_category = "Building"
def func(self):
"Starts the processor."
caller = self.caller
args = self.args
if not args:
caller.msg("Usage: @batchcommands[/interactive] <path.to.file>")
return
python_path = self.args
#parse indata file
try:
commands = BATCHCMD.parse_file(python_path)
except UnicodeDecodeError, err:
lnum = err.linenum
caller.msg(_UTF8_ERROR % (python_path, lnum))
return
if not commands:
string = "'%s' not found.\nYou have to supply the python path "
string += "of the file relative to \none of your batch-file directories (%s)."
caller.msg(string % (python_path, ", ".join(settings.BASE_BATCHPROCESS_PATHS)))
return
switches = self.switches
# Store work data in cache
caller.ndb.batch_stack = commands
caller.ndb.batch_stackptr = 0
caller.ndb.batch_pythonpath = python_path
caller.ndb.batch_batchmode = "batch_commands"
caller.cmdset.add(BatchSafeCmdSet)
if 'inter' in switches or 'interactive' in switches:
# Allow more control over how batch file is executed
# Set interactive state directly
caller.cmdset.add(BatchInteractiveCmdSet)
caller.msg("\nBatch-command processor - Interactive mode for %s ..." % python_path)
show_curr(caller)
else:
caller.msg("Running Batch-command processor - Automatic mode for %s (this might take some time) ..." % python_path)
procpool = False
if "PythonProcPool" in utils.server_services():
if utils.uses_database("sqlite3"):
caller.msg("Batchprocessor disabled ProcPool under SQLite3.")
else:
procpool=True
if procpool:
# run in parallel process
def callback(r):
caller.msg(" {GBatchfile '%s' applied." % python_path)
purge_processor(caller)
def errback(e):
caller.msg(" {RError from processor: '%s'" % e)
purge_processor(caller)
utils.run_async(_PROCPOOL_BATCHCMD_SOURCE, commands=commands, caller=caller, at_return=callback, at_err=errback)
else:
# run in-process (might block)
for inum in range(len(commands)):
# loop through the batch file
if not batch_cmd_exec(caller):
return
step_pointer(caller, 1)
# clean out the safety cmdset and clean out all other temporary attrs.
string = " Batchfile '%s' applied." % python_path
caller.msg("{G%s" % string)
purge_processor(caller)
class CmdBatchCode(MuxCommand):
"""
Build from batch-code file
Usage:
@batchcode[/interactive] <python path to file>
Switch:
interactive - this mode will offer more control when
executing the batch file, like stepping,
skipping, reloading etc.
       debug - auto-delete all objects that have been marked as
deletable in the script file (see example files for
               syntax). This is useful so as to not leave multiple
object copies behind when testing out the script.
Runs batches of commands from a batch-code text file (*.py).
"""
key = "@batchcode"
aliases = ["@batchcodes"]
locks = "cmd:superuser()"
help_category = "Building"
def func(self):
"Starts the processor."
caller = self.caller
args = self.args
if not args:
caller.msg("Usage: @batchcode[/interactive/debug] <path.to.file>")
return
python_path = self.args
#parse indata file
try:
codes = BATCHCODE.parse_file(python_path)
except UnicodeDecodeError, err:
lnum = err.linenum
caller.msg(_UTF8_ERROR % (python_path, lnum))
return
if not codes:
string = "'%s' not found.\nYou have to supply the python path "
string += "of the file relative to \nyour batch-file directories (%s)."
caller.msg(string % (python_path, ", ".join(settings.BASE_BATCHPROCESS_PATHS)))
return
switches = self.switches
debug = False
if 'debug' in switches:
debug = True
# Store work data in cache
caller.ndb.batch_stack = codes
caller.ndb.batch_stackptr = 0
caller.ndb.batch_pythonpath = python_path
caller.ndb.batch_batchmode = "batch_code"
caller.ndb.batch_debug = debug
caller.cmdset.add(BatchSafeCmdSet)
if 'inter' in switches or 'interactive'in switches:
# Allow more control over how batch file is executed
# Set interactive state directly
caller.cmdset.add(BatchInteractiveCmdSet)
caller.msg("\nBatch-code processor - Interactive mode for %s ..." % python_path)
show_curr(caller)
else:
caller.msg("Running Batch-code processor - Automatic mode for %s ..." % python_path)
procpool = False
if "PythonProcPool" in utils.server_services():
if utils.uses_database("sqlite3"):
caller.msg("Batchprocessor disabled ProcPool under SQLite3.")
else:
procpool=True
if procpool:
# run in parallel process
def callback(r):
caller.msg(" {GBatchfile '%s' applied." % python_path)
purge_processor(caller)
def errback(e):
caller.msg(" {RError from processor: '%s'" % e)
purge_processor(caller)
utils.run_async(_PROCPOOL_BATCHCODE_SOURCE, codes=codes, caller=caller, at_return=callback, at_err=errback)
else:
            # run in-process (will block)
for inum in range(len(codes)):
# loop through the batch file
if not batch_code_exec(caller):
return
step_pointer(caller, 1)
# clean out the safety cmdset and clean out all other temporary attrs.
string = " Batchfile '%s' applied." % python_path
caller.msg("{G%s" % string)
purge_processor(caller)
#------------------------------------------------------------
# State-commands for the interactive batch processor modes
# (these are the same for both processors)
#------------------------------------------------------------
class CmdStateAbort(MuxCommand):
"""
@abort
This is a safety feature. It force-ejects us out of the processor and to
the default cmdset, regardless of what current cmdset the processor might
have put us in (e.g. when testing buggy scripts etc).
"""
key = "@abort"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
"Exit back to default."
purge_processor(self.caller)
self.caller.msg("Exited processor and reset out active cmdset back to the default one.")
class CmdStateLL(MuxCommand):
"""
ll
Look at the full source for the current
command definition.
"""
key = "ll"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
show_curr(self.caller, showall=True)
class CmdStatePP(MuxCommand):
"""
pp
Process the currently shown command definition.
"""
key = "pp"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
"""
This checks which type of processor we are running.
"""
caller = self.caller
if caller.ndb.batch_batchmode == "batch_code":
batch_code_exec(caller)
else:
batch_cmd_exec(caller)
class CmdStateRR(MuxCommand):
"""
rr
Reload the batch file, keeping the current
position in it.
"""
key = "rr"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
if caller.ndb.batch_batchmode == "batch_code":
new_data = BATCHCODE.parse_file(caller.ndb.batch_pythonpath)
else:
new_data = BATCHCMD.parse_file(caller.ndb.batch_pythonpath)
caller.ndb.batch_stack = new_data
caller.msg(format_code("File reloaded. Staying on same command."))
show_curr(caller)
class CmdStateRRR(MuxCommand):
"""
rrr
Reload the batch file, starting over
from the beginning.
"""
key = "rrr"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
if caller.ndb.batch_batchmode == "batch_code":
BATCHCODE.parse_file(caller.ndb.batch_pythonpath)
else:
BATCHCMD.parse_file(caller.ndb.batch_pythonpath)
caller.ndb.batch_stackptr = 0
caller.msg(format_code("File reloaded. Restarting from top."))
show_curr(caller)
class CmdStateNN(MuxCommand):
"""
nn
Go to next command. No commands are executed.
"""
key = "nn"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = int(self.args)
else:
step = 1
step_pointer(caller, step)
show_curr(caller)
class CmdStateNL(MuxCommand):
"""
nl
Go to next command, viewing its full source.
No commands are executed.
"""
key = "nl"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = int(self.args)
else:
step = 1
step_pointer(caller, step)
show_curr(caller, showall=True)
class CmdStateBB(MuxCommand):
"""
bb
Backwards to previous command. No commands
are executed.
"""
key = "bb"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = -int(self.args)
else:
step = -1
step_pointer(caller, step)
show_curr(caller)
class CmdStateBL(MuxCommand):
"""
bl
Backwards to previous command, viewing its full
source. No commands are executed.
"""
key = "bl"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = -int(self.args)
else:
step = -1
step_pointer(caller, step)
show_curr(caller, showall=True)
class CmdStateSS(MuxCommand):
"""
ss [steps]
Process current command, then step to the next
one. If steps is given,
process this many commands.
"""
key = "ss"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = int(self.args)
else:
step = 1
for istep in range(step):
if caller.ndb.batch_batchmode == "batch_code":
batch_code_exec(caller)
else:
batch_cmd_exec(caller)
step_pointer(caller, 1)
show_curr(caller)
class CmdStateSL(MuxCommand):
"""
sl [steps]
Process current command, then step to the next
one, viewing its full source. If steps is given,
process this many commands.
"""
key = "sl"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
step = int(self.args)
else:
step = 1
for istep in range(step):
if caller.ndb.batch_batchmode == "batch_code":
batch_code_exec(caller)
else:
batch_cmd_exec(caller)
step_pointer(caller, 1)
show_curr(caller)
class CmdStateCC(MuxCommand):
"""
cc
Continue to process all remaining
commands.
"""
key = "cc"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
nstack = len(caller.ndb.batch_stack)
ptr = caller.ndb.batch_stackptr
step = nstack - ptr
for istep in range(step):
if caller.ndb.batch_batchmode == "batch_code":
batch_code_exec(caller)
else:
batch_cmd_exec(caller)
step_pointer(caller, 1)
show_curr(caller)
del caller.ndb.batch_stack
del caller.ndb.batch_stackptr
del caller.ndb.batch_pythonpath
del caller.ndb.batch_batchmode
caller.msg(format_code("Finished processing batch file."))
class CmdStateJJ(MuxCommand):
"""
j <command number>
Jump to specific command number
"""
key = "j"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
number = int(self.args)-1
else:
caller.msg(format_code("You must give a number index."))
return
ptr = caller.ndb.batch_stackptr
step = number - ptr
step_pointer(caller, step)
show_curr(caller)
class CmdStateJL(MuxCommand):
"""
jl <command number>
Jump to specific command number and view its full source.
"""
key = "jl"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
caller = self.caller
arg = self.args
if arg and arg.isdigit():
number = int(self.args)-1
else:
caller.msg(format_code("You must give a number index."))
return
ptr = caller.ndb.batch_stackptr
step = number - ptr
step_pointer(caller, step)
show_curr(caller, showall=True)
class CmdStateQQ(MuxCommand):
"""
qq
Quit the batchprocessor.
"""
key = "qq"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
purge_processor(self.caller)
self.caller.msg("Aborted interactive batch mode.")
class CmdStateHH(MuxCommand):
"Help command"
key = "hh"
help_category = "BatchProcess"
locks = "cmd:perm(batchcommands)"
def func(self):
string = """
Interactive batch processing commands:
nn [steps] - next command (no processing)
nl [steps] - next & look
bb [steps] - back to previous command (no processing)
bl [steps] - back & look
jj <N> - jump to command nr N (no processing)
jl <N> - jump & look
pp - process currently shown command (no step)
ss [steps] - process & step
sl [steps] - process & step & look
ll - look at full definition of current command
rr - reload batch file (stay on current)
rrr - reload batch file (start from first)
hh - this help list
cc - continue processing to end, then quit.
qq - quit (abort all remaining commands)
@abort - this is a safety command that always is available
regardless of what cmdsets gets added to us during
batch-command processing. It immediately shuts down
the processor and returns us to the default cmdset.
"""
self.caller.msg(string)
#------------------------------------------------------------
#
# Defining the cmdsets for the interactive batchprocessor
# mode (same for both processors)
#
#------------------------------------------------------------
class BatchSafeCmdSet(CmdSet):
"""
The base cmdset for the batch processor.
This sets a 'safe' @abort command that will
always be available to get out of everything.
"""
key = "Batch_default"
priority = 104 # override other cmdsets.
def at_cmdset_creation(self):
"Init the cmdset"
self.add(CmdStateAbort())
class BatchInteractiveCmdSet(CmdSet):
"""
The cmdset for the interactive batch processor mode.
"""
key = "Batch_interactive"
priority = 104
def at_cmdset_creation(self):
"init the cmdset"
self.add(CmdStateAbort())
self.add(CmdStateLL())
self.add(CmdStatePP())
self.add(CmdStateRR())
self.add(CmdStateRRR())
self.add(CmdStateNN())
self.add(CmdStateNL())
self.add(CmdStateBB())
self.add(CmdStateBL())
self.add(CmdStateSS())
self.add(CmdStateSL())
self.add(CmdStateCC())
self.add(CmdStateJJ())
self.add(CmdStateJL())
self.add(CmdStateQQ())
self.add(CmdStateHH())
| TaliesinSkye/evennia | src/commands/default/batchprocess.py | Python | bsd-3-clause | 24,488 | 0.002654 |
from django import template
register = template.Library()
class RepeatNode(template.Node):
def __init__(self, nodelist, count):
self.nodelist = nodelist
self.count = template.Variable(count)
def render(self, context):
output = self.nodelist.render(context)
        return output * int(self.count.resolve(context))
def repeat(parser, token):
"""
Repeats the containing text a certain number of times.
Requires a single argument, an integer, to indicate the number of times to
repeat the enclosing content.
Example::
{% repeat 3 %}foo{% endrepeat %}
Yields::
foofoofoo
"""
bits = token.split_contents()
if len(bits) != 2:
raise template.TemplateSyntaxError('%r tag requires 1 argument.' % bits[0])
count = bits[1]
nodelist = parser.parse(('endrepeat',))
parser.delete_first_token()
return RepeatNode(nodelist, count)
repeat = register.tag(repeat)
| julcollas/django-smokeping | smokeping/templatetags/repeat.py | Python | gpl-2.0 | 1,004 | 0.010956 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import sqlalchemy as sa
from quantum.db import api as db
from quantum.db import model_base
from quantum.db import models_v2
from quantum.db import securitygroups_db as sg_db
from quantum.extensions import securitygroup as ext_sg
from quantum import manager
from quantum.openstack.common import log as logging
from quantum.plugins.nec.common import config # noqa
from quantum.plugins.nec.common import exceptions as nexc
from quantum.plugins.nec.db import models as nmodels
LOG = logging.getLogger(__name__)
OFP_VLAN_NONE = 0xffff
resource_map = {'ofc_tenant': nmodels.OFCTenantMapping,
'ofc_network': nmodels.OFCNetworkMapping,
'ofc_port': nmodels.OFCPortMapping,
'ofc_packet_filter': nmodels.OFCFilterMapping}
old_resource_map = {'ofc_tenant': nmodels.OFCTenant,
'ofc_network': nmodels.OFCNetwork,
'ofc_port': nmodels.OFCPort,
'ofc_packet_filter': nmodels.OFCFilter}
# utility methods
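# The old_style flag accepted by these helpers selects the legacy mapping
# tables (old_resource_map); the *_lookup_both variants consult both the new
# and the old tables.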
def _get_resource_model(resource, old_style):
if old_style:
return old_resource_map[resource]
else:
return resource_map[resource]
def initialize():
db.configure_db()
def clear_db(base=model_base.BASEV2):
db.clear_db(base)
def get_ofc_item(session, resource, quantum_id, old_style=False):
try:
model = _get_resource_model(resource, old_style)
return session.query(model).filter_by(quantum_id=quantum_id).one()
except sa.orm.exc.NoResultFound:
return None
def get_ofc_id(session, resource, quantum_id, old_style=False):
ofc_item = get_ofc_item(session, resource, quantum_id, old_style)
if ofc_item:
if old_style:
return ofc_item.id
else:
return ofc_item.ofc_id
else:
return None
def exists_ofc_item(session, resource, quantum_id, old_style=False):
if get_ofc_item(session, resource, quantum_id, old_style):
return True
else:
return False
def find_ofc_item(session, resource, ofc_id, old_style=False):
try:
model = _get_resource_model(resource, old_style)
if old_style:
params = dict(id=ofc_id)
else:
params = dict(ofc_id=ofc_id)
return (session.query(model).filter_by(**params).one())
except sa.orm.exc.NoResultFound:
return None
def add_ofc_item(session, resource, quantum_id, ofc_id, old_style=False):
try:
model = _get_resource_model(resource, old_style)
if old_style:
params = dict(quantum_id=quantum_id, id=ofc_id)
else:
params = dict(quantum_id=quantum_id, ofc_id=ofc_id)
item = model(**params)
with session.begin(subtransactions=True):
session.add(item)
session.flush()
except Exception as exc:
LOG.exception(exc)
raise nexc.NECDBException(reason=exc.message)
return item
def del_ofc_item(session, resource, quantum_id, old_style=False,
warning=True):
try:
model = _get_resource_model(resource, old_style)
with session.begin(subtransactions=True):
item = session.query(model).filter_by(quantum_id=quantum_id).one()
session.delete(item)
return True
except sa.orm.exc.NoResultFound:
if warning:
LOG.warning(_("_del_ofc_item(): NotFound item "
"(model=%(model)s, id=%(id)s) "),
{'model': model, 'id': quantum_id})
return False
def get_ofc_id_lookup_both(session, resource, quantum_id):
ofc_id = get_ofc_id(session, resource, quantum_id)
# Lookup old style of OFC mapping table
if not ofc_id:
ofc_id = get_ofc_id(session, resource, quantum_id,
old_style=True)
if not ofc_id:
reason = (_("NotFound %(resource)s for quantum_id=%(id)s.")
% {'resource': resource, 'id': quantum_id})
raise nexc.OFCConsistencyBroken(reason=reason)
return ofc_id
def exists_ofc_item_lookup_both(session, resource, quantum_id):
if exists_ofc_item(session, resource, quantum_id):
return True
# Check old style of OFC mapping table
if exists_ofc_item(session, resource, quantum_id,
old_style=True):
return True
return False
def del_ofc_item_lookup_both(session, resource, quantum_id):
# Delete the mapping from new style of OFC mapping table
if del_ofc_item(session, resource, quantum_id,
old_style=False, warning=False):
return
# Delete old style of OFC mapping table
if del_ofc_item(session, resource, quantum_id,
old_style=True, warning=False):
return
# The specified resource not found
LOG.warning(_("_del_ofc_item(): NotFound item "
"(resource=%(resource)s, id=%(id)s) "),
{'resource': resource, 'id': quantum_id})
def get_portinfo(session, id):
try:
return (session.query(nmodels.PortInfo).
filter_by(id=id).
one())
except sa.orm.exc.NoResultFound:
return None
def add_portinfo(session, id, datapath_id='', port_no=0,
vlan_id=OFP_VLAN_NONE, mac=''):
try:
portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id,
port_no=port_no, vlan_id=vlan_id, mac=mac)
with session.begin(subtransactions=True):
session.add(portinfo)
except Exception as exc:
LOG.exception(exc)
raise nexc.NECDBException(reason=exc.message)
return portinfo
def del_portinfo(session, id):
try:
with session.begin(subtransactions=True):
portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one()
session.delete(portinfo)
except sa.orm.exc.NoResultFound:
LOG.warning(_("del_portinfo(): NotFound portinfo for "
"port_id: %s"), id)
def get_port_from_device(port_id):
"""Get port from database."""
LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id == port_id)
port_and_sgs = query.all()
if not port_and_sgs:
return None
port = port_and_sgs[0][0]
plugin = manager.QuantumManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict[ext_sg.SECURITYGROUPS] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
| yamt/neutron | quantum/plugins/nec/db/api.py | Python | apache-2.0 | 7,759 | 0 |
# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import uuid
import json
import time
import getpass
import hashlib
import mimetypes
from itertools import chain
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.http import parse_cookie
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
from werkzeug._internal import _log
from werkzeug._compat import text_type
# DEPRECATED
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr # noqa
# A week
PIN_TIME = 60 * 60 * 24 * 7
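# The remember-me cookie never stores the pin itself, only the truncated,
# salted MD5 digest produced by hash_pin() below.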
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode('utf-8', 'replace')
return hashlib.md5(pin + b'shittysalt').hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
rv = _machine_id
if rv is not None:
return rv
def _generate():
# Potential sources of secret information on linux. The machine-id
# is stable across boots, the boot id is not
for filename in '/etc/machine-id', '/proc/sys/kernel/random/boot_id':
try:
with open(filename, 'rb') as f:
return f.readline().strip()
except IOError:
continue
# On OS X we can use the computer's serial number assuming that
# ioreg exists and can spit out that information.
try:
# Also catch import errors: subprocess may not be available, e.g.
# Google App Engine
# See https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'],
stdout=PIPE).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows we can use winreg to get the machine guid
wr = None
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
pass
if wr is not None:
try:
with wr.OpenKey(wr.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Microsoft\\Cryptography', 0,
wr.KEY_READ | wr.KEY_WOW64_64KEY) as rk:
return wr.QueryValueEx(rk, 'MachineGuid')[0]
except WindowsError:
pass
_machine_id = rv = _generate()
return rv
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get('WERKZEUG_DEBUG_PIN')
rv = None
num = None
# Pin was explicitly disabled
if pin == 'off':
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace('-', '').isdigit():
# If there are separators in the pin, return it directly
if '-' in pin:
rv = pin
else:
num = pin
modname = getattr(app, '__module__',
getattr(app.__class__, '__module__'))
try:
# `getpass.getuser()` imports the `pwd` module,
# which does not exist in the Google App Engine sandbox.
username = getpass.getuser()
except ImportError:
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, '__name__', getattr(app.__class__, '__name__')),
getattr(mod, '__file__', None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [
str(uuid.getnode()),
get_machine_id(),
]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode('utf-8')
h.update(bit)
h.update(b'cookiesalt')
cookie_name = '__wzd' + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
# end up with the same value and generate out 9 digits
if num is None:
h.update(b'pinsalt')
num = ('%09d' % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = '-'.join(num[x:x + group_size].rjust(group_size, '0')
for x in range(0, len(num), group_size))
break
else:
rv = num
return rv, cookie_name
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None,
pin_security=True, pin_logging=True):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' and \
pin_logging:
_log('warning', ' * Debugger is active!')
if self.pin is None:
_log('warning', ' * Debugger pin disabled. '
'DEBUGGER UNSECURED!')
else:
_log('info', ' * Debugger pin code: %s' % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, '_pin'):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, '_pin_cookie'):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(
skip=1, show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(evalex=self.evalex,
evalex_trusted=is_trusted,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault('app', self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(render_console_html(secret=self.secret,
evalex_trusted=is_trusted),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or '|' not in val:
return False
ts, pin_hash = val.split('|', 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(self._failed_pin_auth > 5 and 5.0 or 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get('pin')
if entered_pin.strip().replace('-', '') == \
self.pin.replace('-', ''):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(json.dumps({
'auth': auth,
'exhausted': exhausted,
}), mimetype='application/json')
if auth:
rv.set_cookie(self.pin_cookie_name, '%s|%s' % (
int(time.time()),
hash_pin(self.pin)
), httponly=True)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log('info', ' * To enable the debugger you need to '
'enter the security pin:')
_log('info', ' * Debugger pin code: %s' % self.pin)
return Response('')
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'pinauth' and secret == self.secret:
response = self.pin_auth(request)
elif cmd == 'printpin' and secret == self.secret:
response = self.log_pin_request()
elif self.evalex and cmd is not None and frame is not None \
and self.secret == secret and \
self.check_pin_trust(environ):
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
| pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/werkzeug/debug/__init__.py | Python | mit | 17,737 | 0.000056 |
from __future__ import absolute_import
class Newsletter(object):
__all__ = ('is_enabled', 'get_subscriptions', 'update_subscription',
'create_or_update_subscription')
DEFAULT_LIST_ID = 1
enabled = False
def is_enabled(self):
return self.enabled
def get_subscriptions(self, user):
return None
def update_subscription(self, user, **kwargs):
return None
def create_or_update_subscription(self, user, **kwargs):
kwargs['create'] = True
return self.update_subscription(user, **kwargs)
| JamesMura/sentry | src/sentry/newsletter/base.py | Python | bsd-3-clause | 571 | 0 |
"""
"""
import difflib
import hashlib
import math
import os
import socket
import time
# global variables
settings_filename = "/home/fa11en/.config/synk/synk-settings.conf"
server_ip_field = "server_ip"
server_port_field = "server_port"
local_project_loc_field = "local_project_location"
server_project_loc_field = "server_project_location"
delay_field = "delay" # in seconds
cached_project_location = "/tmp/synkProjects/"
# server connection classes
class Server_conn(object):
def __init__(self, settings, attempts):
# get the relevent settings
self.server_ip = settings[server_ip_field]
| Eternali/synk | synk-pre/synk2cp2.py | Python | gpl-3.0 | 618 | 0 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"swap": '5',
"disabled": False,
"ephemeral_gb": '20',
"rxtx_factor": '1.0',
"vcpus": 1,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '10',
"swap": '10',
"ephemeral_gb": '25',
"rxtx_factor": None,
"disabled": False,
"vcpus": 1,
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return [
fake_flavor_get_by_flavor_id(1),
fake_flavor_get_by_flavor_id(2)
]
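# The stubs above replace the flavor lookups that would normally hit the
# database, so the tests below (NoDBTestCase) can run without one.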
class FlavorRxtxTestV21(test.NoDBTestCase):
content_type = 'application/json'
_prefix = "/v2/fake"
def setUp(self):
super(FlavorRxtxTestV21, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavor_rxtx.Flavor_rxtx')
self.flags(osapi_compute_extension=[ext])
fakes.stub_out_nw_api(self)
self.stubs.Set(flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(self._get_app())
return res
def _get_app(self):
return fakes.wsgi_app_v21(init_only=('servers',
'flavors', 'os-flavor-rxtx'))
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorRxtx(self, flavor, rxtx):
self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
def test_show(self):
url = self._prefix + '/flavors/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
def test_detail(self):
url = self._prefix + '/flavors/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorRxtx(flavors[0], '1.0')
self.assertFlavorRxtx(flavors[1], '')
| bigswitch/nova | nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py | Python | apache-2.0 | 3,454 | 0.000579 |
# This file is simply here to make sure that everything is running just as
# fast under the virtualbox as under the host OS. There should be no
# performance degradation. This takes me (Ben) approximately 1.2sec outside my
# virtual machine and approx. 1.15sec inside the virtual machine. WHY IS IT
# FASTER inside the VM?! WHY?
import time
def time_fnc():
a = range(1, 1000000)
for i in range(1, 200):
b = sum(a)
t0 = time.time()
time_fnc()
t1 = time.time()
print("Required: " + str(t1 - t0) + " seconds.")
| tbenthompson/codim1 | test/test_speed.py | Python | mit | 524 | 0.003817 |
# -*- coding: utf-8 -*-
"""
Grill logging module.
"""
# standard
from __future__ import annotations
import logging
from pathlib import Path
from naming import NameConfig
from grill.names import DateTimeFile
_LOG_FILE_SUFFIX = 'log'
class ErrorFilter(logging.Filter):
"""
Pass any message meant for stderr.
"""
def filter(self, record):
"""
        If the record's level is above logging.INFO, return True.
"""
return record.levelno > logging.INFO
class OutFilter(logging.Filter):
"""
    Pass any message meant for stdout.
"""
def filter(self, record):
"""
        If the record's level is at or below logging.INFO, return True.
"""
return record.levelno <= logging.INFO
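# A minimal usage sketch (not part of this module; handler names are
# illustrative): attach OutFilter to a stdout handler and ErrorFilter to a
# stderr handler so records at INFO and below go to stdout while warnings
# and errors go to stderr, e.g.
#   out_handler = logging.StreamHandler(sys.stdout)
#   out_handler.addFilter(OutFilter())
#   err_handler = logging.StreamHandler(sys.stderr)
#   err_handler.addFilter(ErrorFilter())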
class LogFile(DateTimeFile):
"""docstring for LogFile"""
config = dict(
log_name=r'[\w\.]+',
log_filter=r'\d+',
)
file_config = NameConfig(dict(suffix=_LOG_FILE_SUFFIX))
@property
def path(self):
return Path(r'~/grill').expanduser() / super().name
@property
def _defaults(self):
result = super()._defaults
result.update(
log_name='grill',
log_filter=logging.INFO,
suffix=_LOG_FILE_SUFFIX,
)
return result
| chrizzFTD/grill | grill/logger/model.py | Python | gpl-3.0 | 1,265 | 0 |
# -*- coding: utf-8 -*-
"""Installer for the plone.formbuilder package."""
from setuptools import find_packages
from setuptools import setup
long_description = '\n\n'.join([
open('README.rst').read(),
open('CONTRIBUTORS.rst').read(),
open('CHANGES.rst').read(),
])
setup(
name='plone.formbuilder',
version='1.0a1',
description="An addon to build form using javascript",
long_description=long_description,
# Get more from https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Environment :: Web Environment",
"Framework :: Plone",
"Framework :: Plone :: 5.0",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
keywords='Python Plone',
author='Mohammad Tareq Alam',
author_email='tareq.mist@gmail.com',
url='https://pypi.python.org/pypi/plone.formbuilder',
license='GPL version 2',
packages=find_packages('src', exclude=['ez_setup']),
namespace_packages=['plone'],
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=[
'plone.api',
'Products.GenericSetup>=1.8.2',
'setuptools',
'z3c.jbot',
'simplejson',
'plone.restapi[test]',
'odict'
],
extras_require={
'test': [
'plone.app.testing',
# Plone KGS does not use this version, because it would break
# Remove if your package shall be part of coredev.
# plone_coredev tests as of 2016-04-01.
'plone.testing>=5.0.0',
'plone.app.contenttypes',
'plone.app.robotframework[debug]',
],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
| tareqalam/plone.formbuilder | setup.py | Python | gpl-3.0 | 1,905 | 0 |
# -*- coding: utf-8 -*-
"""
Industrial Dual Analog In Plugin
Copyright (C) 2015 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2015-2016 Matthias Bolte <matthias@tinkerforge.com>
industrial_dual_analog_in.py: Industrial Dual Analog In Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QHBoxLayout, QComboBox, QPushButton, QFrame, QDialog, QMessageBox
from PyQt5.QtCore import Qt
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_industrial_dual_analog_in import BrickletIndustrialDualAnalogIn
from brickv.plot_widget import PlotWidget, CurveValueWrapper
from brickv.async_call import async_call
from brickv.callback_emulator import CallbackEmulator
from brickv.utils import get_modeless_dialog_flags
from brickv.plugin_system.plugins.industrial_dual_analog_in.ui_calibration import Ui_Calibration
from brickv.utils import format_voltage
def is_int32(value):
return value >= -2147483648 and value <= 2147483647
class Calibration(QDialog, Ui_Calibration):
def __init__(self, parent):
QDialog.__init__(self, parent, get_modeless_dialog_flags())
self.parent = parent
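        # rolling buffers of the last 10 raw ADC samples per channel; their
        # averages drive the offset and gain calibration below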
self.values0 = [0] * 10
self.values1 = [0] * 10
self.values_index = 0
self.setupUi(self)
self.button_cal_remove.clicked.connect(self.remove_clicked)
self.button_cal_offset.clicked.connect(self.offset_clicked)
self.button_cal_gain.clicked.connect(self.gain_clicked)
self.button_close.clicked.connect(self.close)
self.cbe_adc_values = CallbackEmulator(self,
self.parent.analog_in.get_adc_values,
None,
self.cb_adc_values,
self.parent.increase_error_count)
def show(self):
QDialog.show(self)
self.cbe_adc_values.set_period(100)
self.current_offset0 = 0
self.current_offset1 = 0
self.current_gain0 = 0
self.current_gain1 = 0
self.update_calibration()
def update_calibration(self):
async_call(self.parent.analog_in.get_calibration, None, self.get_calibration_async, self.parent.increase_error_count)
def remove_clicked(self):
self.parent.analog_in.set_calibration((0, 0), (0, 0))
self.update_calibration()
def offset_clicked(self):
self.parent.analog_in.set_calibration((-sum(self.values0) // 10, -sum(self.values1) // 10), (self.current_gain0, self.current_gain1))
self.update_calibration()
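    # The constants in gain_clicked() below appear to convert the averaged raw
    # ADC reading into volts; the divisor differs depending on whether the
    # connected Bricklet firmware (>= 2.0.1) uses the fixed factory calibration.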
def gain_clicked(self):
try:
if self.parent.has_fixed_calibration:
measured0 = (sum(self.values0) / 10.0) * 244 / 44983
measured1 = (sum(self.values1) / 10.0) * 244 / 44983
else:
measured0 = (sum(self.values0) / 10.0) * 244 / 38588
measured1 = (sum(self.values1) / 10.0) * 244 / 38588
factor0 = self.spinbox_voltage_ch0.value()/measured0
factor1 = self.spinbox_voltage_ch1.value()/measured1
gain0 = int((factor0 - 1) * 2 ** 23)
gain1 = int((factor1 - 1) * 2 ** 23)
if not is_int32(gain0) or not is_int32(gain1):
raise ValueError("Out of range")
except:
QMessageBox.critical(self, "Failure during Calibration", "Calibration values are not in range.", QMessageBox.Ok)
return
self.parent.analog_in.set_calibration((self.current_offset0, self.current_offset1), (gain0, gain1))
self.update_calibration()
def get_calibration_async(self, cal):
self.current_offset0 = cal.offset[0]
self.current_offset1 = cal.offset[1]
self.current_gain0 = cal.gain[0]
self.current_gain1 = cal.gain[1]
self.label_offset0.setText(str(cal.offset[0]))
self.label_offset1.setText(str(cal.offset[1]))
self.label_gain0.setText(str(cal.gain[0]))
self.label_gain1.setText(str(cal.gain[1]))
def cb_adc_values(self, values):
self.values0[self.values_index] = values[0]
self.values1[self.values_index] = values[1]
self.values_index += 1
if self.values_index >= 10:
self.values_index = 0
self.label_adc0.setText(str(sum(self.values0) // 10))
self.label_adc1.setText(str(sum(self.values1) // 10))
def closeEvent(self, event):
self.parent.calibration_button.setEnabled(True)
self.cbe_adc_values.set_period(0)
class IndustrialDualAnalogIn(PluginBase):
def __init__(self, *args):
super().__init__(BrickletIndustrialDualAnalogIn, *args)
self.analog_in = self.device
# the firmware version of a EEPROM Bricklet can (under common circumstances)
# not change during the lifetime of an EEPROM Bricklet plugin. therefore,
# it's okay to make final decisions based on it here
self.has_fixed_calibration = self.firmware_version >= (2, 0, 1)
self.cbe_voltage0 = CallbackEmulator(self,
self.analog_in.get_voltage,
0,
self.cb_voltage,
self.increase_error_count,
pass_arguments_to_result_callback=True)
self.cbe_voltage1 = CallbackEmulator(self,
self.analog_in.get_voltage,
1,
self.cb_voltage,
self.increase_error_count,
pass_arguments_to_result_callback=True)
self.calibration = None
self.sample_rate_label = QLabel('Sample Rate:')
self.sample_rate_combo = QComboBox()
self.sample_rate_combo.addItem('976 Hz')
self.sample_rate_combo.addItem('488 Hz')
self.sample_rate_combo.addItem('244 Hz')
self.sample_rate_combo.addItem('122 Hz')
self.sample_rate_combo.addItem('61 Hz')
self.sample_rate_combo.addItem('4 Hz')
self.sample_rate_combo.addItem('2 Hz')
self.sample_rate_combo.addItem('1 Hz')
self.current_voltage = [CurveValueWrapper(), CurveValueWrapper()] # float, V
self.calibration_button = QPushButton('Calibration...')
self.sample_rate_combo.currentIndexChanged.connect(self.sample_rate_combo_index_changed)
self.calibration_button.clicked.connect(self.calibration_button_clicked)
plots = [('Channel 0', Qt.red, self.current_voltage[0], format_voltage),
('Channel 1', Qt.blue, self.current_voltage[1], format_voltage)]
self.plot_widget = PlotWidget('Voltage [V]', plots, y_resolution=0.001)
hlayout = QHBoxLayout()
hlayout.addWidget(self.sample_rate_label)
hlayout.addWidget(self.sample_rate_combo)
hlayout.addStretch()
hlayout.addWidget(self.calibration_button)
line = QFrame()
line.setObjectName("line")
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
layout = QVBoxLayout(self)
layout.addWidget(self.plot_widget)
layout.addWidget(line)
layout.addLayout(hlayout)
def start(self):
async_call(self.analog_in.get_sample_rate, None, self.get_sample_rate_async, self.increase_error_count)
self.cbe_voltage0.set_period(100)
self.cbe_voltage1.set_period(100)
self.plot_widget.stop = False
def stop(self):
self.cbe_voltage0.set_period(0)
self.cbe_voltage1.set_period(0)
self.plot_widget.stop = True
def destroy(self):
if self.calibration != None:
self.calibration.close()
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletIndustrialDualAnalogIn.DEVICE_IDENTIFIER
def get_voltage_value0(self):
return self.voltage_value[0]
def get_voltage_value1(self):
return self.voltage_value[1]
def calibration_button_clicked(self):
if self.calibration == None:
self.calibration = Calibration(self)
self.calibration_button.setEnabled(False)
self.calibration.show()
def sample_rate_combo_index_changed(self, index):
async_call(self.analog_in.set_sample_rate, index, None, self.increase_error_count)
def get_sample_rate_async(self, rate):
self.sample_rate_combo.setCurrentIndex(rate)
def cb_voltage(self, sensor, voltage):
self.current_voltage[sensor].value = voltage / 1000.0
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/industrial_dual_analog_in/industrial_dual_analog_in.py | Python | gpl-2.0 | 9,465 | 0.002959 |
###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = [ 'SystemInfoWdg', 'LinkLoadTestWdg' ,'ClearSideBarCache']
import os, platform, sys
from pyasm.common import Environment, Config, Common
from pyasm.security import Login
from tactic.ui.common import BaseRefreshWdg
from pyasm.web import DivWdg, Table, WebContainer, Widget, SpanWdg
from pyasm.search import Search
from pyasm.biz import Project
from pyasm.widget import CheckboxWdg, TextWdg
from pyasm.command import Command
from tactic.ui.widget import ActionButtonWdg
class SystemInfoWdg(BaseRefreshWdg):
def get_display(self):
top = DivWdg()
top.add_color("background", "background")
top.add_color("color", "color")
top.add_style("min-width: 600px")
os_name = os.name
top.set_unique_id()
top.add_smart_style("spt_info_title", "background", self.top.get_color("background3"))
top.add_smart_style("spt_info_title", "padding", "3px")
top.add_smart_style("spt_info_title", "font-weight", "bold")
# server
title_div = DivWdg()
top.add(title_div)
title_div.add("Server")
title_div.add_class("spt_info_title")
os_div = DivWdg()
top.add(os_div)
os_info = platform.uname()
try:
os_login = os.getlogin()
except Exception:
os_login = os.environ.get("LOGNAME")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
os_div.add(table)
for i, title in enumerate(['OS','Node Name','Release','Version','Machine']):
table.add_row()
td = table.add_cell("%s: " % title)
td.add_style("width: 150px")
table.add_cell( os_info[i] )
table.add_row()
table.add_cell("CPU Count: ")
try :
import multiprocessing
table.add_cell( multiprocessing.cpu_count() )
except (ImportError, NotImplementedError):
table.add_cell( "n/a" )
table.add_row()
table.add_cell("Login: ")
table.add_cell( os_login )
# python
title_div = DivWdg()
top.add(title_div)
title_div.add("Python")
title_div.add_class("spt_info_title")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Version: ")
td.add_style("width: 150px")
table.add_cell( sys.version )
# client
title_div = DivWdg()
top.add(title_div)
title_div.add("Client")
title_div.add_class("spt_info_title")
web = WebContainer.get_web()
user_agent = web.get_env("HTTP_USER_AGENT")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("User Agent: ")
td.add_style("width: 150px")
table.add_cell( user_agent )
table.add_row()
td = table.add_cell("TACTIC User: ")
table.add_cell( web.get_user_name() )
top.add('<br/>')
self.handle_load_balancing(top)
# performance test
top.add('<br/>')
title_div = DivWdg()
top.add(title_div)
title_div.add("Performance Test")
title_div.add_class("spt_info_title")
performance_wdg = PerformanceWdg()
top.add(performance_wdg)
top.add('<br/>')
# mail server
title_div = DivWdg()
top.add(title_div)
title_div.add("Mail Server")
title_div.add_class("spt_info_title")
table = Table(css='email_server')
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Server: ")
td.add_style("width: 150px")
mailserver = Config.get_value("services", "mailserver")
has_mailserver = True
if mailserver:
table.add_cell( mailserver )
else:
table.add_cell("None configured")
has_mailserver = False
login = Login.get_by_login('admin')
login_email = login.get_value('email')
table.add_row()
td = table.add_cell("From: ")
td.add_style("width: 150px")
text = TextWdg('email_from')
text.set_attr('size', '40')
text.set_value(login_email)
text.add_class('email_from')
table.add_cell(text)
table.add_row()
td = table.add_cell("To: ")
td.add_style("width: 150px")
text = TextWdg('email_to')
text.set_attr('size', '40')
text.add_class('email_to')
text.set_value(login_email)
table.add_cell(text)
button = ActionButtonWdg(title='Email Send Test')
table.add_row_cell('<br/>')
table.add_row()
table.add_cell(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'has_mailserver': has_mailserver,
'cbjs_action': '''
if (!bvr.has_mailserver) {
spt.alert('You have to fill in mailserver and possibly other mail related options in the TACTIC config file to send email.');
return;
}
var s = TacticServerStub.get();
try {
spt.app_busy.show('Sending email');
var from_txt = bvr.src_el.getParent('.email_server').getElement('.email_from');
var to_txt = bvr.src_el.getParent('.email_server').getElement('.email_to');
var rtn = s.execute_cmd('pyasm.command.EmailTriggerTestCmd',
{'sender_email': from_txt.value,
'recipient_emails': to_txt.value.split(','),
'msg': 'Simple Email Test by TACTIC'}
);
if (rtn.status == 'OK') {
spt.info("Email sent successfully to " + to_txt.value)
}
} catch(e) {
spt.alert(spt.exception.handler(e));
}
spt.app_busy.hide();
'''
})
top.add('<br/>')
self.handle_directories(top)
#table.add_row()
#td = table.add_cell("TACTIC User: ")
#table.add_cell( web.get_user_name() )
top.add('<br/>')
top.add(DivWdg('Link Test', css='spt_info_title'))
top.add('<br/>')
top.add(LinkLoadTestWdg())
top.add('<br/>')
self.handle_python_script_test(top)
top.add('<br/>')
self.handle_sidebar_clear(top)
return top
def handle_directories(self, top):
# deal with asset directories
top.add(DivWdg('Asset Folders', css='spt_info_title'))
mailserver = Config.get_value("services", "mailserver")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("asset_base_dir: ")
td.add_style("width: 150px")
asset_base_dir = Config.get_value("checkin", "asset_base_dir")
if asset_base_dir:
table.add_cell( asset_base_dir )
tr = table.add_row()
tr.add_style('border-bottom: 1px #bbb solid')
# check if it is writable
is_writable = os.access(asset_base_dir, os.W_OK)
span = SpanWdg("writable:")
span.add_style('padding-left: 20px')
td = table.add_cell(span)
td = table.add_cell(str(is_writable))
else:
table.add_cell( "None configured")
client_os = Environment.get_env_object().get_client_os()
if os.name == 'nt':
os_name = 'win32'
else:
os_name = 'linux'
if client_os == 'nt':
client_os_name = 'win32'
else:
client_os_name = 'linux'
env = Environment.get()
client_handoff_dir = env.get_client_handoff_dir(include_ticket=False, no_exception=True)
client_asset_dir = env.get_client_repo_dir()
table.add_row()
td = table.add_cell("%s_server_handoff_dir: " % os_name)
td.add_style("width: 150px")
handoff_dir = Config.get_value("checkin", "%s_server_handoff_dir" % os_name)
if handoff_dir:
table.add_cell( handoff_dir )
table.add_row()
# check if it is writable
is_writable = os.access(handoff_dir, os.W_OK)
span = SpanWdg("writable:")
span.add_style('padding-left: 20px')
td = table.add_cell(span)
td = table.add_cell(str(is_writable))
else:
table.add_cell( "None configured")
table.add_row()
td = table.add_cell("%s hand-off test: " % client_os_name)
td.add_style("width: 150px")
button = ActionButtonWdg(title='Test')
button.add_behavior( {
'type': 'click_up',
'handoff_dir': client_handoff_dir,
'asset_dir': client_asset_dir,
'cbjs_action': '''
var env = spt.Environment.get();
var applet = spt.Applet.get();
var handoff_state = applet.exists(bvr.handoff_dir);
var asset_state = applet.exists(bvr.asset_dir);
if (asset_state == false) {
env.set_transfer_mode("web");
spt.error('client repo directory is not accessible: ' + bvr.asset_dir);
}
else if (handoff_state == false) {
env.set_transfer_mode("web");
spt.error('client handoff directory is not accessible: ' + bvr.handoff_dir);
}
else {
env.set_transfer_mode("copy");
spt.info('<div>client handoff directory: ' + bvr.handoff_dir + '</div><br/><div>client repo directory :' + bvr.asset_dir + '</div><br/><div> can be successfully accessed.</div>', {type:'html'});
}
'''
} )
table.add_cell( button )
def handle_python_script_test(self, top):
top.add(DivWdg('Python Script Test', css='spt_info_title'))
table = Table(css='script')
table.add_color("color", "color")
table.add_style("margin: 10px")
table.add_style("width: 100%")
top.add(table)
table.add_row()
td = table.add_cell("Script Path: ")
td.add_style("width: 150px")
text = TextWdg('script_path')
td = table.add_cell(text)
button = ActionButtonWdg(title='Run')
table.add_cell(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var s = TacticServerStub.get();
try {
var path = bvr.src_el.getParent('.script').getElement('.spt_input').value;
if (! path)
throw('Please enter a valid script path');
s.execute_cmd('tactic.command.PythonCmd', {script_path: path});
} catch(e) {
spt.alert(spt.exception.handler(e));
}
'''
})
def handle_load_balancing(self, top):
# deal with asset directories
top.add(DivWdg('Load Balancing', css='spt_info_title'))
table = Table()
table.add_class("spt_loadbalance")
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Load Balancing: ")
td.add_style("width: 150px")
button = ActionButtonWdg(title='Test')
td = table.add_cell(button)
message_div = DivWdg()
message_div.add_class("spt_loadbalance_message")
table.add_cell(message_div)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var server = TacticServerStub.get()
var ports = {};
var count = 0;
for (var i = 0; i < 50; i++) {
var info = server.get_connection_info();
var port = info.port;
var num = ports[port];
if (!num) {
ports[port] = 1;
count += 1;
}
else {
ports[port] += 1;
}
// if there are 10 requests and still only one, then break
if (i == 10 && count == 1)
break;
}
// build the ports string
x = [];
for (i in ports) {
x.push(i);
}
x.sort();
x = x.join(", ");
var loadbalance_el = bvr.src_el.getParent(".spt_loadbalance");
var message_el = loadbalance_el.getElement(".spt_loadbalance_message");
if (count > 1) {
var message = "Yes (found " + count + " ports: "+x+")";
}
else {
var message = "<blink style='background: red; padding: 3px'>Not enabled (found only port " + x + ")</blink>";
}
message_el.innerHTML = message
'''
} )
def handle_sidebar_clear(self, top):
top.add(DivWdg('Clear Side Bar Cache ', css='spt_info_title'))
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Clear the Side Bar Cache for all users")
td.add_style("width: 250px")
button = ActionButtonWdg(title='Run')
table.add_cell(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
try {
var s = TacticServerStub.get();
s.execute_cmd('tactic.ui.app.ClearSideBarCache');
} catch(e) {
spt.alert(spt.exception.handler(e));
}
spt.info('Side Bar cache cleared.')
'''
})
class ClearSideBarCache(Command):
def execute(self):
tmp_dir = Environment.get_tmp_dir()
# remove the sidebar cache
sidebar_cache_dir = "%s/cache/side_bar" % tmp_dir
if os.path.exists(sidebar_cache_dir):
import shutil
shutil.rmtree(sidebar_cache_dir)
class LinkLoadTestWdg(BaseRefreshWdg):
'''Load Pages in popup as part of a testing process'''
def get_display(self):
config_search_type = "config/widget_config"
configs = []
all_element_names = []
from tactic.ui.panel import SideBarBookmarkMenuWdg
SideBarBookmarkMenuWdg.add_internal_config(configs, ['definition'])
for internal_config in configs:
all_element_names = internal_config.get_element_names()
search = Search(config_search_type)
search.add_filter("search_type", 'SideBarWdg')
search.add_filter("view", 'definition')
search.add_filter("login", None)
config = search.get_sobject()
element_names = []
if config:
element_names = config.get_element_names()
for name in element_names:
if 'separator' in name:
element_names.remove(name)
all_element_names.extend(element_names)
all_element_names = [str(name) for name in all_element_names]
all_element_names = Common.get_unique_list(all_element_names)
widget = DivWdg(css='spt_load_test_top')
span = SpanWdg('This loads all the pages defined in the Project views in popups. It will take a few minutes.')
widget.add(span)
widget.add('<br/>')
div = ActionButtonWdg(title='Run')
web = WebContainer.get_web()
base_url = web.get_base_url().to_string()
base_url = '%s/tactic/%s' %(base_url, Project.get_project_code())
div.add_behavior({'type': 'click_up',
'cbjs_action': '''
var element_names = eval(%s);
var all_element_names = eval(%s);
var top = spt.get_parent(bvr.src_el, '.spt_load_test_top');
var cb = spt.get_element(top, '.spt_input')
if (cb.checked)
element_list = all_element_names;
else
element_list = element_names
for (var k=0; k < element_list.length; k++) {
var name = element_list[k];
//if (k > 3) break;
var url = '%s/#/link/' + name;
var bvr2 = {
title: name,
target_id: 'TEST',
options: {'link': name,
'title': name,
'path': '/Link Test/' + name
},
is_popup: true};
spt.side_bar.display_link_cbk(null, bvr2);
}
''' %(element_names, all_element_names, base_url)})
widget.add('<br/>')
cb = CheckboxWdg('include_internal', label='include built-in pages')
span = SpanWdg(cb, css='med')
span.add_color('color','color')
widget.add(span)
widget.add(div)
widget.add('<br/>')
widget.add('<br/>')
return widget
class PerformanceWdg(BaseRefreshWdg):
def get_display(self):
top = self.top
top.add("<br/>")
top.add_style("margin-left: 10px")
try:
import multiprocessing
cpu_count = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
cpu_count = 'n/a'
title = DivWdg()
title.add("Click to start performance test: ")
title.add_style("float: left")
top.add(title)
title.add_style("margin-top: 5px")
button = ActionButtonWdg(title='Test')
top.add(button)
button.add_behavior( {
'type': 'click_up',
'cpu_count': cpu_count,
'cbjs_action': '''
var iterations = bvr.cpu_count;
if (iterations == 'n/a')
iterations = 1;
var server = TacticServerStub.get();
var class_name = 'tactic.ui.panel.ViewPanelWdg';
var kwargs = {
'search_type': 'sthpw/login',
'view': 'table'
};
var args = {
'args': kwargs,
'cbjs_action': function() {
            spt.app_busy.show("Asynchronous Test", "Running Test ["+(count+1)+" / "+iterations+"]");
count += 1;
var time = new Date().getTime() - start;
if (time > async_avg) {
async_avg = time;
}
if (count == iterations) {
spt.app_busy.hide();
async_avg = async_avg / iterations;
alert("async: "+ async_avg + " ms");
}
}
};
var sync_avg = 0.0;
for (var i = 0; i < iterations; i++) {
            spt.app_busy.show("Synchronous Requests", "Running Test ["+(i+1)+" / "+iterations+"]");
var start = new Date().getTime();
server.get_widget(class_name, args);
var time = new Date().getTime() - start;
sync_avg += time;
}
sync_avg = sync_avg / iterations;
spt.app_busy.hide();
alert("sync: " + sync_avg + " ms");
var async_avg = 0.0;
var count = 0;
            spt.app_busy.show("Asynchronous Requests", "Running Test ["+(count+1)+" / "+iterations+"]");
var start = new Date().getTime();
for (var i = 0; i < iterations; i++) {
server.async_get_widget(class_name, args);
}
'''
} )
return top
| Southpaw-TACTIC/TACTIC | src/tactic/ui/app/system_info_wdg.py | Python | epl-1.0 | 19,736 | 0.007499 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/structure/shared_generic_house_player_small_style_02_floorplan_02.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/draft_schematic/structure/shared_generic_house_player_small_style_02_floorplan_02.py | Python | mit | 487 | 0.045175 |
# Borrowed liberally from awscli.
"""
AWS-NMAP
----
A Universal Command Line Environment for Amazon Web Services.
"""
import os
__version__ = '0.0.1'
#
# Get our data path to be added to botocore's search path
#
_awscli_data_path = []
if 'AWS_DATA_PATH' in os.environ:
for path in os.environ['AWS_DATA_PATH'].split(os.pathsep):
path = os.path.expandvars(path)
path = os.path.expanduser(path)
_awscli_data_path.append(path)
_awscli_data_path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
)
os.environ['AWS_DATA_PATH'] = os.pathsep.join(_awscli_data_path)
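# Illustrative result (hypothetical value): if AWS_DATA_PATH was already set to
# /opt/aws/data, the value written back above is
#   "/opt/aws/data" + os.pathsep + "<directory of this package>/data"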
EnvironmentVariables = {
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
'output': ('output', 'AWS_DEFAULT_OUTPUT', 'json', None),
}
SCALAR_TYPES = set([
'string', 'float', 'integer', 'long', 'boolean', 'double',
'blob', 'timestamp'
])
COMPLEX_TYPES = set(['structure', 'map', 'list'])
| sporkmonger/aws-nmap | awsnmap/__init__.py | Python | apache-2.0 | 930 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2017/10/19 12:52
# @Author : xxc727xxc (xxc727xxc@foxmail.com)
# @Version : 1.0.0
from datetime import datetime
import time
from core.template.jinja2.init import jinja_filter
@jinja_filter('datetime')
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return '1分钟前'
if delta < 3600:
return '%s分钟前' % (delta // 60)
if delta < 86400:
return '%s小时前' % (delta // 3600)
if delta < 604800:
return '%s天前' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return '%s年%s月%s日' % (dt.year, dt.month, dt.day)
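# Illustrative behaviour (arguments are seconds since the epoch):
#   datetime_filter(time.time() - 90)   -> '1分钟前'  ("1 minute ago")
#   datetime_filter(time.time() - 7200) -> '2小时前'  ("2 hours ago")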
| DreamerBear/awesome-py3-webapp | www/core/template/jinja2/filters.py | Python | gpl-3.0 | 667 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid escape sequences common to curses_display and raw_display
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from __future__ import division, print_function
"""
Terminal Escape Sequences for input and display
"""
import re
try:
from urwid import str_util
except ImportError:
from urwid import old_str_util as str_util
from urwid.compat import bytes, bytes3
# NOTE: because of circular imports (urwid.util -> urwid.escape -> urwid.util)
# from urwid.util import is_mouse_event -- will not work here
import urwid.util
within_double_byte = str_util.within_double_byte
SO = "\x0e"
SI = "\x0f"
IBMPC_ON = "\x1b[11m"
IBMPC_OFF = "\x1b[10m"
DEC_TAG = "0"
DEC_SPECIAL_CHARS = u'▮◆▒␉␌␍␊°±␋┘┐┌└┼⎺⎻─⎼⎽├┤┴┬│≤≥π≠£·'
ALT_DEC_SPECIAL_CHARS = u"_`abcdefghijklmnopqrstuvwxyz{|}~"
DEC_SPECIAL_CHARMAP = {}
assert len(DEC_SPECIAL_CHARS) == len(ALT_DEC_SPECIAL_CHARS), repr((DEC_SPECIAL_CHARS, ALT_DEC_SPECIAL_CHARS))
for c, alt in zip(DEC_SPECIAL_CHARS, ALT_DEC_SPECIAL_CHARS):
DEC_SPECIAL_CHARMAP[ord(c)] = SO + alt + SI
SAFE_ASCII_DEC_SPECIAL_RE = re.compile(u"^[ -~%s]*$" % DEC_SPECIAL_CHARS)
DEC_SPECIAL_RE = re.compile(u"[%s]" % DEC_SPECIAL_CHARS)
###################
## Input sequences
###################
class MoreInputRequired(Exception):
pass
def escape_modifier( digit ):
mode = ord(digit) - ord("1")
return "shift "*(mode&1) + "meta "*((mode&2)//2) + "ctrl "*((mode&4)//4)
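# Worked examples of the helper above ("1" encodes "no modifier"):
#   escape_modifier("2") == "shift "
#   escape_modifier("5") == "ctrl "
#   escape_modifier("8") == "shift meta ctrl "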
input_sequences = [
('[A','up'),('[B','down'),('[C','right'),('[D','left'),
('[E','5'),('[F','end'),('[G','5'),('[H','home'),
('[1~','home'),('[2~','insert'),('[3~','delete'),('[4~','end'),
('[5~','page up'),('[6~','page down'),
('[7~','home'),('[8~','end'),
('[[A','f1'),('[[B','f2'),('[[C','f3'),('[[D','f4'),('[[E','f5'),
('[11~','f1'),('[12~','f2'),('[13~','f3'),('[14~','f4'),
('[15~','f5'),('[17~','f6'),('[18~','f7'),('[19~','f8'),
('[20~','f9'),('[21~','f10'),('[23~','f11'),('[24~','f12'),
('[25~','f13'),('[26~','f14'),('[28~','f15'),('[29~','f16'),
('[31~','f17'),('[32~','f18'),('[33~','f19'),('[34~','f20'),
('OA','up'),('OB','down'),('OC','right'),('OD','left'),
('OH','home'),('OF','end'),
('OP','f1'),('OQ','f2'),('OR','f3'),('OS','f4'),
('Oo','/'),('Oj','*'),('Om','-'),('Ok','+'),
('[Z','shift tab'),
('On', '.'),
('[200~', 'begin paste'), ('[201~', 'end paste'),
] + [
(prefix + letter, modifier + key)
for prefix, modifier in zip('O[', ('meta ', 'shift '))
for letter, key in zip('abcd', ('up', 'down', 'right', 'left'))
] + [
("[" + digit + symbol, modifier + key)
for modifier, symbol in zip(('shift ', 'meta '), '$^')
for digit, key in zip('235678',
('insert', 'delete', 'page up', 'page down', 'home', 'end'))
] + [
('O' + chr(ord('p')+n), str(n)) for n in range(10)
] + [
# modified cursor keys + home, end, 5 -- [#X and [1;#X forms
(prefix+digit+letter, escape_modifier(digit) + key)
for prefix in ("[", "[1;")
for digit in "12345678"
for letter,key in zip("ABCDEFGH",
('up','down','right','left','5','end','5','home'))
] + [
# modified F1-F4 keys -- O#X form
("O"+digit+letter, escape_modifier(digit) + key)
for digit in "12345678"
for letter,key in zip("PQRS",('f1','f2','f3','f4'))
] + [
# modified F1-F13 keys -- [XX;#~ form
("["+str(num)+";"+digit+"~", escape_modifier(digit) + key)
for digit in "12345678"
for num,key in zip(
(3,5,6,11,12,13,14,15,17,18,19,20,21,23,24,25,26,28,29,31,32,33,34),
('delete', 'page up', 'page down',
'f1','f2','f3','f4','f5','f6','f7','f8','f9','f10','f11',
'f12','f13','f14','f15','f16','f17','f18','f19','f20'))
] + [
# mouse reporting (special handling done in KeyqueueTrie)
('[M', 'mouse'),
# mouse reporting for SGR 1006
('[<', 'sgrmouse'),
# report status response
('[0n', 'status ok')
]
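# A couple of the entries generated by the comprehensions above, for
# readability (both are xterm-style sequences):
#   ("[1;5A", "ctrl up")       -- modified cursor key, [1;#X form
#   ("[3;2~", "shift delete")  -- modified editing key, [XX;#~ form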
class KeyqueueTrie(object):
def __init__( self, sequences ):
self.data = {}
for s, result in sequences:
assert type(result) != dict
self.add(self.data, s, result)
def add(self, root, s, result):
assert type(root) == dict, "trie conflict detected"
assert len(s) > 0, "trie conflict detected"
if ord(s[0]) in root:
return self.add(root[ord(s[0])], s[1:], result)
if len(s)>1:
d = {}
root[ord(s[0])] = d
return self.add(d, s[1:], result)
root[ord(s)] = result
def get(self, keys, more_available):
result = self.get_recurse(self.data, keys, more_available)
if not result:
result = self.read_cursor_position(keys, more_available)
return result
def get_recurse(self, root, keys, more_available):
if type(root) != dict:
if root == "mouse":
return self.read_mouse_info(keys,
more_available)
elif root == "sgrmouse":
return self.read_sgrmouse_info (keys, more_available)
return (root, keys)
if not keys:
# get more keys
if more_available:
raise MoreInputRequired()
return None
if keys[0] not in root:
return None
return self.get_recurse(root[keys[0]], keys[1:], more_available)
def read_mouse_info(self, keys, more_available):
if len(keys) < 3:
if more_available:
raise MoreInputRequired()
return None
b = keys[0] - 32
x, y = (keys[1] - 33)%256, (keys[2] - 33)%256 # supports 0-255
prefix = ""
if b & 4: prefix = prefix + "shift "
if b & 8: prefix = prefix + "meta "
if b & 16: prefix = prefix + "ctrl "
if (b & MOUSE_MULTIPLE_CLICK_MASK)>>9 == 1: prefix = prefix + "double "
if (b & MOUSE_MULTIPLE_CLICK_MASK)>>9 == 2: prefix = prefix + "triple "
# 0->1, 1->2, 2->3, 64->4, 65->5
button = ((b&64)//64*3) + (b & 3) + 1
if b & 3 == 3:
action = "release"
button = 0
elif b & MOUSE_RELEASE_FLAG:
action = "release"
elif b & MOUSE_DRAG_FLAG:
action = "drag"
elif b & MOUSE_MULTIPLE_CLICK_MASK:
action = "click"
else:
action = "press"
return ( (prefix + "mouse " + action, button, x, y), keys[3:] )
def read_sgrmouse_info(self, keys, more_available):
# Helpful links:
# https://stackoverflow.com/questions/5966903/how-to-get-mousemove-and-mouseclick-in-bash
# http://invisible-island.net/xterm/ctlseqs/ctlseqs.pdf
if not keys:
if more_available:
raise MoreInputRequired()
return None
value = ''
pos_m = 0
found_m = False
for k in keys:
            value = value + chr(k)
            if (k == ord('M')) or (k == ord('m')):
                found_m = True
                break
pos_m += 1
if not found_m:
if more_available:
raise MoreInputRequired()
return None
(b, x, y) = value[:-1].split(';')
# shift, meta, ctrl etc. is not communicated on my machine, so I
# can't and won't be able to add support for it.
# Double and triple clicks are not supported as well. They can be
# implemented by using a timer. This timer can check if the last
# registered click is below a certain threshold. This threshold
# is normally set in the operating system itself, so setting one
# here will cause an inconsistent behaviour. I do not plan to use
# that feature, so I won't implement it.
button = ((int(b) & 64) // 64 * 3) + (int(b) & 3) + 1
x = int(x) - 1
y = int(y) - 1
if (value[-1] == 'M'):
if int(b) & MOUSE_DRAG_FLAG:
action = "drag"
else:
action = "press"
else:
action = "release"
return ( ("mouse " + action, button, x, y), keys[pos_m + 1:] )
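    # Illustrative SGR(1006) decoding (assuming an xterm-style report): for
    # ESC [ < 0 ; 11 ; 6 M the input trie strips "[<" and this method receives
    # keys == [ord(c) for c in "0;11;6M"], returning (("mouse press", 1, 10, 5), []).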
def read_cursor_position(self, keys, more_available):
"""
Interpret cursor position information being sent by the
user's terminal. Returned as ('cursor position', x, y)
where (x, y) == (0, 0) is the top left of the screen.
"""
if not keys:
if more_available:
raise MoreInputRequired()
return None
if keys[0] != ord('['):
return None
# read y value
y = 0
i = 1
for k in keys[i:]:
i += 1
if k == ord(';'):
if not y:
return None
break
if k < ord('0') or k > ord('9'):
return None
if not y and k == ord('0'):
return None
y = y * 10 + k - ord('0')
if not keys[i:]:
if more_available:
raise MoreInputRequired()
return None
# read x value
x = 0
for k in keys[i:]:
i += 1
if k == ord('R'):
if not x:
return None
return (("cursor position", x-1, y-1), keys[i:])
if k < ord('0') or k > ord('9'):
return None
if not x and k == ord('0'):
return None
x = x * 10 + k - ord('0')
if not keys[i:]:
if more_available:
raise MoreInputRequired()
return None
# This is added to button value to signal mouse release by curses_display
# and raw_display when we know which button was released. NON-STANDARD
MOUSE_RELEASE_FLAG = 2048
# This 2-bit mask is used to check if the mouse release from curses or gpm
# is a double or triple release. 00 means single click, 01 double,
# 10 triple. NON-STANDARD
MOUSE_MULTIPLE_CLICK_MASK = 1536
# This is added to button value at mouse release to differentiate between
# single, double and triple press. Double release adds this times one,
# triple release adds this times two. NON-STANDARD
MOUSE_MULTIPLE_CLICK_FLAG = 512
# xterm adds this to the button value to signal a mouse drag event
MOUSE_DRAG_FLAG = 32
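# Worked example of the classic (non-SGR) report read by
# KeyqueueTrie.read_mouse_info above: after the "[M" prefix, the payload bytes
# chr(32 + 0), chr(33 + 10), chr(33 + 5) decode to ("mouse press", 1, 10, 5);
# adding MOUSE_DRAG_FLAG to the first byte (chr(32 + 32)) makes it "mouse drag".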
#################################################
# Build the input trie from input_sequences list
input_trie = KeyqueueTrie(input_sequences)
#################################################
_keyconv = {
-1:None,
8:'backspace',
9:'tab',
10:'enter',
13:'enter',
127:'backspace',
# curses-only keycodes follow.. (XXX: are these used anymore?)
258:'down',
259:'up',
260:'left',
261:'right',
262:'home',
263:'backspace',
265:'f1', 266:'f2', 267:'f3', 268:'f4',
269:'f5', 270:'f6', 271:'f7', 272:'f8',
273:'f9', 274:'f10', 275:'f11', 276:'f12',
277:'shift f1', 278:'shift f2', 279:'shift f3', 280:'shift f4',
281:'shift f5', 282:'shift f6', 283:'shift f7', 284:'shift f8',
285:'shift f9', 286:'shift f10', 287:'shift f11', 288:'shift f12',
330:'delete',
331:'insert',
338:'page down',
339:'page up',
343:'enter', # on numpad
350:'5', # on numpad
360:'end',
}
def process_keyqueue(codes, more_available):
"""
codes -- list of key codes
more_available -- if True then raise MoreInputRequired when in the
middle of a character sequence (escape/utf8/wide) and caller
will attempt to send more key codes on the next call.
returns (list of input, list of remaining key codes).
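    A few illustrative calls (the first two are exact, the third assumes the
    detected byte encoding is utf8):
        process_keyqueue([ord('h')], False)                -> (['h'], [])
        process_keyqueue([27, ord('['), ord('A')], False)  -> (['up'], [])
        process_keyqueue([0xc3, 0xa9], False)              -> ([u'é'], [])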
"""
code = codes[0]
if code >= 32 and code <= 126:
key = chr(code)
return [key], codes[1:]
if code in _keyconv:
return [_keyconv[code]], codes[1:]
if code >0 and code <27:
return ["ctrl %s" % chr(ord('a')+code-1)], codes[1:]
if code >27 and code <32:
return ["ctrl %s" % chr(ord('A')+code-1)], codes[1:]
em = str_util.get_byte_encoding()
if (em == 'wide' and code < 256 and
within_double_byte(chr(code),0,0)):
if not codes[1:]:
if more_available:
raise MoreInputRequired()
if codes[1:] and codes[1] < 256:
db = chr(code)+chr(codes[1])
if within_double_byte(db, 0, 1):
return [db], codes[2:]
if em == 'utf8' and code>127 and code<256:
if code & 0xe0 == 0xc0: # 2-byte form
need_more = 1
elif code & 0xf0 == 0xe0: # 3-byte form
need_more = 2
elif code & 0xf8 == 0xf0: # 4-byte form
need_more = 3
else:
return ["<%d>"%code], codes[1:]
for i in range(need_more):
if len(codes)-1 <= i:
if more_available:
raise MoreInputRequired()
else:
return ["<%d>"%code], codes[1:]
k = codes[i+1]
if k>256 or k&0xc0 != 0x80:
return ["<%d>"%code], codes[1:]
s = bytes3(codes[:need_more+1])
assert isinstance(s, bytes)
try:
return [s.decode("utf-8")], codes[need_more+1:]
except UnicodeDecodeError:
return ["<%d>"%code], codes[1:]
if code >127 and code <256:
key = chr(code)
return [key], codes[1:]
if code != 27:
return ["<%d>"%code], codes[1:]
result = input_trie.get(codes[1:], more_available)
if result is not None:
result, remaining_codes = result
return [result], remaining_codes
if codes[1:]:
# Meta keys -- ESC+Key form
run, remaining_codes = process_keyqueue(codes[1:],
more_available)
if urwid.util.is_mouse_event(run[0]):
return ['esc'] + run, remaining_codes
if run[0] == "esc" or run[0].find("meta ") >= 0:
return ['esc']+run, remaining_codes
return ['meta '+run[0]]+run[1:], remaining_codes
return ['esc'], codes[1:]
####################
## Output sequences
####################
ESC = "\x1b"
CURSOR_HOME = ESC+"[H"
CURSOR_HOME_COL = "\r"
APP_KEYPAD_MODE = ESC+"="
NUM_KEYPAD_MODE = ESC+">"
SWITCH_TO_ALTERNATE_BUFFER = ESC+"7"+ESC+"[?47h"
RESTORE_NORMAL_BUFFER = ESC+"[?47l"+ESC+"8"
#RESET_SCROLL_REGION = ESC+"[;r"
#RESET = ESC+"c"
REPORT_STATUS = ESC + "[5n"
REPORT_CURSOR_POSITION = ESC+"[6n"
INSERT_ON = ESC + "[4h"
INSERT_OFF = ESC + "[4l"
def set_cursor_position( x, y ):
assert type(x) == int
assert type(y) == int
return ESC+"[%d;%dH" %(y+1, x+1)
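# e.g. set_cursor_position(0, 0) == ESC + "[1;1H" -- the escape sequence is
# 1-based while urwid's coordinates are 0-based.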
def move_cursor_right(x):
if x < 1: return ""
return ESC+"[%dC" % x
def move_cursor_up(x):
if x < 1: return ""
return ESC+"[%dA" % x
def move_cursor_down(x):
if x < 1: return ""
return ESC+"[%dB" % x
HIDE_CURSOR = ESC+"[?25l"
SHOW_CURSOR = ESC+"[?25h"
MOUSE_TRACKING_ON = ESC+"[?1000h"+ESC+"[?1002h"+ESC+"[?1006h"
MOUSE_TRACKING_OFF = ESC+"[?1006l"+ESC+"[?1002l"+ESC+"[?1000l"
DESIGNATE_G1_SPECIAL = ESC+")0"
ERASE_IN_LINE_RIGHT = ESC+"[K"
| urwid/urwid | urwid/escape.py | Python | lgpl-2.1 | 15,967 | 0.01691 |
import metrics
import node
import slm_ratios as slm
version = "1.0"
slm.set_clks_event_name("CPU_CLK_UNHALTED.THREAD")
smt_enabled = False
class CyclesPerUop(slm.CyclesPerUop):
server = True
# LEVEL 1
class FrontendBound(slm.FrontendBound):
server = True
class BackendBound(slm.BackendBound):
server = True
class BadSpeculation(slm.BadSpeculation):
server = True
class Retiring(slm.Retiring):
server = True
# LEVEL 2
class FrontendLatency(slm.FrontendLatency):
server = True
# LEVEL 3
class ICacheMisses(slm.ICacheMisses):
server = True
# Override _compute(), since KNL does not have
# the DECODE_RESTRICTION.PDCACHE_WRONG event
def _compute(self, ev):
return slm.icache_line_fetch_cost(ev, self.level)
class ITLBMisses(slm.ITLBMisses):
server = True
class MSSwitches(slm.MSSwitches):
server = True
class Setup(object):
def __init__(self, runner):
# Instantiate nodes as required to be able to specify their
# references
# L3 objects
icache_misses = ICacheMisses()
itlb_misses = ITLBMisses()
ms_cost = MSSwitches()
#L1 objects
frontend = FrontendBound()
bad_speculation = BadSpeculation()
retiring = Retiring()
backend = BackendBound(retiring=retiring,
bad_speculation=bad_speculation,
frontend=frontend)
# L2 objects
frontend_latency = FrontendLatency(icache_misses=icache_misses,
itlb=itlb_misses,
ms_cost=ms_cost,
frontend=frontend
)
# Set parents
node.set_parent(None, [frontend, bad_speculation, retiring, backend])
node.set_parent(frontend, [frontend_latency])
node.set_parent(frontend_latency,
[icache_misses, itlb_misses, ms_cost])
# User visible metrics
user_metrics = [slm.Metric_IPC(), slm.Metric_CPI(),
slm.Metric_TurboUtilization(),
slm.Metric_CLKS(), slm.Metric_Time(),
slm.CyclesPerUop()]
nodes = [obj for obj in locals().values()
if issubclass(obj.__class__, metrics.MetricBase) and
obj.level > 0]
nodes = sorted(nodes, key=lambda n: n.level)
# Pass to runner
list(map(runner.run, nodes))
list(map(runner.metric, user_metrics))
| andikleen/pmu-tools | knl_ratios.py | Python | gpl-2.0 | 2,582 | 0.005035 |
# -*- coding:utf-8 -*-
import argparse
from nlp.ner.idcnn.train import train
from nlp.ner.idcnn.predict import predict
def main(args):
if args.train:
train(args)
else:
predict(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
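    # Note on the type=bool flags below (standard argparse behaviour, not a
    # change to this script): bool() of any non-empty string is True, so
    # passing e.g. "--train False" still enables training; only an empty
    # string gives False.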
parser.add_argument('--train', type=bool, default=True, help="Whether train the model")
parser.add_argument('--clean', type=bool, default=True, help="Whether clean the model")
parser.add_argument('--ckpt_path', type=str, default="ckpt", help="Path to save model")
parser.add_argument('--log_path', type=str, default="train.log", help="File for log")
parser.add_argument('--vocab_path', type=str, default="vocab.json", help="Path to vocab file")
parser.add_argument('--config_path', type=str, default="config_file", help="File for config")
parser.add_argument('--script', type=str, default="conlleval", help="evaluation script")
parser.add_argument('--result_path', type=str, default="result", help="Path to result")
parser.add_argument('--emb_file', type=str, default="vec.txt", help="Path for pre_trained embedding")
parser.add_argument('--train_file', type=str, default="train.txt", help="Path for train data")
parser.add_argument('--dev_file', type=str, default="dev.txt", help="Path for dev data")
parser.add_argument('--test_file', type=str, default="test.txt", help="Path for test data")
parser.add_argument('--raw_file', type=str, default="example.raw", help="Path for predict data")
parser.add_argument('--model_type', type=str, default="bilstm", help="Model type, can be idcnn or bilstm")
parser.add_argument('--seg_dim', type=int, default=50, help="Embedding size for segmentation, 0 if not used")
parser.add_argument('--char_dim', type=int, default=100, help="Embedding size for characters")
parser.add_argument('--lstm_dim', type=int, default=100, help="Num of hidden units in LSTM, or num of filters in IDCNN")
parser.add_argument('--tag_schema', type=str, default="iobes", help="tagging schema iobes or iob")
parser.add_argument('--clip', type=int, default=5, help="Gradient clip")
parser.add_argument('--dropout', type=float, default=0.5, help="Dropout rate")
parser.add_argument('--batch_size', type=int, default=20, help="batch size")
parser.add_argument('--lr', type=float, default=0.001, help="Initial learning rate")
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--pre_emb', type=bool, default=True, help="Whether use pre-trained embedding")
parser.add_argument('--zeros', type=bool, default=False, help="Whether replace digits with zero")
parser.add_argument('--lower', type=bool, default=True, help="Whether lower case")
parser.add_argument('--max_epoch', type=int, default=4, help="maximum training epochs")
parser.add_argument('--steps_check', type=int, default=100, help="steps per checkpoint")
args = parser.parse_args()
main(args)
| koala-ai/tensorflow_nlp | nlp/ner/idcnn/run.py | Python | apache-2.0 | 3,000 | 0.009333 |
from nose.tools import (raises, assert_raises, assert_true,
assert_equal, assert_not_equal, assert_almost_equal)
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from deepjets.preprocessing import zoom_image, pixel_edges
def test_zoom():
edges = pixel_edges(jet_size=1, pixel_size=(0.1, 0.1), border_size=0.25)
assert_equal(edges[0].shape, (26,))
assert_equal(edges[1].shape, (26,))
image, _, _ = np.histogram2d(
np.random.normal(0, 1, 1000), np.random.normal(0, 1, 1000),
bins=(edges[0], edges[1]))
assert_true(image.sum() > 0)
assert_equal(image.shape, (25, 25))
# zooming with factor 1 should not change anything
image_zoomed = zoom_image(image, 1, out_width=25)
assert_array_almost_equal(image, image_zoomed)
assert_raises(ValueError, zoom_image, image, 0.5)
# test out_width
assert_equal(zoom_image(image, 1, out_width=11).shape, (11, 11))
image_zoomed = zoom_image(image, 2, out_width=25)
assert_true(image.sum() < image_zoomed.sum())
| deepjets/deepjets | deepjets/tests/test_preprocessing.py | Python | bsd-3-clause | 1,088 | 0 |