| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6 to 947k | stringlengths 5 to 100 | stringlengths 4 to 231 | stringclasses 1 value | stringclasses 15 values | int64 6 to 947k | float64 0 to 0.34 |
def parse_event(raw_event, preserve_backslash=False, preserve_dot=False):
in_string = False
words = []
d = {}
key = None
curr = []
for c in raw_event:
if c == '\\' and not preserve_backslash:
continue
elif c == '"':
in_string = not in_string
elif c == ' ':
if in_string:
curr.append(c)
else:
if key:
val = ''.join(curr)
d[key] = decodeCounters(val) if key == 'COUNTERS' else val
key = None
else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
elif c == '=':
key = ''.join(curr)
curr = []
else:
curr.append(c)
if in_string:
curr.append(c)
else:
if key:
d[key] = ''.join(curr)
key = None
else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
return words,d
def decodeCounters(counters):
raw_counter_families = counters[1:-1].split('}{')
counter_families = {}
for raw_family in raw_counter_families:
splitted = raw_family.split('[')
name,desc = decodeCounterKey( splitted[0] )
raw_counters = [s[:-1] if s[-1] == ']' else s for s in splitted[1:]]
counters = {}
for raw_counter in raw_counters:
cname,fdesc,val = decodeCounterKey(raw_counter)
#counters[cname] = Counter(cname,fdesc,val)
counters[cname] = (fdesc,val)
#counter_families[name] = CounterFamily(name,desc,counters)
counter_families[name] = (name,desc,counters)
return counter_families
def decodeCounterKey(s):
return s[1:-1].split(')(')
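# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The event line and
# counter string below are hypothetical examples of the KEY="value" job
# history format this parser appears to target.
if __name__ == '__main__':
    sample = 'Job JOBID="job_1" JOBNAME="wordcount" SUBMIT_TIME="1398786695449" .'
    words, fields = parse_event(sample)
    print(words)   # -> ['Job']
    print(fields)  # -> {'JOBID': 'job_1', 'JOBNAME': 'wordcount', 'SUBMIT_TIME': '1398786695449'}

    # decodeCounters expects a string shaped like
    # {(group)(Group Name)[(counter)(Counter Name)(value)]...}{...}
    raw = '{(org.example.FileSystemCounters)(File System Counters)[(BYTES_READ)(Bytes Read)(42)]}'
    print(decodeCounters(raw))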
| melrief/Hadoop-Log-Tools | hadoop/log/convert/libjobevent.py | Python | apache-2.0 | 1,665 | 0.032432 |
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
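# Illustrative note (a sketch, not an original comment): on Python 3.3+ a
# zero-dimensional buffer reports its empty metadata as tuples, e.g.
#   memoryview(np.array(3.0)).shape    -> ()
#   memoryview(np.array(3.0)).strides  -> ()
# whereas earlier interpreters returned None, hence the EMPTY constant above.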
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
        # median-of-3 killer sequence: O(n^2) for quickselect with a pure
        # median-of-3 pivot; exercises the median-of-medians-of-5 code
        # used to keep the runtime O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy on Python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                # assert_array_less does not seem to handle this case, so check manually
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
                # assert_array_less does not seem to handle this case, so check manually
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
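        # Check that d is partitioned around every index in kth: elements
        # between consecutive kth indices are strictly smaller than d[k],
        # and everything from position k onward is >= d[k].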
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy on Python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
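        # reference results for the shape/stride checks below:
        # ddt = d.dot(d.T), dtd = d.T.dot(d)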
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
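        # np.dot may dispatch to BLAS syrk instead of gemm when an operand is
        # multiplied by its own transpose; both paths must give the same result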
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
        # this test is only for 1.9; the diagonal view will be
        # writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original should not have been changed by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the Python stack, to
        # check that we were called directly from Python, is flawed: the object
        # may still be above the stack pointer, and we have no access to the
        # top of it.
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1s makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original should not have been changed by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
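        # maps each forward op to (reflected method name, matching ufunc or
        # None, whether an in-place variant exists)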
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
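            # Build a dummy class defining op_name/rop_name plus a
            # __numpy_ufunc__ that raises if invoked; the Python binops should
            # win, while direct ufunc calls should still hit the override.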
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
            # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
        # Check that the index is set appropriately, also when only an output
        # is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
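        # The pickle strings below were produced under Python 2; on Python 3
        # they must be loaded with encoding='latin1'.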
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
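    # Each entry pairs an input array with the expected argmax index.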
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed too)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
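    # Each entry pairs an input array with the expected argmin index.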
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed too)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
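        # Clip random data for every dtype in the given sctype group, in both
        # tested byte orders (optionally in place), and check the result range.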
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
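        # np.lexsort uses the last key as the primary sort key: sort by a,
        # breaking ties with b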
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() keeps less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
f = io.open(self.filename, 'rb', buffering=0)
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
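        # Helper: parse ``s`` once with np.fromstring and once via a temporary
        # file with np.fromfile, checking both results against ``value``.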
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
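        # The exact text written can vary (e.g. under a different locale), so
        # compare the parsed values rather than the raw string.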
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
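        # c and d come from the read-only array, e from a contiguous writeable
        # array, and f from a non-contiguous writeable view, so only f needs a
        # write-back (UPDATEIFCOPY) buffer tied to its base.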
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
            # Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
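# Thin wrappers so the same statistics checks below can be driven through a
# common callable interface for mean, var and std.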
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
        # this needs definition as there are lots of places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
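            # Over-allocate a byte buffer, slice from the first offset that
            # satisfies the requested alignment, then view it as ``dtype``.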
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)),      # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaults or gives a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
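# The values map to the padding modes of the C-level neighborhood iterator
# helpers: 'zero', 'one' and 'constant' fill out-of-bounds positions with 0,
# 1 or a supplied constant, 'circular' wraps around, and 'mirror' reflects
# values at the edges.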
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
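# ("Stacking" here means running a second neighborhood iterator over the
# output of a first, padded one, so the out-of-bounds modes compose.)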
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
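    # Each check maps a PEP 3118 format string to the np.dtype it should
    # produce: '@' requests native alignment, '=' and '^' give packed
    # (unaligned) layouts, 'x' is a padding byte and 'T{...}' a nested struct.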
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cacher in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
if __name__ == "__main__":
run_module_suite()
| kiwifb/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 244,600 | 0.000773 |
import IMP
import IMP.algebra
import IMP.core
import IMP.atom
import IMP.test
class Tests(IMP.test.TestCase):
"""Tests for SurfaceMover."""
def test_init(self):
"""Test creation of surface mover."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.set_was_used(True)
def test_propose_move(self):
"""Test proposing move alters center and normal."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
c = surf.get_coordinates()
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.propose()
self.assertNotAlmostEqual((n - surf.get_normal()).get_magnitude(), 0)
self.assertNotAlmostEqual((c - surf.get_coordinates()).get_magnitude(), 0)
def test_propose_reflect(self):
"""Test reflect correctly flips normal."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 0, 0, 1.)
mv.propose()
self.assertAlmostEqual((n + surf.get_normal()).get_magnitude(), 0)
def test_reject_restores_initial_state(self):
"""Test rejecting a move returns the surface to previous state."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
c = surf.get_coordinates()
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.propose()
mv.reject()
self.assertAlmostEqual((n - surf.get_normal()).get_magnitude(), 0)
self.assertAlmostEqual((c - surf.get_coordinates()).get_magnitude(), 0)
def test_inputs(self):
"""Test only input is Surface."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
self.assertSetEqual(set([surf.get_particle()]), set(mv.get_inputs()))
mv.set_was_used(True)
if __name__ == '__main__':
IMP.test.main()
| shanot/imp | modules/core/test/test_surface_mover.py | Python | gpl-3.0 | 2,528 | 0.000396 |
from collections import namedtuple
import ckan.plugins.toolkit as tk
from ckan import model
from ckan.model import Session
import json
OgdchDatasetInfo = namedtuple('OgdchDatasetInfo',
['name', 'belongs_to_harvester', 'package_id'])
def get_organization_slug_for_harvest_source(harvest_source_id):
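    """Return the organization name (slug) that owns the given harvest source."""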
context = get_default_context()
try:
source_dataset = \
tk.get_action('package_show')(context, {'id': harvest_source_id})
return source_dataset.get('organization').get('name')
except (KeyError, IndexError, TypeError):
raise tk.ObjectNotFound
def get_packages_to_delete(existing_dataset_infos,
gathered_ogdch_identifiers):
return [
(identifier, info)
for identifier, info
in existing_dataset_infos.items()
if info.belongs_to_harvester and identifier not in gathered_ogdch_identifiers # noqa
]
def get_double_packages(existing_dataset_infos, gathered_ogdch_identifiers): # noqa
return [
(identifier, info)
for identifier, info
in existing_dataset_infos.items()
if not info.belongs_to_harvester and identifier in gathered_ogdch_identifiers # noqa
]
def find_package_for_identifier(identifier):
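    """Search for a package with the given identifier and return an
    OgdchDatasetInfo for it, or None if no package matches."""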
context = get_default_context()
fq = "identifier:({})".format(identifier)
try:
result = tk.get_action('package_search')(context,
{'fq': fq,
'include_private': True})
if result.get('count') > 0:
pkg = result['results'][0]
return OgdchDatasetInfo(name=pkg['name'],
package_id=pkg['id'],
belongs_to_harvester=True)
else:
return None
except Exception as e:
print("Error occured while searching for packages with fq: {}, error: {}" # noqa
.format(fq, e))
def get_dataset_infos_for_organization(organization_name, harvest_source_id):
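    """Page through all datasets of the organization (500 per request, including
    private ones) and map each dataset identifier to an OgdchDatasetInfo that
    records whether the dataset belongs to the given harvest source."""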
context = get_default_context()
rows = 500
page = 0
result_count = 0
fq = "organization:({})".format(organization_name)
processed_count = 0
ogdch_dataset_infos = {}
while page == 0 or processed_count < result_count:
try:
page = page + 1
start = (page - 1) * rows
result = tk.get_action('package_search')(context,
{'fq': fq,
'rows': rows,
'start': start,
'include_private': True})
if not result_count:
result_count = result['count']
datasets_in_result = result.get('results')
if datasets_in_result:
for dataset in datasets_in_result:
extras = dataset.get('extras')
dataset_harvest_source_id = \
get_value_from_dataset_extras(extras,
'harvest_source_id')
if dataset_harvest_source_id and dataset_harvest_source_id == harvest_source_id: # noqa
belongs_to_harvester = True
else:
belongs_to_harvester = False
ogdch_dataset_infos[dataset['identifier']] = \
OgdchDatasetInfo(
name=dataset['name'],
package_id=dataset['id'],
belongs_to_harvester=belongs_to_harvester)
processed_count += len(datasets_in_result)
except Exception as e:
print("Error occured while searching for packages with fq: {}, error: {}" # noqa
.format(fq, e))
break
return ogdch_dataset_infos
def get_default_context():
return {
'model': model,
'session': Session,
'ignore_auth': True
}
def get_value_from_dataset_extras(extras, key):
if extras:
extras_reduced_to_key = [item.get('value')
for item in extras
if item.get('key') == key]
if extras_reduced_to_key:
return extras_reduced_to_key[0]
return None
def get_value_from_object_extra(harvest_object_extras, key):
for extra in harvest_object_extras:
if extra.key == key:
return extra.value
return None
def map_resources_to_ids(pkg_dict, pkg_info):
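    """Give incoming resources the ids of existing resources with the same
    url/title/description, so resource ids stay stable across harvest runs."""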
existing_package = \
tk.get_action('package_show')({}, {'id': pkg_info.package_id})
existing_resources = existing_package.get('resources')
existing_resources_mapping = \
{r['id']: _get_resource_id_string(r) for r in existing_resources}
for resource in pkg_dict.get('resources'):
resource_id_dict = _get_resource_id_string(resource)
id_to_reuse = [k for k, v in existing_resources_mapping.items()
if v == resource_id_dict]
if id_to_reuse:
id_to_reuse = id_to_reuse[0]
resource['id'] = id_to_reuse
del existing_resources_mapping[id_to_reuse]
def _get_resource_id_string(resource):
resource_id_dict = {'url': resource.get('url'),
'title': resource.get('title'),
'description': resource.get('description')}
return json.dumps(resource_id_dict)
| opendata-swiss/ckanext-geocat | ckanext/geocat/utils/search_utils.py | Python | agpl-3.0 | 5,592 | 0 |
from plenum.common.constants import NODE, NYM
from plenum.common.transactions import PlenumTransactions
def testTransactionsAreEncoded():
assert NODE == "0"
assert NYM == "1"
def testTransactionEnumDecoded():
assert PlenumTransactions.NODE.name == "NODE"
assert PlenumTransactions.NYM.name == "NYM"
def testTransactionEnumEncoded():
assert PlenumTransactions.NODE.value == "0"
assert PlenumTransactions.NYM.value == "1"
| evernym/zeno | plenum/test/common/test_transactions.py | Python | apache-2.0 | 450 | 0 |
import base64
import binascii
import logging
import re
from Crypto.Cipher import AES
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.utils.crypto import unpad_pkcs5
from streamlink.utils.parse import parse_json
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(\w+)\.web\.tv/?"
))
class WebTV(Plugin):
_sources_re = re.compile(r'"sources": (\[.*?\]),', re.DOTALL)
_sources_schema = validate.Schema([
{
"src": validate.any(
validate.contains("m3u8"),
validate.all(
validate.text,
validate.transform(lambda x: WebTV.decrypt_stream_url(x)),
validate.contains("m3u8")
)
),
"type": validate.text,
"label": validate.text
}
])
@staticmethod
def decrypt_stream_url(encoded_url):
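        # The base64-decoded payload is hex text: the first 32 hex digits are the
        # AES-CBC IV, the next 64 hex digits are the 32-byte key, and the rest is
        # the ciphertext, which is PKCS#5-unpadded after decryption.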
data = base64.b64decode(encoded_url)
cipher_text = binascii.unhexlify(data[96:])
decryptor = AES.new(binascii.unhexlify(data[32:96]),
AES.MODE_CBC,
binascii.unhexlify(data[:32]))
return unpad_pkcs5(decryptor.decrypt(cipher_text)).decode("utf8")
def _get_streams(self):
"""
Find the streams for web.tv
        :return: a generator of (quality name, HLSStream) pairs
"""
headers = {}
res = self.session.http.get(self.url, headers=headers)
headers["Referer"] = self.url
sources = self._sources_re.findall(res.text)
if len(sources):
sdata = parse_json(sources[0], schema=self._sources_schema)
for source in sdata:
log.debug(f"Found stream of type: {source['type']}")
if source["type"] == "application/vnd.apple.mpegurl":
url = update_scheme("https://", source["src"], force=False)
try:
# try to parse the stream as a variant playlist
variant = HLSStream.parse_variant_playlist(self.session, url, headers=headers)
if variant:
yield from variant.items()
else:
# and if that fails, try it as a plain HLS stream
yield 'live', HLSStream(self.session, url, headers=headers)
except OSError:
log.warning("Could not open the stream, perhaps the channel is offline")
__plugin__ = WebTV
| amurzeau/streamlink-debian | src/streamlink/plugins/webtv.py | Python | bsd-2-clause | 2,658 | 0.001129 |
"""
WSGI config for crawler project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crawler.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| lincolnnascimento/crawler | crawler/wsgi.py | Python | apache-2.0 | 389 | 0.002571 |
import re
from .state import State
from .expression import ConstantExpression
# `lang` (reserved words, directives, instruction and register tables) and
# `AssemblerError` are used throughout this module; the import locations below
# are assumptions.
from . import lang
from .errors import AssemblerError
__all__ = ["assemble"]
_const_parser = None
def _evaluate_d(expression : str, state : State) -> str:
value = _get_register(expression) or state.GetLabelAddress(expression)
if not value:
value = _const_parser.Evaluate(expression)
return "{:0>9b}".format(value)
def _evaluate_s(expression : str, state : State) -> str:
value = _get_register(expression) or state.GetLabelAddress(expression)
if not value:
value = _const_parser.Evaluate(expression)
if value > 0x1FF:
raise AssemblerError(state.LineNumber, "s-field expression evaluated to a value greater than $1FF.")
return "{:0>9b}".format(value)
def _get_register(name : str) -> int:
return None if name not in lang.registers else lang.registers[name]
def assemble(source, binary_format="binary", hub_offset=0, syntax_version=1):
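    """Assemble `source` (an iterable of text lines) in two passes: pass 1
    records labels and handles the ORG/FIT/RES directives, pass 2 encodes
    instructions and data words; the image is returned as a bytearray in the
    requested `binary_format` ("raw", "binary" or "eeprom").
    """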
global _const_parser
state = State()
pending = []
output = []
if binary_format != "raw":
state.HubAddress = 0x10
else:
state.HubAddress = int(hub_offset)
_const_parser = ConstantExpression(state)
# PASS 1
for line in source:
state.LineNumber += 1
if "'" in line:
line = line[:line.index("'")] # remove comments
if line == "" or str.isspace(line): # ignore empty lines
continue
line = line.upper()
parts = line.split(maxsplit=1)
label = ""
directive = ""
cond = ""
opcode = ""
parameters = ""
try:
if parts[0] not in lang.reserved_words:
label = parts[0]
parts = parts[1].split(maxsplit=1) if len(parts) == 2 else []
if parts and parts[0] in lang.directives:
directive = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if parts and parts[0] in lang.conditions:
cond = parts[0]
parts = parts[1].split(maxsplit=1) if len(parts) == 2 else []
if parts and parts[0] in lang.instructions:
opcode = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if parts and parts[0] in lang.datatypes:
opcode = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if label != "":
if directive in ("ORG", "FIT"):
raise AssemblerError(state.LineNumber, "Labels are not allowed for ORG or FIT.")
if not state.AddLabel(label):
raise AssemblerError(state.LineNumber, "Could not add label '{}'".format(label))
if directive != "":
if directive == "ORG":
if parameters == "":
state.ORG()
else:
                        state.ORG(_const_parser.Evaluate(parameters))
elif directive == "FIT":
fit = (parameters == "") and state.FIT() or state.FIT(_const_parser.Evaluate(parameters))
if not fit:
raise AssemblerError(state.LineNumber, "It doesn't FIT!")
elif directive == "RES":
state.FixLabelAddresses()
if parameters == "":
state.RES()
else:
state.RES(_const_parser.Evaluate(parameters))
else:
raise AssemblerError(state.LineNumber, "Unrecognized directive!")
if opcode != "":
state.FixLabelAddresses()
pending.append((cond, opcode, parameters.strip(), state.LineNumber, state.CogAddress, state.HubAddress, line))
state.CogAddress += 1
state.HubAddress += 1
if directive == "" and opcode == "" and label == "":
raise AssemblerError(state.LineNumber, "unrecognized text: {}".format(line))
# print("> {0}".format(line.rstrip()))
except AssemblerError as e:
state.AddError(e)
# print("Pass 2...")
# PASS 2
for line in pending:
state.SetLineNumber(line[3])
parameters = line[2]
try:
if line[1] in lang.datatypes:
value = _const_parser.Evaluate(parameters)
if line[1] == "BYTE":
if isinstance(value, list):
temp = value[0]
count = 8
for b in (value + [0,0,0])[1:]:
if b < 0: b += 0x100
temp += (b << count)
count += 8
if count == 32: break
value = temp
elif value < 0:
value += 0x100
elif line[1] == "WORD":
if isinstance(value, list):
temp = value[0]
count = 16
for b in (value + [0])[1:]:
if b < 0: b += 0x10000
temp += (b << count)
count += 16
if count == 32: break
value = temp
elif value < 0:
value += 0x10000
else:
if isinstance(value, list):
value = value[0]
if value < 0:
value += 0x100000000
bits = "{:0>32b}".format(value)
else:
rules = lang.instructions[line[1]]
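                # `rules` layout as used below (inferred from this code): [0] bit
                # template, [1]-[3] whether WZ/WC/WR are allowed, [4] whether an
                # immediate source is allowed, [5] whether the instruction takes a
                # condition, [6] optional post-processing callable.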
bits = rules[0]
if rules[5] and line[0]:
cond = lang.conditions[line[0]]
bits = bits[:10] + cond + bits[14:]
if parameters:
wr_nr = False
effect = re.split("[\s\t\n,]+", parameters)[-1]
while effect in lang.effects:
if effect == "WZ":
                            if not rules[1]:
raise AssemblerError(state.LineNumber, "WZ Not allowed!")
bits = bits[:6] + "1" + bits[7:]
elif effect == "WC":
                            if not rules[2]:
raise AssemblerError(state.LineNumber, "WC Not allowed!")
bits = bits[:7] + "1" + bits[8:]
elif effect in ("WR", "NR"):
                            if not rules[3]:
raise AssemblerError(state.LineNumber, "WR Not allowed!")
if wr_nr:
raise AssemblerError(state.LineNumber, "Cannot use NR and WR at the same time.")
bits = bits[:8] + ("1" if effect == "WR" else "0") + bits[9:]
wr_nr = True
parameters = parameters[:-3]
effect = parameters and re.split("[\s\t\n,]+", parameters)[-1] or ""
if parameters:
if "d" in bits and "s" in bits:
(d, s) = parameters.split(",")
elif "d" in bits:
d = parameters
elif "s" in bits:
s = parameters
else:
raise AssemblerError(state.LineNumber, "Unrecognized parameters: {}".format(parameters))
if "d" in bits:
d = d.strip()
d = _evaluate_d(d, state)
d_start = bits.index("d")
d_stop = bits.rindex("d")
bits = bits[:d_start] + d + bits[d_stop+1:]
if "s" in bits:
s = s.strip()
if s[0] == "#":
if not rules[4]:
raise AssemblerError(state.LineNumber, "Source cannot have an immediate value.")
bits = bits[:9] + "1" + bits[10:]
s = s[1:]
s = _evaluate_s(s, state)
s_start = bits.index("s")
s_stop = bits.rindex("s")
bits = bits[:s_start] + s + bits[s_stop+1:]
if len(rules) == 7:
bits = rules[6](bits, line[2], state)
bits = re.sub("[^01]", "0", bits)
output.append(int(bits[24:32] + bits[16:24] + bits[8:16] + bits[0:8], 2))
# hex = format(output[-1], "0>8x").upper()
# print("[{}][{}] {}".format(bits, hex, line[5].rstrip()))
except AssemblerError as e:
state.AddError(e)
if state.Errors:
print("Errors Encountered:\n")
for error in state.Errors:
print("{: >3} : {}\n".format(error.LineNumber, error.Message))
exit()
data = bytearray()
checksum = 0
for v in output:
data += bytearray.fromhex(format(v, "0>8x"))
# Note: for "raw" format, all you get is the data. So there is no additional processing.
if binary_format in ("binary", "eeprom"):
spin_code = bytearray.fromhex("35 37 03 35 2C 00 00 00")
pbase = 0x0010
pcurr = pbase + len(data)
vbase = pcurr + len(spin_code)
dbase = vbase + 0x08
dcurr = dbase + 0x04
# Header (16 bytes)
header = bytearray(reversed(bytearray.fromhex(format(80000000, "0>8x")))) # clkfreq (4)
header += bytearray([0x6F]) # clkmode (1)
header += bytearray([0x00]) # checksum (1)
header += bytearray(reversed(bytearray.fromhex(format(pbase, "0>4x")))) # pbase (2)
header += bytearray(reversed(bytearray.fromhex(format(vbase, "0>4x")))) # vbase (2)
header += bytearray(reversed(bytearray.fromhex(format(dbase, "0>4x")))) # dbase (2)
header += bytearray(reversed(bytearray.fromhex(format(pcurr, "0>4x")))) # pcurr (2)
header += bytearray(reversed(bytearray.fromhex(format(dcurr, "0>4x")))) # dcurr (2)
data = header + data + spin_code
# the modulus operators are due to Python's lack of a signed char type.
# Same as "checksum = 0x14 - sum(data)".
checksum = (sum(data) + 0xEC) % 256
checksum = (256 - checksum) % 256
data[0x05] = checksum
if binary_format == "eeprom":
data += bytearray([0xff, 0xff, 0xf9, 0xff] * 2)
            # Pad the image out to a full EEPROM; 32 KB is assumed here as the
            # standard boot EEPROM size.
            eeprom_size = 32 * 1024
            data += bytearray([0x00] * (eeprom_size - len(data)))
return data
| Seairth/Orochi | assembler/__init__.py | Python | gpl-3.0 | 11,327 | 0.005562 |
from ...scheme import Scheme
from ..schemeinfo import SchemeInfoDialog
from ...gui import test
class TestSchemeInfo(test.QAppTestCase):
def test_scheme_info(self):
scheme = Scheme(title="A Scheme", description="A String\n")
dialog = SchemeInfoDialog()
dialog.setScheme(scheme)
status = dialog.exec_()
if status == dialog.Accepted:
self.assertEqual(scheme.title.strip(),
str(dialog.editor.name_edit.text()).strip())
self.assertEqual(scheme.description,
str(dialog.editor.desc_edit \
.toPlainText()).strip())
| cheral/orange3 | Orange/canvas/application/tests/test_schemeinfo.py | Python | bsd-2-clause | 680 | 0.002941 |
# -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
class BaseTestCase(unittest.TestCase):
"""The base test case."""
_DATA_PATH = os.path.join(os.getcwd(), 'data')
_TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a test file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(self._TEST_DATA_PATH, *path_segments)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
SkipTest: if the path path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
| Onager/artifacts | tests/test_lib.py | Python | apache-2.0 | 1,770 | 0.00791 |
from __future__ import print_function
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import sys
import os
import inspect
from . import copydir
from . import command
from paste.util.template import paste_script_template_renderer
import six
class Template(object):
# Subclasses must define:
# _template_dir (or template_dir())
# summary
# Variables this template uses (mostly for documentation now)
# a list of instances of var()
vars = []
# Eggs that should be added as plugins:
egg_plugins = []
# Templates that must be applied first:
required_templates = []
# Use Cheetah for substituting templates:
use_cheetah = False
# If true, then read all the templates to find the variables:
read_vars_from_templates = False
# You can also give this function/method to use something other
# than Cheetah or string.Template. The function should be of the
# signature template_renderer(content, vars, filename=filename).
# Careful you don't turn this into a method by putting a function
# here (without staticmethod)!
template_renderer = None
def __init__(self, name):
self.name = name
self._read_vars = None
def module_dir(self):
"""Returns the module directory of this template."""
mod = sys.modules[self.__class__.__module__]
return os.path.dirname(mod.__file__)
def template_dir(self):
assert self._template_dir is not None, (
"Template %r didn't set _template_dir" % self)
if isinstance( self._template_dir, tuple):
return self._template_dir
else:
return os.path.join(self.module_dir(), self._template_dir)
def run(self, command, output_dir, vars):
self.pre(command, output_dir, vars)
self.write_files(command, output_dir, vars)
self.post(command, output_dir, vars)
def check_vars(self, vars, cmd):
expect_vars = self.read_vars(cmd)
if not expect_vars:
# Assume that variables aren't defined
return vars
converted_vars = {}
unused_vars = vars.copy()
errors = []
for var in expect_vars:
if var.name not in unused_vars:
if cmd.interactive:
prompt = 'Enter %s' % var.full_description()
response = cmd.challenge(prompt, var.default, var.should_echo)
converted_vars[var.name] = response
elif var.default is command.NoDefault:
errors.append('Required variable missing: %s'
% var.full_description())
else:
converted_vars[var.name] = var.default
else:
converted_vars[var.name] = unused_vars.pop(var.name)
if errors:
raise command.BadCommand(
'Errors in variables:\n%s' % '\n'.join(errors))
converted_vars.update(unused_vars)
vars.update(converted_vars)
return converted_vars
def read_vars(self, command=None):
if self._read_vars is not None:
return self._read_vars
assert (not self.read_vars_from_templates
or self.use_cheetah), (
"You can only read variables from templates if using Cheetah")
if not self.read_vars_from_templates:
self._read_vars = self.vars
return self.vars
vars = self.vars[:]
var_names = [var.name for var in self.vars]
        read_vars = sorted(find_args_in_dir(
            self.template_dir(),
            verbose=command and command.verbose > 1).items())
for var_name, var in read_vars:
if var_name not in var_names:
vars.append(var)
self._read_vars = vars
return vars
def write_files(self, command, output_dir, vars):
template_dir = self.template_dir()
if not os.path.exists(output_dir):
print("Creating directory %s" % output_dir)
if not command.simulate:
# Don't let copydir create this top-level directory,
# since copydir will svn add it sometimes:
os.makedirs(output_dir)
copydir.copy_dir(template_dir, output_dir,
vars,
verbosity=command.verbose,
simulate=command.options.simulate,
interactive=command.interactive,
overwrite=command.options.overwrite,
indent=1,
use_cheetah=self.use_cheetah,
template_renderer=self.template_renderer)
def print_vars(self, indent=0):
vars = self.read_vars()
var.print_vars(vars)
def pre(self, command, output_dir, vars):
"""
Called before template is applied.
"""
pass
def post(self, command, output_dir, vars):
"""
Called after template is applied.
"""
pass
NoDefault = command.NoDefault
class var(object):
def __init__(self, name, description,
default='', should_echo=True):
self.name = name
self.description = description
self.default = default
self.should_echo = should_echo
def __repr__(self):
return '<%s %s default=%r should_echo=%s>' % (
self.__class__.__name__,
self.name, self.default, self.should_echo)
def full_description(self):
if self.description:
return '%s (%s)' % (self.name, self.description)
else:
return self.name
def print_vars(cls, vars, indent=0):
max_name = max([len(v.name) for v in vars])
for var in vars:
if var.description:
print('%s%s%s %s' % (
' '*indent,
var.name,
' '*(max_name-len(var.name)),
var.description))
else:
print(' %s' % var.name)
if var.default is not command.NoDefault:
print(' default: %r' % var.default)
if var.should_echo is True:
print(' should_echo: %s' % var.should_echo)
print()
print_vars = classmethod(print_vars)
class BasicPackage(Template):
_template_dir = 'paster-templates/basic_package'
summary = "A basic setuptools-enabled package"
vars = [
var('version', 'Version (like 0.1)'),
var('description', 'One-line description of the package'),
var('long_description', 'Multi-line description (in reST)'),
var('keywords', 'Space-separated keywords/tags'),
var('author', 'Author name'),
var('author_email', 'Author email'),
var('url', 'URL of homepage'),
var('license_name', 'License name'),
var('zip_safe', 'True/False: if the package can be distributed as a .zip file', default=False),
]
template_renderer = staticmethod(paste_script_template_renderer)
_skip_variables = ['VFN', 'currentTime', 'self', 'VFFSL', 'dummyTrans',
'getmtime', 'trans']
def find_args_in_template(template):
if isinstance(template, six.string_types):
# Treat as filename:
import Cheetah.Template
template = Cheetah.Template.Template(file=template)
if not hasattr(template, 'body'):
# Don't know...
return None
method = template.body
args, varargs, varkw, defaults = inspect.getargspec(method)
defaults=list(defaults or [])
vars = []
while args:
if len(args) == len(defaults):
default = defaults.pop(0)
else:
default = command.NoDefault
arg = args.pop(0)
if arg in _skip_variables:
continue
# @@: No way to get description yet
vars.append(
var(arg, description=None,
default=default))
return vars
def find_args_in_dir(dir, verbose=False):
all_vars = {}
for fn in os.listdir(dir):
if fn.startswith('.') or fn == 'CVS' or fn == '_darcs':
continue
full = os.path.join(dir, fn)
if os.path.isdir(full):
inner_vars = find_args_in_dir(full)
elif full.endswith('_tmpl'):
inner_vars = {}
found = find_args_in_template(full)
if found is None:
# Couldn't read variables
if verbose:
print('Template %s has no parseable variables' % full)
continue
for var in found:
inner_vars[var.name] = var
else:
# Not a template, don't read it
continue
if verbose:
print('Found variable(s) %s in Template %s' % (
', '.join(inner_vars.keys()), full))
for var_name, var in inner_vars.items():
# Easy case:
if var_name not in all_vars:
all_vars[var_name] = var
continue
# Emit warnings if the variables don't match well:
cur_var = all_vars[var_name]
if not cur_var.description:
cur_var.description = var.description
elif (cur_var.description and var.description
and var.description != cur_var.description):
print((
"Variable descriptions do not match: %s: %s and %s"
% (var_name, cur_var.description, var.description)), file=sys.stderr)
if (cur_var.default is not command.NoDefault
and var.default is not command.NoDefault
and cur_var.default != var.default):
print((
"Variable defaults do not match: %s: %r and %r"
% (var_name, cur_var.default, var.default)), file=sys.stderr)
return all_vars
| stefanv/aandete | app/lib/paste/script/templates.py | Python | bsd-3-clause | 10,088 | 0.001685 |
# -*- coding=utf-8 -*-
import requests
import os
import json
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
download_base_url = 'http://www.jikexueyuan.com/course/video_download'
cookie_map = 'gr_user_id=eb91fa90-1980-4500-a114-6fea026da447; _uab_collina=148758210602708013401536; connect.sid=s%3AsRUeQ8XeWpWREBnEnaAWt31xIIpwZHj0.otjTDyQcivT1X65RfMF%2B2mpSfgjAoC3%2BBog9Z8C9NCo; _gat=1; _umdata=2FB0BDB3C12E491D192D688906F3F911DBC9CBDAAC2337399CD12353C2D45B7A1BAFC8FE8A49D872CD43AD3E795C914C7A1B39D73E9DDB85B7E1FAADEEA5709A; uname=king_aric; uid=3034284; code=MNla69; authcode=29e9YmwFSjDxwSHA4AZN%2B3s%2B7%2BcEm6ZdlmeMMoEKxP5an1nNvLlH96ke%2FL34Br0NXXoQ%2FcPNkhbXSOUOF2ZM5RPSw%2F0sjlravys3aCucZ1C12Fn2UxWA8V8J%2FPSV; avatar=https%3A%2F%2Fassets.jikexueyuan.com%2Fuser%2Favtar%2Fdefault.gif; ca_status=0; vip_status=1; level_id=1; is_expire=0; domain=0JjajqVPq; _ga=GA1.2.1312183079.1487582095; gr_session_id_aacd01fff9535e79=39cf51ef-683e-4338-b251-e487baed02bc; gr_cs1_39cf51ef-683e-4338-b251-e487baed02bc=uid%3A3034284; QINGCLOUDELB=84b10773c6746376c2c7ad1fac354ddfd562b81daa2a899c46d3a1e304c7eb2b|WK6ZY|WK6YR Host:www.jikexueyuan.com'
def download_execute(root_dir, result_list, sort=False):
"""
result_list [{'href':'','title':'','course_id':''},]
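    For each course a directory is created and numbered segments are downloaded
    until the server reports there are none left; each request is retried up to
    10 times on errors.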
"""
number = 0
for doc in result_list:
number += 1
if sort:
doc_path = u'%s/%d.%s' % (root_dir, number, doc.get('title'))
else:
doc_path = u'%s/%s' % (root_dir, doc.get('title'))
print doc_path
create_dir(doc_path)
seq, keep_running = 0, True
while keep_running:
seq += 1
download_url = '%s?seq=%d&course_id=%d' % (download_base_url, seq, doc.get('course_id'))
for i in range(10): # retry 10 times
result_code = request_data(download_url, doc_path, seq)
if result_code == 404:
keep_running = False
break
elif result_code == 500:
                    print u'Retry %d : %s' % (i + 1, download_url)
continue
break
def request_data(download_url, doc_path, seq):
"""
    :return: 200 on success, 404 when there is nothing (more) to download, 500 on a retryable error
"""
try:
if not os.path.exists(doc_path):return 404
response = requests.get(url=download_url, headers={'Cookie': cookie_map})
if response.status_code == 200:
download_data = response.content
download_data = json.loads(download_data)
# print download_data, download_data.get('data').get('title')
if download_data.get('code') != 200:
if download_data.get('code') == 10101:
return 404
print u'request error: %s' % download_data.get('msg').decode('utf-8')
return 500
file_path = u'%s/%d.%s.mp4' % (
doc_path, seq, download_data.get('data').get('title'))
if os.path.exists(file_path):
                print u'%s already exists' % file_path
return 200
begin_time = time.time()
r = requests.get(download_data.get('data').get('urls'))
with open(file_path, "wb") as code:
try:
code.write(r.content)
                    print u'Downloaded %s in %d seconds' % (file_path, int(time.time() - begin_time))
return 200
except Exception:
                    print u'Download failed: %s' % file_path
return 500
else:
            print u'%s request failed, status %d' % (download_url, response.status_code)
return 500
except Exception, e:
        print u'%s request failed,\nexception: %s' % (download_url, e)
return 500
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except Exception, e:
            print u'Failed to create directory %s;\n %s' % (path, e)
else:
        print u'Directory %s already exists' % path
def parent_dir(path):
if path[-1] == '/': path = path[0:-1]
return '/'.join(path.split('/')[0:-1])
def del_dir(path):
assert os.path.exists(path) and os.path.isdir(path)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
def create_file(name, mode='r', data=""):
try:
parent_path = parent_dir(name)
if parent_path and not os.path.exists(parent_path): create_dir(parent_path)
        with open(name, mode) as f:
f.write(data)
except Exception, e:
        print u'Failed to create %s\nexception: %s' % (name, e)
| amlyj/pythonStudy | 2.7/crawlers/jkxy/jk_utils.py | Python | mit | 4,771 | 0.002144 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class Request(Document):
"""A request for work, service or project.A request for work, service or project.
"""
def __init__(self, actionNeeded='', priority='', corporateCode='', ErpQuoteLineItem=None, Projects=None, Organisation=None, Works=None, *args, **kw_args):
"""Initialises a new 'Request' instance.
@param actionNeeded: Based on the current 'Status.status', the action that is needed before this Request can transition to the desired state, such as initiating the requested Work. For example, missing or additionally needed information may be required from the requesting organisation before a work Design may be created.
@param priority: The priority of this request.
@param corporateCode: The corporate code for this request.
@param ErpQuoteLineItem:
@param Projects:
@param Organisation:
@param Works:
"""
#: Based on the current 'Status.status', the action that is needed before this Request can transition to the desired state, such as initiating the requested Work. For example, missing or additionally needed information may be required from the requesting organisation before a work Design may be created.
self.actionNeeded = actionNeeded
#: The priority of this request.
self.priority = priority
#: The corporate code for this request.
self.corporateCode = corporateCode
self._ErpQuoteLineItem = None
self.ErpQuoteLineItem = ErpQuoteLineItem
self._Projects = []
self.Projects = [] if Projects is None else Projects
self._Organisation = None
self.Organisation = Organisation
self._Works = []
self.Works = [] if Works is None else Works
super(Request, self).__init__(*args, **kw_args)
_attrs = ["actionNeeded", "priority", "corporateCode"]
_attr_types = {"actionNeeded": str, "priority": str, "corporateCode": str}
_defaults = {"actionNeeded": '', "priority": '', "corporateCode": ''}
_enums = {}
_refs = ["ErpQuoteLineItem", "Projects", "Organisation", "Works"]
_many_refs = ["Projects", "Works"]
def getErpQuoteLineItem(self):
return self._ErpQuoteLineItem
def setErpQuoteLineItem(self, value):
if self._ErpQuoteLineItem is not None:
self._ErpQuoteLineItem._Request = None
self._ErpQuoteLineItem = value
if self._ErpQuoteLineItem is not None:
self._ErpQuoteLineItem.Request = None
self._ErpQuoteLineItem._Request = self
ErpQuoteLineItem = property(getErpQuoteLineItem, setErpQuoteLineItem)
def getProjects(self):
return self._Projects
def setProjects(self, value):
for p in self._Projects:
filtered = [q for q in p.Requests if q != self]
            p._Requests = filtered
for r in value:
if self not in r._Requests:
r._Requests.append(self)
self._Projects = value
Projects = property(getProjects, setProjects)
def addProjects(self, *Projects):
for obj in Projects:
if self not in obj._Requests:
obj._Requests.append(self)
self._Projects.append(obj)
def removeProjects(self, *Projects):
for obj in Projects:
if self in obj._Requests:
obj._Requests.remove(self)
self._Projects.remove(obj)
def getOrganisation(self):
return self._Organisation
def setOrganisation(self, value):
if self._Organisation is not None:
filtered = [x for x in self.Organisation.Requests if x != self]
self._Organisation._Requests = filtered
self._Organisation = value
if self._Organisation is not None:
if self not in self._Organisation._Requests:
self._Organisation._Requests.append(self)
Organisation = property(getOrganisation, setOrganisation)
def getWorks(self):
return self._Works
def setWorks(self, value):
for x in self._Works:
x.Request = None
for y in value:
y._Request = self
self._Works = value
Works = property(getWorks, setWorks)
def addWorks(self, *Works):
for obj in Works:
obj.Request = self
def removeWorks(self, *Works):
for obj in Works:
obj.Request = None
| rwl/PyCIM | CIM15/IEC61970/Informative/InfWork/Request.py | Python | mit | 5,609 | 0.002318 |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags."""
from typing import Any, Dict, List
from absl import flags
from uncertainty_baselines.datasets import datasets
from uncertainty_baselines.models import models
FLAGS = flags.FLAGS
def serialize_flags(flag_list: Dict[str, Any]) -> str:
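  """Serialize a dict of flag name -> value into newline-separated --name=value pairs."""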
string = ''
for flag_name, flag_value in flag_list.items():
string += '--{}={}\n'.format(flag_name, flag_value)
# Remove the final trailing newline.
return string[:-1]
def define_flags() -> List[str]:
"""Define common flags."""
predefined_flags = set(FLAGS)
flags.DEFINE_string('experiment_name', None, 'Name of this experiment.')
# TPU Job flags.
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_enum(
'mode',
'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Whether to execute train and/or eval.')
flags.DEFINE_integer(
'num_cores', None, 'How many TPU cores or GPUs the job is running on.')
flags.DEFINE_bool('run_ood', False, 'Whether to run OOD jobs with eval job.')
flags.DEFINE_bool('use_cpu', False, 'Whether to run on CPU.')
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or TPU.')
# Train/eval loop flags.
flags.DEFINE_integer(
'checkpoint_step', -1, 'Step of the checkpoint to restore from.')
flags.DEFINE_enum(
'dataset_name',
None,
datasets.get_dataset_names(),
'Name of the dataset to use.')
flags.DEFINE_enum(
'ood_dataset_name',
None,
datasets.get_dataset_names(),
'Name of the OOD dataset to use for evaluation.')
flags.DEFINE_integer(
'eval_frequency',
None,
'How many steps between evaluating on the (validation and) test set.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
flags.DEFINE_string('output_dir', None, 'Base output directory.')
flags.DEFINE_enum(
'model_name',
None,
models.get_model_names(),
'Name of the model to use.')
flags.DEFINE_integer(
'log_frequency',
100,
'How many steps between logging the metrics.')
flags.DEFINE_integer('train_steps', None, 'How many steps to train for.')
# Hyperparamater flags.
flags.DEFINE_integer('batch_size', None, 'Training batch size.')
flags.DEFINE_integer('eval_batch_size', None, 'Validation/test batch size.')
flags.DEFINE_float('learning_rate', None, 'Learning rate.')
flags.DEFINE_string(
'learning_rate_schedule',
'constant',
'Learning rate schedule to use.')
flags.DEFINE_integer('schedule_hparams_warmup_epochs', 1,
'Number of epochs for a linear warmup to the initial '
'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('schedule_hparams_decay_ratio', 0.2,
'Amount to decay learning rate.')
flags.DEFINE_list('schedule_hparams_decay_epochs', ['60', '120', '160'],
'Epochs to decay learning rate by.')
flags.DEFINE_string('optimizer', 'adam', 'Optimizer to use.')
flags.DEFINE_float('optimizer_hparams_momentum', 0.9, 'SGD momentum.')
flags.DEFINE_float('optimizer_hparams_beta_1', 0.9, 'Adam beta_1.')
flags.DEFINE_float('optimizer_hparams_beta_2', 0.999, 'Adam beta_2.')
flags.DEFINE_float('optimizer_hparams_epsilon', 1e-7, 'Adam epsilon.')
flags.DEFINE_float('weight_decay', 0.0, 'Weight decay for optimizer.')
flags.DEFINE_float('l2_regularization', 1e-4, 'L2 regularization for models.')
flags.DEFINE_float(
'focal_loss_gamma', 0.0, 'The gamma parameter in the focal loss. '
'If gamma=0.0, the focal loss is equivalent to cross entropy loss.')
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_float(
'validation_percent',
0.0,
'Percent of training data to hold out and use as a validation set.')
flags.DEFINE_integer(
'shuffle_buffer_size', 16384, 'Dataset shuffle buffer size.')
# Model flags, Wide Resnet
flags.DEFINE_integer('wide_resnet_depth', 28,
'Depth of wide resnet model.')
flags.DEFINE_integer('wide_resnet_width_multiplier', 10,
'Width multiplier for wide resnet model.')
flags.DEFINE_integer('num_classes', 10, 'Number of label classes.')
# Flags relating to genomics_cnn model
flags.DEFINE_integer('len_seqs', 250,
'Sequence length, only used for genomics dataset.')
flags.DEFINE_integer('num_motifs', 1024,
'Number of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('len_motifs', 20,
'Length of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('num_denses', 128,
'Number of denses, only used for the genomics dataset.')
# Flags relating to SNGP model
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for dropout layers.')
flags.DEFINE_bool(
'before_conv_dropout', False,
      'Whether to use filter-wise dropout before convolutional layers.')
flags.DEFINE_bool(
'use_mc_dropout', False,
'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_bool('use_spec_norm', False,
'Whether to apply spectral normalization.')
flags.DEFINE_bool('use_gp_layer', False,
'Whether to use Gaussian process as the output layer.')
# Model flags, Spectral Normalization.
flags.DEFINE_integer(
'spec_norm_iteration', 1,
'Number of power iterations to perform for estimating '
'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
'Upper bound to spectral norm of weight matrices.')
# Model flags, Gaussian Process layer.
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
'gp_scale', 1.,
'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
'gp_input_dim', 128,
'The dimension to reduce the neural network input to for the GP layer '
      '(via random Gaussian projection, which preserves distances by the '
      'Johnson-Lindenstrauss lemma). If -1, no dimension reduction is applied.')
flags.DEFINE_integer(
'gp_hidden_dim', 1024,
'The hidden dimension of the GP layer, which corresponds to the number '
      'of random features used for the approximation.')
flags.DEFINE_bool(
'gp_input_normalization', False,
      'Whether to normalize the input using LayerNorm for the GP layer. '
'This is similar to automatic relevance determination (ARD) in the '
'classic GP learning.')
flags.DEFINE_float(
'gp_cov_ridge_penalty', 1.0,
'The Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
'gp_cov_discount_factor', 0.999,
'The discount factor to compute the moving average of '
'precision matrix.')
flags.DEFINE_float(
'gp_mean_field_factor', -1,
'The tunable multiplicative factor used in the mean-field approximation '
'for the posterior mean of softmax Gaussian process. If -1 then use '
'posterior mode instead of posterior mean. See [2] for detail.')
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('experiment_name')
flags.mark_flag_as_required('model_name')
# Flags relating to OOD metrics
flags.DEFINE_list(
'sensitivity_thresholds', ['0.05', '0.95', '10'],
'List of sensitivities at which to calculate specificity.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'specificity_thresholds', ['0.05', '0.95', '10'],
'List of specificities at which to calculate sensitivity.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'precision_thresholds', ['0.05', '0.95', '10'],
'List of precisions at which to calculate recall.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'recall_thresholds', ['0.05', '0.95', '10'],
'List of recalls at which to calculate precision.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
all_flags = set(FLAGS)
program_flag_names = sorted(list(all_flags - predefined_flags))
return program_flag_names
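# Hedged usage sketch (not part of the library): serialize_flags only joins a
# {flag_name: value} mapping into newline-separated --name=value pairs, so it
# can be exercised without defining any absl flags.
if __name__ == '__main__':
  example_flags = {'dataset_name': 'cifar10', 'batch_size': 64}
  # Prints "--dataset_name=cifar10" and "--batch_size=64" on separate lines.
  print(serialize_flags(example_flags))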
| google/uncertainty-baselines | experimental/single_model_uncertainty/flags.py | Python | apache-2.0 | 8,929 | 0.009184 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 14:32:42 2015
@author: noore
"""
import numpy as np
from scipy.misc import comb # comb(N,k) = The number of combinations of N things taken k at a time
THETA = 0.011 # the natural abundance of 13C among the two isotopes (13C and 12C).
def compute_fractions(counts):
"""
Calculates the isotope fractions of a compound, given the list of
counts (assuming it starts from M+0).
Usage:
counts - a list of positive values representing the counts of each
isotope starting from M+0
Returns:
        corrected_counts - a list of counts for each isotope after correcting
            for the natural abundance of 13C (divide by the total to obtain
            relative fractions)
"""
N = len(counts)-1
F = np.matrix(np.zeros((N+1, N+1)))
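    # Column j of F spreads a species labelled at M+j across the measured peaks
    # M+i according to the natural abundance of 13C in the remaining carbons,
    # so applying F.I to the raw counts undoes that contribution.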
for i in range(N+1):
for j in range(i+1):
F[i,j] = comb(N-j, i-j) * THETA**(i-j) * (1-THETA)**(N-j)
X = np.matrix(counts, dtype=float).T
    corrected_counts = list((F.I * X).flat)
    return corrected_counts
if __name__ == '__main__':
counts = [900, 100, 5, 900, 5000]
Y = compute_fractions(counts)
print("The corrected isotope relative abundances are:")
print('-'*50)
print( ' | '.join(map(lambda d: ' M + %d ' % d, range(len(counts)))))
print(' | '.join(map(lambda s: '%4.1e' % (s*100), Y)))
print('-'*50)
    print(compute_fractions([1]*7))
 | eladnoor/ms-tools | james/isotope_util.py | Python | mit | 1,554 | 0.01287 |
"""
Utility module to manipulate directories.
"""
import os
import types
import shutil
__author__ = "Jenson Jose"
__email__ = "jensonjose@live.in"
__status__ = "Alpha"
class DirUtils:
"""
Utility class containing methods to manipulate directories.
"""
def __init__(self):
pass
@staticmethod
def create_dir(dir_path):
"""
Creates a directory at the specified path.
:param dir_path: The full path of the directory to be created.
:return: True, if directory was created, False otherwise.
:rtype: bool
"""
try:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
except Exception as ex:
return False
@staticmethod
def create_dir_path_string(path_components, separator="/"):
"""
Combines list of supplied path components to create a full directory path.
:param path_components: List of components to be part of the final directory path.
:param separator: Separator to be used for isolating directory path components.
:return: The full directory path string, if path_components is a valid list, False otherwise.
:rtype: str
"""
if isinstance(path_components, types.ListType):
if len(path_components) > 0:
path_string = ""
for component in path_components:
path_string += component + separator
path_string = path_string[:-1]
path_string = os.path.normpath(path_string)
return path_string
return False
@staticmethod
def fix_path(path1, path2):
"""
Combines 2 given paths to form OS compliant path with correct path separators.
Example:
1st path (Linux): /root/some_dir; 2nd path (Windows): \test\data
After combining the above paths,
On Windows: \root\some_dir\test\data
On Linux: /root/some_dir/test/data
:param path1: The first path to be combined.
:param path2: The second path to be combined.
:return: The final combined path.
:rtype: str
"""
return os.path.normpath(path1 + path2)
@staticmethod
def check_valid_dir(dir_path):
"""
Verifies if given directory path exists and is a valid directory.
:param dir_path: The full path of the directory to be verified.
:return: True if path contains a valid directory, False otherwise.
:rtype: bool
"""
if os.path.exists(dir_path):
if os.path.isdir(dir_path):
return True
return False
@staticmethod
def create_archive(output_file_name, source_path, archive_format="zip"):
"""
Creates a compressed archive of the specified directory.
:param output_file_name: Name of the output archive file.
:param source_path: The full path of the source to be archived.
:param archive_format: The format to be used for archiving, and can be either ZIP, TAR, BZTAR or GZTAR.
:return: True if archiving was successful, False otherwise.
:rtype: bool
"""
if shutil.make_archive(output_file_name, archive_format.lower(), source_path):
return True
return False
@staticmethod
def get_dir_contents(source_dir, filter_pattern=None, meta_data=False):
"""
Returns a list of directory contents matching the supplied search pattern.
If no pattern is supplied all directory contents are returned.
:param source_dir: The path of the directory to be searched.
:param filter_pattern: The pattern to be used to search the directory.
:param meta_data: If True, returns a list of dictionaries containing meta data of each individual entry.
:return: List of matching entries if the directory is valid, False otherwise.
:rtype: list
"""
        from utilbox.os_utils import FileUtils
        filtered_entry_list = []
        if DirUtils.check_valid_dir(source_dir):
            compiled_pattern = None
            if filter_pattern is not None:
                import re
                compiled_pattern = re.compile(filter_pattern)
            for dir_entry in os.listdir(source_dir):
                if compiled_pattern is not None and len(compiled_pattern.findall(dir_entry)) < 1:
                    continue
                if meta_data:
                    dir_entry_path = DirUtils.create_dir_path_string([source_dir,
                                                                      dir_entry])
                    # Use a separate variable so the meta_data flag is not overwritten.
                    entry_meta = None
                    if DirUtils.check_valid_dir(dir_entry_path):
                        entry_meta = DirUtils.get_dir_metadata(dir_entry_path)
                    elif FileUtils.check_valid_file(dir_entry_path):
                        entry_meta = FileUtils.get_file_metadata(dir_entry_path)
                    if entry_meta:
                        filtered_entry_list.append(entry_meta)
                else:
                    filtered_entry_list.append(dir_entry)
            return filtered_entry_list
        return False
@staticmethod
def get_dir_metadata(dir_path, size_unit="k", time_format="%Y-%m-%d %I:%M:%S"):
"""
Returns directory meta-data containing,
- Last modified time
        - Directory size as reported by the file system for the directory entry (not a recursive sum of file sizes)
- Directory name
- Directory parent directory
- Directory full path
:param dir_path: The full path of the directory to be analyzed.
:param size_unit: Units in which to report directory size.
:param time_format: Format in which to report directory modification time.
:return: Dictionary containing relevant directory meta data.
:rtype: dict
"""
if DirUtils.check_valid_dir(dir_path):
import datetime
last_modified_time = datetime.datetime.fromtimestamp(os.path.getmtime(dir_path)).strftime(time_format)
# get file size in bytes
file_size = os.path.getsize(dir_path)
base_unit = 1024.0
decimal_limit = 2
if size_unit == "b":
pass
elif size_unit == "k":
file_size /= base_unit
elif size_unit == "m":
file_size = (file_size / base_unit) / base_unit
elif size_unit == "g":
file_size = ((file_size / base_unit) / base_unit) / base_unit
# limit floating-point value to X decimal points
if size_unit != "b":
file_size = round(file_size, decimal_limit)
return {"LAST_MODIFIED": str(last_modified_time),
"SIZE": str(file_size),
"NAME": str(os.path.basename(dir_path)),
"PARENT_DIRECTORY": str(os.path.dirname(dir_path)),
"FULL_PATH": str(dir_path)}
return False
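# Hedged usage sketch (illustrative only, not part of the utility class). The
# paths are placeholders; create_dir_path_string and fix_path only manipulate
# strings, so this is safe to run directly under Python 2, which this module
# targets.
if __name__ == "__main__":
    print(DirUtils.create_dir_path_string(["/tmp", "data", "logs"]))
    print(DirUtils.fix_path("/root/some_dir", "/test/data"))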
| jensonjose/utilbox | utilbox/os_utils/dir_utils.py | Python | mit | 7,866 | 0.003051 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
from collections import deque
from datetime import datetime
from typing import Dict, Generator, Optional
from botocore.waiter import Waiter
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, XCom
from airflow.providers.amazon.aws.exceptions import ECSOperatorError
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.typing_compat import Protocol, runtime_checkable
from airflow.utils.session import provide_session
def should_retry(exception: Exception):
"""Check if exception is related to ECS resource quota (CPU, MEM)."""
if isinstance(exception, ECSOperatorError):
return any(
quota_reason in failure['reason']
for quota_reason in ['RESOURCE:MEMORY', 'RESOURCE:CPU']
for failure in exception.failures
)
return False
@runtime_checkable
class ECSProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('ecs')``. This is used for type hints on
:py:meth:`.ECSOperator.client`.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
"""
def run_task(self, **kwargs) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task""" # noqa: E501
...
def get_waiter(self, x: str) -> Waiter:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter""" # noqa: E501
...
def describe_tasks(self, cluster: str, tasks) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks""" # noqa: E501
...
def stop_task(self, cluster, task, reason: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task""" # noqa: E501
...
def describe_task_definition(self, taskDefinition: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_task_definition""" # noqa: E501
...
def list_tasks(self, cluster: str, launchType: str, desiredStatus: str, family: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.list_tasks""" # noqa: E501
...
class ECSOperator(BaseOperator):
"""
Execute a task on AWS ECS (Elastic Container Service)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ECSOperator`
:param task_definition: the task definition name on Elastic Container Service
:type task_definition: str
:param cluster: the cluster name on Elastic Container Service
:type cluster: str
:param overrides: the same parameter that boto3 will receive (templated):
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
:type overrides: dict
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: str
:param region_name: region name to use in AWS Hook.
Override the region_name in connection (if provided)
:type region_name: str
:param launch_type: the launch type on which to run your task ('EC2' or 'FARGATE')
:type launch_type: str
:param capacity_provider_strategy: the capacity provider strategy to use for the task.
When capacity_provider_strategy is specified, the launch_type parameter is omitted.
If no capacity_provider_strategy or launch_type is specified,
the default capacity provider strategy for the cluster is used.
:type capacity_provider_strategy: list
:param group: the name of the task group associated with the task
:type group: str
:param placement_constraints: an array of placement constraint objects to use for
the task
:type placement_constraints: list
:param placement_strategy: an array of placement strategy objects to use for
the task
:type placement_strategy: list
:param platform_version: the platform version on which your task is running
:type platform_version: str
:param network_configuration: the network configuration for the task
:type network_configuration: dict
:param tags: a dictionary of tags in the form of {'tagKey': 'tagValue'}.
:type tags: dict
:param awslogs_group: the CloudWatch group where your ECS container logs are stored.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_group: str
:param awslogs_region: the region in which your CloudWatch logs are stored.
If None, this is the same as the `region_name` parameter. If that is also None,
this is the default AWS region based on your connection settings.
:type awslogs_region: str
:param awslogs_stream_prefix: the stream prefix that is used for the CloudWatch logs.
This is usually based on some custom name combined with the name of the container.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_stream_prefix: str
:param reattach: If set to True, will check if the task previously launched by the task_instance
is already running. If so, the operator will attach to it instead of starting a new task.
This is to avoid relaunching a new task when the connection drops between Airflow and ECS while
the task is running (when the Airflow worker is restarted for example).
:type reattach: bool
:param quota_retry: Config if and how to retry _start_task() for transient errors.
:type quota_retry: dict
"""
ui_color = '#f0ede4'
template_fields = ('overrides',)
template_fields_renderers = {
"overrides": "json",
"network_configuration": "json",
"tags": "json",
"quota_retry": "json",
}
REATTACH_XCOM_KEY = "ecs_task_arn"
REATTACH_XCOM_TASK_ID_TEMPLATE = "{task_id}_task_arn"
def __init__(
self,
*,
task_definition: str,
cluster: str,
overrides: dict,
aws_conn_id: Optional[str] = None,
region_name: Optional[str] = None,
launch_type: str = 'EC2',
capacity_provider_strategy: Optional[list] = None,
group: Optional[str] = None,
placement_constraints: Optional[list] = None,
placement_strategy: Optional[list] = None,
platform_version: Optional[str] = None,
network_configuration: Optional[dict] = None,
tags: Optional[dict] = None,
awslogs_group: Optional[str] = None,
awslogs_region: Optional[str] = None,
awslogs_stream_prefix: Optional[str] = None,
propagate_tags: Optional[str] = None,
quota_retry: Optional[dict] = None,
reattach: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.task_definition = task_definition
self.cluster = cluster
self.overrides = overrides
self.launch_type = launch_type
self.capacity_provider_strategy = capacity_provider_strategy
self.group = group
self.placement_constraints = placement_constraints
self.placement_strategy = placement_strategy
self.platform_version = platform_version
self.network_configuration = network_configuration
self.tags = tags
self.awslogs_group = awslogs_group
self.awslogs_stream_prefix = awslogs_stream_prefix
self.awslogs_region = awslogs_region
self.propagate_tags = propagate_tags
self.reattach = reattach
if self.awslogs_region is None:
self.awslogs_region = region_name
self.hook: Optional[AwsBaseHook] = None
self.client: Optional[ECSProtocol] = None
self.arn: Optional[str] = None
self.retry_args = quota_retry
@provide_session
def execute(self, context, session=None):
self.log.info(
'Running ECS Task - Task definition: %s - on cluster %s', self.task_definition, self.cluster
)
self.log.info('ECSOperator overrides: %s', self.overrides)
self.client = self.get_hook().get_conn()
if self.reattach:
self._try_reattach_task(context)
if not self.arn:
self._start_task(context)
self._wait_for_task_ended()
self._check_success_task()
self.log.info('ECS Task has been successfully executed')
if self.reattach:
# Clear the XCom value storing the ECS task ARN if the task has completed
# as we can't reattach it anymore
self._xcom_del(session, self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id))
if self.do_xcom_push:
return self._last_log_message()
return None
def _xcom_del(self, session, task_id):
session.query(XCom).filter(XCom.dag_id == self.dag_id, XCom.task_id == task_id).delete()
def _start_task(self, context):
run_opts = {
'cluster': self.cluster,
'taskDefinition': self.task_definition,
'overrides': self.overrides,
'startedBy': self.owner,
}
if self.capacity_provider_strategy:
run_opts['capacityProviderStrategy'] = self.capacity_provider_strategy
elif self.launch_type:
run_opts['launchType'] = self.launch_type
if self.platform_version is not None:
run_opts['platformVersion'] = self.platform_version
if self.group is not None:
run_opts['group'] = self.group
if self.placement_constraints is not None:
run_opts['placementConstraints'] = self.placement_constraints
if self.placement_strategy is not None:
run_opts['placementStrategy'] = self.placement_strategy
if self.network_configuration is not None:
run_opts['networkConfiguration'] = self.network_configuration
if self.tags is not None:
run_opts['tags'] = [{'key': k, 'value': v} for (k, v) in self.tags.items()]
if self.propagate_tags is not None:
run_opts['propagateTags'] = self.propagate_tags
response = self.client.run_task(**run_opts)
failures = response['failures']
if len(failures) > 0:
raise ECSOperatorError(failures, response)
self.log.info('ECS Task started: %s', response)
self.arn = response['tasks'][0]['taskArn']
ecs_task_id = self.arn.split("/")[-1]
self.log.info(f"ECS task ID is: {ecs_task_id}")
if self.reattach:
# Save the task ARN in XCom to be able to reattach it if needed
self._xcom_set(
context,
key=self.REATTACH_XCOM_KEY,
value=self.arn,
task_id=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
)
def _xcom_set(self, context, key, value, task_id):
XCom.set(
key=key,
value=value,
task_id=task_id,
dag_id=self.dag_id,
execution_date=context["ti"].execution_date,
)
def _try_reattach_task(self, context):
task_def_resp = self.client.describe_task_definition(taskDefinition=self.task_definition)
ecs_task_family = task_def_resp['taskDefinition']['family']
list_tasks_resp = self.client.list_tasks(
cluster=self.cluster, desiredStatus='RUNNING', family=ecs_task_family
)
running_tasks = list_tasks_resp['taskArns']
# Check if the ECS task previously launched is already running
previous_task_arn = self.xcom_pull(
context,
task_ids=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
key=self.REATTACH_XCOM_KEY,
)
if previous_task_arn in running_tasks:
self.arn = previous_task_arn
self.log.info("Reattaching previously launched task: %s", self.arn)
else:
self.log.info("No active previously launched task found to reattach")
def _wait_for_task_ended(self) -> None:
if not self.client or not self.arn:
return
waiter = self.client.get_waiter('tasks_stopped')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(cluster=self.cluster, tasks=[self.arn])
return
def _cloudwatch_log_events(self) -> Generator:
if self._aws_logs_enabled():
task_id = self.arn.split("/")[-1]
stream_name = f"{self.awslogs_stream_prefix}/{task_id}"
yield from self.get_logs_hook().get_log_events(self.awslogs_group, stream_name)
else:
yield from ()
def _aws_logs_enabled(self):
return self.awslogs_group and self.awslogs_stream_prefix
def _last_log_message(self):
try:
return deque(self._cloudwatch_log_events(), maxlen=1).pop()["message"]
except IndexError:
return None
def _check_success_task(self) -> None:
if not self.client or not self.arn:
return
response = self.client.describe_tasks(cluster=self.cluster, tasks=[self.arn])
self.log.info('ECS Task stopped, check status: %s', response)
# Get logs from CloudWatch if the awslogs log driver was used
for event in self._cloudwatch_log_events():
event_dt = datetime.fromtimestamp(event['timestamp'] / 1000.0)
self.log.info("[%s] %s", event_dt.isoformat(), event['message'])
if len(response.get('failures', [])) > 0:
raise AirflowException(response)
for task in response['tasks']:
# This is a `stoppedReason` that indicates a task has not
# successfully finished, but there is no other indication of failure
# in the response.
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/stopped-task-errors.html
if re.match(r'Host EC2 \(instance .+?\) (stopped|terminated)\.', task.get('stoppedReason', '')):
raise AirflowException(
'The task was stopped because the host instance terminated: {}'.format(
task.get('stoppedReason', '')
)
)
containers = task['containers']
for container in containers:
if container.get('lastStatus') == 'STOPPED' and container['exitCode'] != 0:
raise AirflowException(f'This task is not in success state {task}')
elif container.get('lastStatus') == 'PENDING':
raise AirflowException(f'This task is still pending {task}')
elif 'error' in container.get('reason', '').lower():
raise AirflowException(
                        'This container encountered an error during launch: {}'.format(
container.get('reason', '').lower()
)
)
def get_hook(self) -> AwsBaseHook:
"""Create and return an AwsHook."""
if self.hook:
return self.hook
self.hook = AwsBaseHook(aws_conn_id=self.aws_conn_id, client_type='ecs', region_name=self.region_name)
return self.hook
def get_logs_hook(self) -> AwsLogsHook:
"""Create and return an AwsLogsHook."""
return AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.awslogs_region)
def on_kill(self) -> None:
if not self.client or not self.arn:
return
response = self.client.stop_task(
cluster=self.cluster, task=self.arn, reason='Task killed by the user'
)
self.log.info(response)
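# ---------------------------------------------------------------------------
# Hedged usage sketch, kept as a comment so the provider module stays free of
# import-time side effects. The cluster, task definition, subnet and log group
# names below are placeholders, not defaults of this operator:
#
#   run_task = ECSOperator(
#       task_id="run_ecs_task",
#       cluster="example-cluster",
#       task_definition="example-task-def:1",
#       launch_type="FARGATE",
#       overrides={"containerOverrides": []},
#       network_configuration={
#           "awsvpcConfiguration": {"subnets": ["subnet-0123456789abcdef0"]}
#       },
#       awslogs_group="/ecs/example",
#       awslogs_stream_prefix="ecs/example-container",
#   )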
| dhuang/incubator-airflow | airflow/providers/amazon/aws/operators/ecs.py | Python | apache-2.0 | 17,123 | 0.002686 |
import numpy as np
import warnings
import subprocess
import pogoFunctions as pF
import pdb
from PolyInterface import poly
class PogoInput:
def __init__(self,
fileName,
elementTypes,
signals,
historyMeasurement,
nodes = None,
elements = None,
geometryFile = None,
precision=8,
targetMeshSize = 5e-5,
nDims=2,
nDofPerNode = None,
notes = None,
runName = 'pogoJob',
nt = 100,
dt = 1e-8,
elementTypeRefs = None,
materialTypeRefs = None,
orientationRefs = None,
elementParameters = None,
materials = [[0,7e10,0.34,2700],],
orientations = None,
boundaryConditions = None,
historyMeasurementFrequency = 20,
fieldStoreIncrements = None,
folderIn = None,
totalForce = False,
version = 1.03,
writeFile = True):
self.fileName = fileName
### Header
self.header = np.array(['']*20, dtype='str')
if version not in [1.03, 1.04]:
raise ValueError('Input file version must be 1.03 or 1.04.')
headerString = '%pogo-inp{}'.format(version)
for c1 in range(0, len(headerString)):
self.header[c1] = headerString[c1]
### Precision
if precision not in [4,8]:
raise ValueError('Precision must be 4 or 8.')
self.precision = np.array([precision,],dtype='int32')
self.nDims = np.array([nDims,],dtype='int32')
### Number of degrees of freedom per node
if nDofPerNode == None:
nDofPerNode = self.nDims
if nDofPerNode not in [1,2,3]:
raise ValueError('Number of degrees of freedom must be 1, 2 or 3')
self.nDofPerNode = np.array([nDofPerNode,],dtype='int32')
### Set notes
self.notes = np.array(['']*1024, dtype='str')
if notes != None:
if len(notes) > 1024:
notes = notes[:1024]
for character in range(len(notes)):
self.notes[character] = notes[character]
### Set runname
self.runName = np.array(['']*80, dtype='str')
if len(runName) > 80:
runName = runName[:80]
for character in range(0, len(runName)):
self.runName[character] = runName[character]
### Set time step and run time
self.nt = np.array([nt,],dtype='int32')
self.dt = np.array([dt,],dtype=self.getPrecString())
### Node generation if necessary
if not np.any(nodes) and not geometryFile:
raise ValueError('Either a poly file or node/element definitions are required')
elif geometryFile and targetMeshSize and not np.any(elements) and not np.any(nodes):
if geometryFile.split('.')[-1] == 'dxf':
print 'Creating poly file from {}'.format(geometryFile)
poly.poly(geometryFile,elementSize = targetMeshSize,writeFile=True)
if geometryFile.split('.')[-1] == 'poly':
geometryFile = geometryFile[:-5]
if self.nDims == 2:
targetMeshArea = targetMeshSize*targetMeshSize
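                # Triangle flags (as understood here): -q quality meshing,
                # -j discard unused vertices, -a caps the element area
                # derived from targetMeshSize.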
subprocess.call('triangle -q -j -a{:.12}F {}.poly'.format(targetMeshArea,geometryFile))
elif self.nDims == 3:
targetMeshVolume = targetMeshSize*targetMeshSize*targetMeshSize
### Add cwd
subprocess.call('tetgen {:.12}F {}.poly'.format(targetMeshVolume,geometryFile))
nodes = pF.loadNodeFile(geometryFile+'.1.node')
elements = pF.loadElementFile(geometryFile+'.1.ele')
### Number of nodes and node positions
if np.shape(nodes)[0] != nDims:
raise ValueError('nodes must be in shape (nDims, nNodes).')
self.nNodes = np.array([np.shape(nodes)[1],],dtype = 'int32')
self.nodes = nodes.astype(self.getPrecString()).T
### Number of elements and nodes per element
self.nElements = np.array([np.shape(elements)[1],],dtype='int32')
self.nNodesPerElement = np.array([np.shape(elements)[0],],dtype='int32')
### Element type refs
if elementTypeRefs == None:
elementTypeRefs = np.zeros(self.nElements)
if len(elementTypeRefs) != self.nElements:
raise ValueError('elementTypeRefs must be of length nElements.')
#if min(elementTypeRefs) != 0:
# raise ValueError('elementTypeRefs must be 1 indexed.')
self.elementTypeRefs = elementTypeRefs.astype('int32')# - 1
### Material type refs
if materialTypeRefs == None:
materialTypeRefs = np.zeros(self.nElements)
if len(materialTypeRefs) != self.nElements:
raise ValueError('materialTypeRefs must be of length nElements.')
#if min(materialTypeRefs) != 1:
# raise ValueError('materialTypeRefs must be 1 indexed.')
self.materialTypeRefs = materialTypeRefs.astype('int32') #- 1
### Element orientations
if orientationRefs == None:
orientationRefs = np.zeros(self.nElements,dtype = 'int32')
if len(orientationRefs)!= self.nElements:
raise ValueError('orientationRefs must be of length nElements.')
if min(elementTypeRefs) < 0: #unused values are set to 0 so -1 in zero indexing
raise ValueError('orientationRefs must be 1 indexed.')
self.orientationRefs = orientationRefs.astype('int32')# - 1
### Elements
if np.max(elements) > self.nNodes:
raise ValueError('elements points to nodes which are greater than nNodes.')
if np.min(elements) < 0:
raise ValueError('elements must be 1 indexed.')
self.elements = elements.astype('int32') - 1 #convert to zero indexing
self.elements = self.elements.T
### PML sets
self.nPmlSets = np.array([0,],dtype = 'int32')
self.pmlParams = np.array([0,],dtype = 'int32')
### Element types
self.nElementTypes = np.array([len(elementTypes),],dtype = 'int32')
if elementParameters == None:
elementParameters = np.array([0,]*len(elementTypes), dtype = 'int32')
if np.max(self.elementTypeRefs) > self.nElementTypes - 1:
raise ValueError('elementTypeRefs points to element types greater than the number of types of element.')
self.elementTypes = []
for ii,elementType in enumerate(elementTypes):
self.elementTypes.append(ElementType(elementType,elementParameters[ii],self.getPrecString()))
### Material types
self.nMaterials = np.array([len(materials),], dtype = 'int32')
self.materials = []
for material in materials:
self.materials.append(Material(material,self.getPrecString()))
### Orientations
if orientations == None:
self.nOr = np.array([0,],dtype ='int32')
self.orientations = None
else:
self.orientations = []
self.nOr = np.array([len(orientations),],dtype = 'int32')
for orientation in orientations:
self.orientations.append(Orientation(orientation,self.getPrecString()))
### Boundary conditions
if boundaryConditions == None:
self.nFixDof = np.array([0,],dtype ='int32')
self.boundaryConditions = None
else:
nSets = len(boundaryConditions) / 2
self.nFixDof = np.array([sum([len(boundaryConditions[c1*2]) for c1 in range(nSets)]),],dtype = 'int32')
self.boundaryConditions = []
for c1 in range(0,nSets):
#self.boundaryConditions.append(BoundaryCondition(boundaryConditions[c1]))
self.boundaryConditions.append(np.array([(boundaryConditions[c1*2]-1)*4 + boundaryConditions[c1*2+1]-1,],dtype='int32'))
### Input signals
self.nInputSignals = np.array([len(signals),],dtype = 'int32')
self.signals = []
for signal in signals:
self.signals.append(Signal(signal,totalForce,self.getPrecString(),dt))
### History measurements
if historyMeasurement == None:
warnings.warn('Warning : No history measurements requested.')
self.nMeas = 0
self.historyMeasurement = 0
else:
self.nMeas = np.array([len(historyMeasurement),],dtype = 'int32')
self.historyMeasurement = HistoryMeasurement(historyMeasurement,historyMeasurementFrequency)
### Field measurements
if fieldStoreIncrements == None:
self.nFieldStore = np.array([0,],dtype='int32')
self.fieldStoreIncrements = np.array([0,],dtype ='int32')
else:
self.nFieldStore = np.array([len(fieldStoreIncrements),],dtype = 'int32')
if np.max(fieldStoreIncrements) > nt or np.min(fieldStoreIncrements) < 1:
raise ValueError('fieldStoreIncrements out of range [1, nt].')
self.fieldStoreIncrements = np.array([fieldStoreIncrements-1,],dtype = 'int32')
### Write to file
if writeFile:
self.writeFile()
def getPrecString(self):
precString = 'float64'
if self.precision == 4:
            precString = 'float32'
return precString
def writeFile(self):
with open(self.fileName + '.pogo-inp','wb') as f:
self.header.tofile(f)
self.precision.tofile(f)
self.nDims.tofile(f)
self.nDofPerNode.tofile(f)
self.notes.tofile(f)
self.runName.tofile(f)
self.nt.tofile(f)
self.dt.tofile(f)
self.nNodes.tofile(f)
self.nodes.tofile(f)
self.nElements.tofile(f)
self.nNodesPerElement.tofile(f)
self.elementTypeRefs.tofile(f)
self.materialTypeRefs.tofile(f)
self.orientationRefs.tofile(f)
self.elements.tofile(f)
self.nPmlSets.tofile(f)
self.pmlParams.tofile(f)
self.nElementTypes.tofile(f)
for elementType in self.elementTypes:
elementType.writeElementType(f)
self.nMaterials.tofile(f)
for material in self.materials:
material.writeMaterial(f)
self.nOr.tofile(f)
if not self.orientations == None:
for orientation in self.orientations:
orientation.writeOrientation(f)
self.nFixDof.tofile(f)
if not self.boundaryConditions == None:
for bc in self.boundaryConditions:
bc.tofile(f)
self.nInputSignals.tofile(f)
self.signals[0].nt.tofile(f)
self.signals[0].dt.tofile(f)
for signal in self.signals:
signal.writeSignal(f)
if self.nMeas>0:
self.historyMeasurement.writeHistory(f)
else:
np.array([0,], dtype='int32').tofile(f)
np.array([0,], dtype='int32').tofile(f)
self.nFieldStore.tofile(f)
self.fieldStoreIncrements.tofile(f)
class Material:
def __init__(self,materialInfo,precString):
self.matType = np.array([materialInfo[0],],dtype='int32')
self.matProps = np.array([materialInfo[1:],],dtype=precString)
self.nMatParams = np.array([len(materialInfo[1:]),],dtype='int32')
def writeMaterial(self,fileId):
self.matType.tofile(fileId)
self.nMatParams.tofile(fileId)
self.matProps.tofile(fileId)
class ElementType:
def __init__(self,elementType,elementParams,precString):
self.elTypeSave = np.array(['']*20,dtype='str')
for character in range(len(elementType)):
self.elTypeSave[character] = elementType[character]
if elementParams:
            self.nParams = np.array([len(elementParams),],dtype='int32')
self.params = np.array(elementParams,dtype = precString)
else:
self.params = np.array([0,],dtype='int32')
self.nParams = np.array([0,],dtype='int32')
def writeElementType(self,fileId):
self.elTypeSave.tofile(fileId)
self.nParams.tofile(fileId)
self.params.tofile(fileId)
class Orientation:
def __init__(self,orInfo,precString):
self.paramType = np.array([orInfo[0],], dtype='int32')
self.nOrParams = np.array([len(orInfo[1:]),],dtype='int32')
self.paramValues = np.array([orInfo[1:],],dtype = precString)
def writeOrientation(self,fileId):
self.paramType.tofile(fileId)
self.nOrParams.tofile(fileId)
self.paramValues.tofile(fileId)
class BoundaryCondition:
def __init__(self,BCs):
self.nodes = np.array(BCs[0])
self.dof = np.array(BCs[1])
def writeBoundaryCondition(self,fileId):
dofOut = np.array([(self.nodes-1)*4 + self.dof-1,],dtype='int32')
dofOut.tofile(fileId)
class HistoryMeasurement:
nodes = np.array([],dtype='int32')
dofs = np.array([],dtype='int32')
def __init__(self,histInfo,frequency):
###Add Input checking
for history in histInfo:
self.nodes = np.hstack((self.nodes,history[0]))
self.dofs = np.hstack((self.dofs,history[1]))
self.frequency = np.array([frequency,],dtype = 'int32')
self.nMeas = np.array([len(self.nodes),],dtype = 'int32')
###Must Add Version 1.04 support
def writeHistory(self,fileId):
self.nMeas.tofile(fileId)
self.frequency.tofile(fileId)
outHist = (self.nodes*4 + self.dofs).astype('int32')# - 1
outHist.tofile(fileId)
class FieldMeasurement:
def __init__(self,increments=0):
###Add input checking
self.increments = np.array([increments - 1],dtype='int32')
class Signal:
def __init__(self, signalInfo, totalForce, precString,dt):
if signalInfo:
nNodes = len(signalInfo[0])
self.type = np.array([signalInfo[3],],dtype = 'int32')
# if len(np.unique(signalInfo[0])) != nNodes:
# errStr = 'Duplicate nodes cannot be specified for a signal'
# raise ValueError(errStr)
if np.size(signalInfo[1]) != 1 and len(signalInfo[1]) != nNodes:
raise ValueError('Signal amplitude must be a scalar or a vector of amplitudes for each node signal applied to.')
if signalInfo[3] not in [0,1]:
                raise ValueError('Signal type must be 0 or 1.')
self.nNodes = np.array([len(signalInfo[0]),],dtype='int32')
self.nodes = np.array(signalInfo[0],dtype = 'int32')
if type(signalInfo[1]) is float:
if totalForce == True:
                    if signalInfo[3] == 1:
raise ValueError('totalForce not supported for displacement load.')
else:
ampVal = signalInfo[1]/nNodes
else:
ampVal = signalInfo[1]
amp = np.array(np.ones(nNodes)*ampVal, dtype=precString)
elif type(signalInfo[1]) is np.ndarray:
if len(signalInfo[1]) != self.nNodes:
raise ValueError('If signal amplitude is an array, a value must be specified for each node in the transducer.')
if totalForce == True:
raise Warning('totalForce is not supported for loads specified for individual nodes.')
amp = np.array([signalInfo[1],], dtype=precString)
else:
raise ValueError('Signal amplitude not recognised')
self.amplitude = amp
self.dof = np.array(signalInfo[2],dtype ='int32')
self.shape = np.array(signalInfo[4],dtype = precString)
self.dt = np.array(dt,dtype=precString)
self.nt = np.array(len(signalInfo[4]),dtype = 'int32')
def writeSignal(self,fileId):
self.nNodes.tofile(fileId)
self.type.tofile(fileId)
dof = self.nodes*4 + self.dof-1
dof.tofile(fileId)
self.amplitude.tofile(fileId)
        self.shape.tofile(fileId)
 | ab9621/PogoLibrary | pogoInput.py | Python | gpl-3.0 | 17,129 | 0.021834 |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons that we cannot just use API 17 attributes,
so we are generating another set of resources by this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2. b/8654490
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
# Almost all the attributes that has "Start" or "End" in
# its name should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
'drawableStart' : 'drawableLeft',
'layout_alignStart' : 'layout_alignLeft',
'layout_marginStart' : 'layout_marginLeft',
'layout_alignParentStart' : 'layout_alignParentLeft',
'layout_toStartOf' : 'layout_toLeftOf',
'paddingEnd' : 'paddingRight',
'drawableEnd' : 'drawableRight',
'layout_alignEnd' : 'layout_alignRight',
'layout_marginEnd' : 'layout_marginRight',
'layout_alignParentEnd' : 'layout_alignParentRight',
'layout_toEndOf' : 'layout_toRightOf'}
ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
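# After prefixing, e.g. 'android:paddingStart' is mapped to 'android:paddingLeft'.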
ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
def IterateXmlElements(node):
"""minidom helper function that iterates all the element nodes.
Iteration order is pre-order depth-first."""
if node.nodeType == node.ELEMENT_NODE:
yield node
for child_node in node.childNodes:
for child_node_element in IterateXmlElements(child_node):
yield child_node_element
def ParseAndReportErrors(filename):
try:
return minidom.parse(filename)
except Exception:
import traceback
traceback.print_exc()
sys.stderr.write('Failed to parse XML file: %s\n' % filename)
sys.exit(1)
def AssertNotDeprecatedAttribute(name, value, filename):
"""Raises an exception if the given attribute is deprecated."""
msg = None
if name in ATTRIBUTES_TO_MAP_REVERSED:
msg = '{0} should use {1} instead of {2}'.format(filename,
ATTRIBUTES_TO_MAP_REVERSED[name], name)
elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
msg = '{0} should use start/end instead of left/right for {1}'.format(
filename, name)
if msg:
msg += ('\nFor background, see: http://android-developers.blogspot.com/'
'2013/03/native-rtl-support-in-android-42.html\n'
'If you have a legitimate need for this attribute, discuss with '
'kkimlabs@chromium.org or newt@chromium.org')
raise Exception(msg)
def WriteDomToFile(dom, filename):
"""Write the given dom to filename."""
build_utils.MakeDirectory(os.path.dirname(filename))
with open(filename, 'w') as f:
dom.writexml(f, '', ' ', '\n', encoding='utf-8')
def HasStyleResource(dom):
"""Return True if the dom is a style resource, False otherwise."""
root_node = IterateXmlElements(dom).next()
return bool(root_node.nodeName == 'resources' and
list(root_node.getElementsByTagName('style')))
def ErrorIfStyleResourceExistsInDir(input_dir):
"""If a style resource is in input_dir, raises an exception."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = ParseAndReportErrors(input_filename)
if HasStyleResource(dom):
raise Exception('error: style file ' + input_filename +
' should be under ' + input_dir +
'-v17 directory. Please refer to '
'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert layout resource to API 14 compatible layout resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
# Iterate all the elements' attributes to find attributes to convert.
for element in IterateXmlElements(dom):
for name, value in list(element.attributes.items()):
# Convert any API 17 Start/End attributes to Left/Right attributes.
# For example, from paddingStart="10dp" to paddingLeft="10dp"
# Note: gravity attributes are not necessary to convert because
# start/end values are backward-compatible. Explained at
# https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
if name in ATTRIBUTES_TO_MAP:
element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
del element.attributes[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert style resource to API 14 compatible style resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
for style_element in dom.getElementsByTagName('style'):
for item_element in style_element.getElementsByTagName('item'):
name = item_element.attributes['name'].value
value = item_element.childNodes[0].nodeValue
if name in ATTRIBUTES_TO_MAP:
item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename):
"""Convert API 17 layout resource to API 14 compatible layout resource.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
If the generated resource is identical to the original resource,
don't do anything. If not, write the generated resource to
output_v14_filename, and copy the original resource to output_v17_filename.
"""
dom = ParseAndReportErrors(input_filename)
is_modified = GenerateV14LayoutResourceDom(dom, input_filename)
if is_modified:
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
# Copy the original resource.
build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
"""Convert API 17 style resources to API 14 compatible style resource.
Write the generated style resource to output_v14_filename.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
"""
dom = ParseAndReportErrors(input_filename)
GenerateV14StyleResourceDom(dom, input_filename)
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
"""Convert layout resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
output_v17_filename = os.path.join(output_v17_dir, rel_filename)
GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename)
def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
"""Convert style resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
GenerateV14StyleResource(input_filename, output_v14_filename)
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--res-dir',
help='directory containing resources '
'used to generate v14 compatible resources')
parser.add_option('--res-v14-compatibility-dir',
help='output directory into which '
'v14 compatible resources will be generated')
parser.add_option('--stamp', help='File to touch on success')
options, args = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('res_dir', 'res_v14_compatibility_dir')
build_utils.CheckOptions(options, parser, required=required_options)
return options
def GenerateV14Resources(res_dir, res_v14_dir):
for name in os.listdir(res_dir):
if not os.path.isdir(os.path.join(res_dir, name)):
continue
dir_pieces = name.split('-')
resource_type = dir_pieces[0]
qualifiers = dir_pieces[1:]
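    # e.g. "values-sw600dp-v17" -> resource_type "values", qualifiers ["sw600dp", "v17"].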
api_level_qualifier_index = -1
api_level_qualifier = ''
for index, qualifier in enumerate(qualifiers):
if re.match('v[0-9]+$', qualifier):
api_level_qualifier_index = index
api_level_qualifier = qualifier
break
# Android pre-v17 API doesn't support RTL. Skip.
if 'ldrtl' in qualifiers:
continue
input_dir = os.path.abspath(os.path.join(res_dir, name))
# We also need to copy the original v17 resource to *-v17 directory
# because the generated v14 resource will hide the original resource.
output_v14_dir = os.path.join(res_v14_dir, name)
output_v17_dir = os.path.join(res_v14_dir, name + '-v17')
# We only convert layout resources under layout*/, xml*/,
# and style resources under values*/.
if resource_type in ('layout', 'xml'):
if not api_level_qualifier:
GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
output_v17_dir)
elif resource_type == 'values':
if api_level_qualifier == 'v17':
output_qualifiers = qualifiers[:]
del output_qualifiers[api_level_qualifier_index]
output_v14_dir = os.path.join(res_v14_dir,
'-'.join([resource_type] +
output_qualifiers))
GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
elif not api_level_qualifier:
ErrorIfStyleResourceExistsInDir(input_dir)
def main():
options = ParseArgs()
res_v14_dir = options.res_v14_compatibility_dir
build_utils.DeleteDirectory(res_v14_dir)
build_utils.MakeDirectory(res_v14_dir)
GenerateV14Resources(options.res_dir, res_v14_dir)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| mdakin/engine | build/android/gyp/generate_v14_compatible_resources.py | Python | bsd-3-clause | 11,922 | 0.008136 |
#!/usr/bin/env python
"""Main Django renderer."""
import importlib
import os
import pdb
import time
from django import http
from django import shortcuts
from django import template
from django.views.decorators import csrf
import psutil
import logging
from grr import gui
from grr.gui import api_call_renderers
from grr.gui import renderers
from grr.gui import urls
from grr.gui import webauth
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")
class ViewsInit(registry.InitHook):
pre = ["StatsInit"]
def RunOnce(self):
"""Run this once on init."""
# Renderer-aware metrics
stats.STATS.RegisterEventMetric(
"ui_renderer_latency", fields=[("renderer", str)])
stats.STATS.RegisterEventMetric(
"ui_renderer_response_size", fields=[("renderer", str)],
units=stats.MetricUnits.BYTES)
stats.STATS.RegisterCounterMetric(
"ui_renderer_failure", fields=[("renderer", str)])
# General metrics
stats.STATS.RegisterCounterMetric("ui_unknown_renderer")
stats.STATS.RegisterCounterMetric("http_access_denied")
stats.STATS.RegisterCounterMetric("http_server_error")
@webauth.SecurityCheck
@csrf.ensure_csrf_cookie # Set the csrf cookie on the homepage.
def Homepage(request):
"""Basic handler to render the index page."""
# We build a list of all js files to include by looking at the list
# of renderers modules. JS files are always named in accordance with
# renderers modules names. I.e. if there's a renderers package called
# grr.gui.plugins.acl_manager, we expect a js files called acl_manager.js.
renderers_js_files = set()
for cls in renderers.Renderer.classes.values():
if aff4.issubclass(cls, renderers.Renderer) and cls.__module__:
module_components = cls.__module__.split(".")
# Only include files corresponding to renderers in "plugins" package.
if module_components[-2] == "plugins":
renderers_js_files.add(module_components[-1] + ".js")
create_time = psutil.Process(os.getpid()).create_time()
context = {"page_title": config_lib.CONFIG["AdminUI.page_title"],
"heading": config_lib.CONFIG["AdminUI.heading"],
"report_url": config_lib.CONFIG["AdminUI.report_url"],
"help_url": config_lib.CONFIG["AdminUI.help_url"],
"use_precompiled_js": config_lib.CONFIG[
"AdminUI.use_precompiled_js"],
"renderers_js": renderers_js_files,
"timestamp": create_time}
return shortcuts.render_to_response(
"base.html", context, context_instance=template.RequestContext(request))
@webauth.SecurityCheck
def RenderBinaryDownload(request):
"""Basic handler to allow downloads of aff4:/config/executables files."""
path, filename = request.path.split("/", 2)[-1].rsplit("/", 1)
if not path or not filename:
return AccessDenied("Error: Invalid path.")
request.REQ = request.REQUEST
def Generator():
with aff4.FACTORY.Open(aff4_path, aff4_type="GRRSignedBlob",
token=BuildToken(request, 60)) as fd:
while True:
data = fd.Read(1000000)
if not data: break
yield data
base_path = rdfvalue.RDFURN("aff4:/config/executables")
aff4_path = base_path.Add(path).Add(filename)
if not aff4_path.RelativeName(base_path):
# Check for path traversals.
return AccessDenied("Error: Invalid path.")
filename = aff4_path.Basename()
response = http.HttpResponse(content=Generator(),
content_type="binary/octet-stream")
response["Content-Disposition"] = ("attachment; filename=%s" % filename)
return response
@webauth.SecurityCheck
@renderers.ErrorHandler()
def RenderApi(request):
"""Handler for the /api/ requests."""
return api_call_renderers.RenderHttpResponse(request)
@webauth.SecurityCheck
@renderers.ErrorHandler()
def RenderGenericRenderer(request):
"""Django handler for rendering registered GUI Elements."""
try:
action, renderer_name = request.path.split("/")[-2:]
renderer_cls = renderers.Renderer.GetPlugin(name=renderer_name)
except KeyError:
stats.STATS.IncrementCounter("ui_unknown_renderer")
return AccessDenied("Error: Renderer %s not found" % renderer_name)
# Check that the action is valid
["Layout", "RenderAjax", "Download", "Validate"].index(action)
renderer = renderer_cls()
result = http.HttpResponse(content_type="text/html")
# Pass the request only from POST parameters. It is much more convenient to
# deal with normal dicts than Django's Query objects so we convert here.
if flags.FLAGS.debug:
# Allow both POST and GET for debugging
request.REQ = request.POST.dict()
request.REQ.update(request.GET.dict())
else:
    # Only POST in production for CSRF protection.
request.REQ = request.POST.dict()
# Build the security token for this request
request.token = BuildToken(request, renderer.max_execution_time)
request.canary_mode = "canary_mode" in request.COOKIES
# Allow the renderer to check its own ACLs.
renderer.CheckAccess(request)
try:
# Does this renderer support this action?
method = getattr(renderer, action)
start_time = time.time()
try:
result = method(request, result) or result
finally:
total_time = time.time() - start_time
stats.STATS.RecordEvent("ui_renderer_latency",
total_time, fields=[renderer_name])
  except access_control.UnauthorizedAccess as e:
result = http.HttpResponse(content_type="text/html")
result = renderers.Renderer.GetPlugin("UnauthorizedRenderer")().Layout(
request, result, exception=e)
except Exception:
stats.STATS.IncrementCounter("ui_renderer_failure",
fields=[renderer_name])
if flags.FLAGS.debug:
pdb.post_mortem()
raise
if not isinstance(result, http.HttpResponse):
raise RuntimeError("Renderer returned invalid response %r" % result)
return result
def RedirectToRemoteHelp(path):
"""Redirect to GitHub-hosted documentation."""
target_path = os.path.join(config_lib.CONFIG["AdminUI.github_docs_location"],
path.replace(".html", ".adoc"))
  # We have to redirect via JavaScript so that we can access and preserve the
  # URL hash: the server never sees the hash part of the URL.
response = http.HttpResponse()
response.write("""
<script>
var friendly_hash = window.location.hash.replace('#_', '#').replace(/_/g, '-');
window.location = '%s' + friendly_hash;
</script>
""" % target_path)
return response
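# Illustrative effect of the hash rewrite performed by the JavaScript above
# (hypothetical anchor name): '#_user_manual' becomes '#user-manual'.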
@webauth.SecurityCheck
def RenderHelp(request, path, document_root=None, content_type=None):
"""Either serves local help files or redirects to the remote ones."""
_ = document_root
_ = content_type
request.REQ = request.REQUEST
help_path = request.path.split("/", 2)[-1]
if not help_path:
return AccessDenied("Error: Invalid help path.")
try:
user_record = aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(request.user), "GRRUser",
token=BuildToken(request, 60))
settings = user_record.Get(user_record.Schema.GUI_SETTINGS)
except IOError:
settings = aff4.GRRUser.SchemaCls.GUI_SETTINGS()
if settings.docs_location == settings.DocsLocation.REMOTE:
    # Redirect to the remote documentation.
return RedirectToRemoteHelp(help_path)
else:
# Serve prebuilt docs using static handler. To do that we have
# to resolve static handler's name to an actual function object.
static_handler_components = urls.static_handler.split(".")
static_handler_module = importlib.import_module(".".join(
static_handler_components[0:-1]))
static_handler = getattr(static_handler_module,
static_handler_components[-1])
return static_handler(request, path, document_root=urls.help_root)
def BuildToken(request, execution_time):
"""Build an ACLToken from the request."""
token = access_control.ACLToken(
username=request.user,
reason=request.REQ.get("reason", ""),
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime().Now() + execution_time)
for field in ["REMOTE_ADDR", "HTTP_X_FORWARDED_FOR"]:
remote_addr = request.META.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
def AccessDenied(message):
"""Return an access denied Response object."""
response = shortcuts.render_to_response("404.html", {"message": message})
logging.warn(message)
response.status_code = 403
stats.STATS.IncrementCounter("http_access_denied")
return response
def ServerError(unused_request, template_name="500.html"):
"""500 Error handler."""
stats.STATS.IncrementCounter("http_server_error")
response = shortcuts.render_to_response(template_name)
response.status_code = 500
return response
| wandec/grr | gui/views.py | Python | apache-2.0 | 9,063 | 0.00982 |
"""
Page requests wrapped around the requests library.
"""
import requests
from .headers import Headers
from proxy import proxy
class TimeoutException(Exception):
"""
    Connection timeout exception.
"""
pass
class ResponseException(Exception):
"""
    Response error exception.
"""
pass
class WebRequest(object):
"""
    Wrapper around requests.
"""
def __init__(self):
self.headers = Headers().get()
self.proxies = proxy.get_proxy()
def get(self, url):
"""
        Fetch a page with a GET request.
"""
try:
resp = requests.get(url, headers=self.headers,
proxies={'http': 'http://{}'.format(self.proxies)}, timeout=10)
return self.check_response(resp)
except Exception as e:
self.network_error(e)
def post(self, url, payload):
"""
        Send a POST request to a page.
"""
try:
resp = requests.post(url, data=payload, headers=self.headers,
proxies={'http': 'http://{}'.format(self.proxies)},
timeout=10)
return self.check_response(resp)
except Exception as e:
self.network_error(e)
def network_error(self, e):
proxy.delete_proxy(self.proxies)
print('error: {}'.format(e))
raise TimeoutException('timeout')
def check_response(self, resp):
"""
        Check the response status.
:param resp:
:return:
"""
if resp.status_code == 200:
return resp
else:
raise ResponseException('response status error: {}'.format(resp.status_code))
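# A minimal usage sketch (assumes a working proxy pool behind the `proxy` module
# imported above; the URL is only a placeholder):
if __name__ == '__main__':
    try:
        resp = WebRequest().get('http://example.com')
        print(resp.status_code)
    except (TimeoutException, ResponseException) as exc:
        print('request failed: {}'.format(exc))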
| bobobo80/python-crawler-test | web_get/webget.py | Python | mit | 1,636 | 0.001906 |
#! /usr/bin/python
"""Src-depend is a simple tool for sketching source code dependency graphs
from source code itself. It iterates through all source code files in a given
directory, finds import statements and turns them into edges of a dependency
graph.
Uses graphviz for sketching graphs."""
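# Example invocation (paths and excludes are placeholders), using the flags
# defined in parseargs() below:
#   python depend.py -l python -o deps -f png -r --exclude setup.py ./myproject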
import argparse
import graphviz
import logging
import os.path
import re
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--lang', dest='lang', default='python',
help='specifies language plugin to be used (defaults to python)')
parser.add_argument('-o', '--output', dest='img_out',
        help='output sketched graph to specified file (appends extension automatically); source will be output to IMG_OUT')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug mode')
parser.add_argument('-f', '--output-format', dest='format', default='png',
        help='specifies output image\'s format (defaults to .png)')
parser.add_argument('-r', '--remove-redundant', dest='remove-redundant',
action='store_true', help='remove direct dependencies on modules that module depends on indirectly')
parser.add_argument('-e', '--exclude', dest='excludes', nargs='+', default=[],
help='a filename to ommit (multiple names possible)')
parser.add_argument('--exclude-regex', dest='exclude-regex', default=None,
help='filenames matching specified regex will be ignored')
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='quiet mode')
parser.add_argument('target', help='source code directory to analyse')
return parser.parse_args().__dict__
def main(args):
log_level = logging.INFO
if args['debug']:
log_level = logging.DEBUG
elif args['quiet']:
log_level = logging.ERROR
logging.basicConfig(
level=log_level,
format='[%(asctime)s; %(levelname)s]: %(message)s'
)
is_excluded = exclude_checker(args['excludes'], args['exclude-regex'])
try:
import_obj = __import__('plugins.{}'.format(args['lang']))
plugin = getattr(import_obj, args['lang'])
except ImportError:
logging.error('Could not find plugin for {}!'.format(args['lang']))
return 1
files = find_source_files(args['target'], plugin.Module.filename_ext, is_excluded)
for f in files:
with open(f, 'r') as file:
plugin.Module(file, args['target'])
plugin.Module.create_dependency_tree()
if args['remove-redundant']:
plugin.Module.remove_redundant_dependencies()
graph = make_graph(*plugin.Module.registry)
graph.format = args['format']
if not args['img_out'] is None:
output = graph.render(args['img_out'])
logging.info('Writing graph image to {}...'.format(output))
def make_graph(*modules):
graph = graphviz.Digraph()
for module in modules:
graph.node(module.filename, module.name, module.attributes)
logging.debug('Creating node {}...'.format(module.name))
for dep in module.dependencies:
if not dep is None:
logging.debug('Creating dependency of {} on {}'.format(
module.name, dep.name
))
graph.edge(module.filename, dep.filename)
return graph
def find_source_files(path, ext, is_excluded):
basename = os.path.basename(path)
if is_excluded(basename):
        logging.debug('Omitting excluded path: {}...'.format(path))
elif not basename == '.' and basename.startswith('.'):
        logging.debug('Omitting hidden path: {}...'.format(path))
elif os.path.isfile(path) and path.endswith(ext):
        logging.info('{} recognized as source file.'.format(path))
yield path
elif os.path.isdir(path):
logging.debug('In dir "{}": {}'.format(path, os.listdir(path)))
for f in os.listdir(path):
for el in find_source_files(os.path.join(path, f), ext, is_excluded):
yield el
else:
logging.debug('{} is not a source file.'.format(path))
def exclude_checker(excluded, regex):
if regex is None:
return lambda filename: filename in excluded
else:
compiled_regex = re.compile(regex)
        return lambda filename: filename in excluded \
            or compiled_regex.match(filename)
if __name__ == '__main__':
exit(main(parseargs()))
| Sventimir/src-depend | depend.py | Python | apache-2.0 | 4,469 | 0.00358 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansSylotiNagri-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0039) #glyph00057
glyphs.append(0x0034) #uniA82A
glyphs.append(0x0035) #uniA82B
glyphs.append(0x0036) #glyph00054
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0053) #uni09EE
glyphs.append(0x0038) #glyph00056
glyphs.append(0x0015) #uniA80B
glyphs.append(0x0016) #uniA80C
glyphs.append(0x003D) #glyph00061
glyphs.append(0x0014) #uniA80A
glyphs.append(0x0019) #uniA80F
glyphs.append(0x0037) #glyph00055
glyphs.append(0x0017) #uniA80D
glyphs.append(0x0018) #uniA80E
glyphs.append(0x0032) #uniA828
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x004D) #uni09E8
glyphs.append(0x0054) #uni09EF
glyphs.append(0x0048) #uni2055
glyphs.append(0x0050) #uni09EB
glyphs.append(0x0002) #uni000D
glyphs.append(0x0051) #uni09EC
glyphs.append(0x0052) #uni09ED
glyphs.append(0x002C) #uniA822
glyphs.append(0x0049) #uni0964
glyphs.append(0x004A) #uni0965
glyphs.append(0x003E) #glyph00062
glyphs.append(0x0042) #glyph00066
glyphs.append(0x002D) #uniA823
glyphs.append(0x0023) #uniA819
glyphs.append(0x0022) #uniA818
glyphs.append(0x0033) #uniA829
glyphs.append(0x0043) #glyph00067
glyphs.append(0x001F) #uniA815
glyphs.append(0x001E) #uniA814
glyphs.append(0x0021) #uniA817
glyphs.append(0x0020) #uniA816
glyphs.append(0x001B) #uniA811
glyphs.append(0x001A) #uniA810
glyphs.append(0x001D) #uniA813
glyphs.append(0x001C) #uniA812
glyphs.append(0x0047) #glyph00071
glyphs.append(0x0041) #glyph00065
glyphs.append(0x004C) #uni09E7
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0028) #uniA81E
glyphs.append(0x0027) #uniA81D
glyphs.append(0x0003) #uni00A0
glyphs.append(0x0029) #uniA81F
glyphs.append(0x0024) #uniA81A
glyphs.append(0x003F) #glyph00063
glyphs.append(0x0026) #uniA81C
glyphs.append(0x0025) #uniA81B
glyphs.append(0x0005) #uni200C
glyphs.append(0x0004) #uni200B
glyphs.append(0x003B) #glyph00059
glyphs.append(0x0006) #uni200D
glyphs.append(0x003A) #glyph00058
glyphs.append(0x004E) #uni09E9
glyphs.append(0x002F) #uniA825
glyphs.append(0x0007) #uni2010
glyphs.append(0x0008) #uni2011
glyphs.append(0x004B) #uni09E6
glyphs.append(0x0009) #uni25CC
glyphs.append(0x004F) #uni09EA
glyphs.append(0x003C) #glyph00060
glyphs.append(0x0046) #glyph00070
glyphs.append(0x002A) #uniA820
glyphs.append(0x002B) #uniA821
glyphs.append(0x0012) #uniA808
glyphs.append(0x0013) #uniA809
glyphs.append(0x002E) #uniA824
glyphs.append(0x0000) #.notdef
glyphs.append(0x0030) #uniA826
glyphs.append(0x0031) #uniA827
glyphs.append(0x000C) #uniA802
glyphs.append(0x000D) #uniA803
glyphs.append(0x000A) #uniA800
glyphs.append(0x000B) #uniA801
glyphs.append(0x0010) #uniA806
glyphs.append(0x0011) #uniA807
glyphs.append(0x000E) #uniA804
glyphs.append(0x000F) #uniA805
return glyphs
| davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosanssylotinagri_regular.py | Python | gpl-3.0 | 3,639 | 0.023633 |
"""
This test illustrates how to generate an XML Mapnik style sheet from a pycnik
style sheet written in Python.
"""
import os
from pycnik import pycnik
import artefact
actual_xml_style_sheet = 'artefacts/style_sheet.xml'
expected_xml_style_sheet = 'style_sheet.xml'
class TestPycnik(artefact.TestCaseWithArtefacts):
def test_pycnik(self):
python_style_sheet = pycnik.import_style('style_sheet.py')
pycnik.translate(python_style_sheet, actual_xml_style_sheet)
with open(actual_xml_style_sheet) as actual, \
open(expected_xml_style_sheet) as expected:
self.assertEquals(actual.read(), expected.read())
| Mappy/pycnikr | tests/test_pycnik.py | Python | lgpl-3.0 | 660 | 0.001515 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# cbk_crm_information: CRM Information Tab
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Commission Filter',
'version': '0.1',
'author': 'Codeback Software',
'summary': '',
    'description': 'Adds fields so that the filters work correctly',
'website': 'http://codeback.es',
'images': [],
'depends': ['sale', 'account'],
'category': '',
'sequence': 26,
'demo': [],
'data': ['sale_view.xml', 'account_invoice_view.xml'],
'test': [],
'installable': True,
'application': False,
'auto_install': False,
'css': [],
} | codeback/openerp-cbk_sale_commission_filter | __openerp__.py | Python | agpl-3.0 | 1,616 | 0.004337 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from collections import OrderedDict
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import config
from buildbot.process.properties import Interpolate
from buildbot.process.results import SUCCESS
from buildbot.steps.package.rpm import rpmbuild
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import steps
from buildbot.test.util.misc import TestReactorMixin
class RpmBuild(steps.BuildStepMixin, TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_specfile(self):
with self.assertRaises(config.ConfigErrors):
rpmbuild.RpmBuild()
def test_success(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", dist=".el5"))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define "_specdir '
'`pwd`" --define "_srcrpmdir `pwd`" --define "dist .el5" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='lalala')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_autoRelease(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", autoRelease=True))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir `pwd`" '
'--define "_sourcedir `pwd`" --define "_specdir `pwd`" '
'--define "_srcrpmdir `pwd`" --define "_release 0" '
'--define "dist .el6" -ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_define(self):
defines = [("a", "1"), ("b", "2")]
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec",
define=OrderedDict(defines)))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define '
'"_specdir `pwd`" --define "_srcrpmdir `pwd`" '
'--define "a 1" --define "b 2" --define "dist .el6" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_define_none(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", define=None))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define '
'"_specdir `pwd`" --define "_srcrpmdir `pwd`" '
'--define "dist .el6" -ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
@defer.inlineCallbacks
def test_renderable_dist(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec",
dist=Interpolate('%(prop:renderable_dist)s')))
self.properties.setProperty('renderable_dist', '.el7', 'test')
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define "_specdir '
'`pwd`" --define "_srcrpmdir `pwd`" --define "dist .el7" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='lalala')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
yield self.runStep()
| anish/buildbot | master/buildbot/test/unit/test_steps_package_rpm_rpmbuild.py | Python | gpl-2.0 | 5,421 | 0.001476 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMarkupsafe(PythonPackage):
"""MarkupSafe is a library for Python that implements a unicode
string that is aware of HTML escaping rules and can be used to
implement automatic string escaping. It is used by Jinja 2, the
Mako templating engine, the Pylons web framework and many more."""
homepage = "http://www.pocoo.org/projects/markupsafe/"
url = "https://pypi.io/packages/source/M/MarkupSafe/MarkupSafe-1.0.tar.gz"
import_modules = ['markupsafe']
version('1.0', '2fcedc9284d50e577b5192e8e3578355')
version('0.23', 'f5ab3deee4c37cd6a922fb81e730da6e')
version('0.22', 'cb3ec29fd5361add24cfd0c6e2953b3e')
version('0.21', 'fde838d9337fa51744283f46a1db2e74')
version('0.20', '7da066d9cb191a70aa85d0a3d43565d1')
version('0.19', 'ccb3f746c807c5500850987006854a6d')
depends_on('py-setuptools', type='build')
| wscullin/spack | var/spack/repos/builtin/packages/py-markupsafe/package.py | Python | lgpl-2.1 | 2,130 | 0.000939 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2018 Célande Adrien
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from weboob.capabilities.bill import DocumentTypes, Subscription, Document
from weboob.browser.pages import LoggedPage, HTMLPage
from weboob.browser.filters.standard import CleanText, Regexp, Env, Date, Format, Field
from weboob.browser.filters.html import Link, Attr, TableCell
from weboob.browser.elements import ListElement, ItemElement, method, TableElement
class SubscriptionPage(LoggedPage, HTMLPage):
# because of freaking JS from hell
STATEMENT_TYPES = ('RCE', 'RPT', 'RCO')
@method
class iter_subscriptions(ListElement):
item_xpath = '//select[@id="compte"]/option'
class item(ItemElement):
klass = Subscription
obj_id = Regexp(Attr('.', 'value'), r'\w-(\w+)')
obj_label = CleanText('.')
obj_subscriber = Env('subscriber')
@method
class iter_documents(ListElement):
def condition(self):
return not (
CleanText('//p[contains(text(), "est actuellement indisponible")]')(self)
or CleanText('//p[contains(text(), "Aucun e-Relevé n\'est disponible")]')(self)
)
item_xpath = '//ul[contains(@class, "liste-cpte")]/li'
# you can have twice the same statement: same month, same subscription
ignore_duplicate = True
class item(ItemElement):
klass = Document
            obj_id = Format('%s_%s%s', Env('sub_id'), Regexp(CleanText('.//a/@title'), r' (\d{2}) '), CleanText('.//span[contains(@class, "date")]', symbols='/'))
obj_label = Format('%s - %s', CleanText('.//span[contains(@class, "lib")]'), CleanText('.//span[contains(@class, "date")]'))
obj_url = Format('/voscomptes/canalXHTML/relevePdf/relevePdf_historique/%s', Link('./a'))
obj_format = 'pdf'
obj_type = DocumentTypes.OTHER
def obj_date(self):
date = CleanText('.//span[contains(@class, "date")]')(self)
m = re.search(r'(\d{2}/\d{2}/\d{4})', date)
if m:
return Date(CleanText('.//span[contains(@class, "date")]'), dayfirst=True)(self)
else:
return Date(
Format(
'%s/%s',
Regexp(CleanText('.//a/@title'), r' (\d{2}) '),
CleanText('.//span[contains(@class, "date")]')
),
dayfirst=True
)(self)
def get_params(self, sub_label):
# the id is in the label
sub_value = Attr('//select[@id="compte"]/option[contains(text(), "%s")]' % sub_label, 'value')(self.doc)
form = self.get_form(name='formulaireHistorique')
form['formulaire.numeroCompteRecherche'] = sub_value
return form
def get_years(self):
return self.doc.xpath('//select[@id="annee"]/option/@value')
def has_error(self):
return (
CleanText('//p[contains(text(), "est actuellement indisponible")]')(self.doc)
or CleanText('//p[contains(text(), "Aucun e-Relevé n\'est disponible")]')(self.doc)
)
class DownloadPage(LoggedPage, HTMLPage):
def get_content(self):
if self.doc.xpath('//iframe'):
# the url has the form
# ../relevePdf_telechargement/affichagePDF-telechargementPDF.ea?date=XXX
part_link = Attr('//iframe', 'src')(self.doc).replace('..', '')
return self.browser.open('/voscomptes/canalXHTML/relevePdf%s' % part_link).content
return self.content
class ProSubscriptionPage(LoggedPage, HTMLPage):
@method
class iter_subscriptions(ListElement):
item_xpath = '//select[@id="numeroCompteRechercher"]/option[not(@disabled)]'
class item(ItemElement):
klass = Subscription
obj_label = CleanText('.')
obj_id = Regexp(Field('label'), r'\w? ?- (\w+)')
obj_subscriber = Env('subscriber')
obj__number = Attr('.', 'value')
@method
class iter_documents(TableElement):
item_xpath = '//table[@id="relevesPDF"]//tr[td]'
head_xpath = '//table[@id="relevesPDF"]//th'
# may have twice the same statement for a given month
ignore_duplicate = True
col_date = re.compile('Date du relevé')
col_label = re.compile('Type de document')
class item(ItemElement):
klass = Document
obj_date = Date(CleanText(TableCell('date')), dayfirst=True)
obj_label = Format('%s %s', CleanText(TableCell('label')), CleanText(TableCell('date')))
obj_id = Format('%s_%s', Env('sub_id'), CleanText(TableCell('date'), symbols='/'))
            # The URL uses an id that depends on the page where the document is.
            # For example, if the id is 0, it is the first document that you can
            # find on the page of year XXX for subscription YYYY.
obj_url = Link('.//a')
obj_format = 'pdf'
obj_type = DocumentTypes.OTHER
def submit_form(self, sub_number, year):
form = self.get_form(name='formRechHisto')
form['historiqueReleveParametre.numeroCompteRecherche'] = sub_number
form['typeRecherche'] = 'annee'
form['anneeRechercheDefaut'] = year
form.submit()
def get_years(self):
return self.doc.xpath('//select[@name="anneeRechercheDefaut"]/option/@value')
def no_statement(self):
return self.doc.xpath('//p[has-class("noresult")]')
def has_document(self, date):
return self.doc.xpath('//td[@headers="dateReleve" and contains(text(), "%s")]' % date.strftime('%d/%m/%Y'))
def get_sub_number(self, doc_id):
sub_id = doc_id.split('_')[0]
return Attr('//select[@id="numeroCompteRechercher"]/option[contains(text(), "%s")]' % sub_id, 'value')(self.doc)
| vicnet/weboob | modules/bp/pages/subscription.py | Python | lgpl-3.0 | 6,761 | 0.003108 |
from base import BaseHandler
from functions import *
from models import User
class SignupHandler(BaseHandler):
"""Sign up handler that is used to signup users."""
def get(self):
self.render("signup.html")
def post(self):
error = False
self.username = self.request.get("username")
self.password = self.request.get("password")
self.password_check = self.request.get("password_check")
self.email = self.request.get("email")
template_vars = dict(username=self.username,
email=self.email)
if not valid_username(self.username):
template_vars['error_username'] = "That's not a valid username."
error = True
elif User.by_username(self.username):
template_vars['error_username'] = "This username already exists."
error = True
if not valid_password(self.password):
template_vars['error_password'] = "That wasn't a valid password."
error = True
elif self.password != self.password_check:
template_vars['error_check'] = "Your passwords didn't match."
error = True
if not valid_email(self.email):
template_vars['error_email'] = "That's not a valid email."
error = True
if error:
self.render('signup.html', **template_vars)
else:
u = User.register(self.username,
self.password,
self.email)
u.put()
self.login(u)
self.redirect('/?')
| kevink1986/my-first-blog | handlers/signup.py | Python | apache-2.0 | 1,621 | 0 |
"""
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .lof import LocalOutlierFactor
from .nca import NeighborhoodComponentsAnalysis
from .base import VALID_METRICS, VALID_METRICS_SPARSE
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LocalOutlierFactor',
'NeighborhoodComponentsAnalysis',
'VALID_METRICS',
'VALID_METRICS_SPARSE']
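# A minimal usage sketch of one of the estimators exported above (toy data,
# illustrative only):
#
#   from sklearn.neighbors import KNeighborsClassifier
#   clf = KNeighborsClassifier(n_neighbors=3)
#   clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
#   clf.predict([[1.5]])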
| chrsrds/scikit-learn | sklearn/neighbors/__init__.py | Python | bsd-3-clause | 1,176 | 0 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import multiprocessing
import os
import sys
from collections import defaultdict
from typing import Dict, List, NamedTuple, Optional, Tuple
from rich.console import Console
from tabulate import tabulate
from airflow.utils.helpers import partition
from docs.exts.docs_build import dev_index_generator, lint_checks
from docs.exts.docs_build.code_utils import CONSOLE_WIDTH, PROVIDER_INIT_FILE
from docs.exts.docs_build.docs_builder import DOCS_DIR, AirflowDocsBuilder, get_available_packages
from docs.exts.docs_build.errors import DocBuildError, display_errors_summary
from docs.exts.docs_build.fetch_inventories import fetch_inventories
from docs.exts.docs_build.github_action_utils import with_group
from docs.exts.docs_build.package_filter import process_package_filters
from docs.exts.docs_build.spelling_checks import SpellingError, display_spelling_error_summary
TEXT_RED = '\033[31m'
TEXT_RESET = '\033[0m'
if __name__ not in ("__main__", "__mp_main__"):
raise SystemExit(
"This file is intended to be executed as an executable program. You cannot use it as a module."
"To run this script, run the ./build_docs.py command"
)
CHANNEL_INVITATION = """\
If you need help, write to #documentation channel on Airflow's Slack.
Channel link: https://apache-airflow.slack.com/archives/CJ1LVREHX
Invitation link: https://s.apache.org/airflow-slack\
"""
ERRORS_ELIGIBLE_TO_REBUILD = [
'failed to reach any of the inventories with the following issues',
'undefined label:',
'unknown document:',
]
ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS', 'false') == "true"
console = Console(force_terminal=True, color_system="standard", width=CONSOLE_WIDTH)
def _promote_new_flags():
console.print()
console.print("[yellow]Still tired of waiting for documentation to be built?[/]")
console.print()
if ON_GITHUB_ACTIONS:
console.print("You can quickly build documentation locally with just one command.")
console.print(" [blue]./breeze build-docs[/]")
console.print()
console.print("[yellow]Still too slow?[/]")
console.print()
console.print("You can only build one documentation package:")
console.print(" [blue]./breeze build-docs -- --package-filter <PACKAGE-NAME>[/]")
console.print()
console.print("This usually takes from [yellow]20 seconds[/] to [yellow]2 minutes[/].")
console.print()
console.print("You can also use other extra flags to iterate faster:")
console.print(" [blue]--docs-only - Only build documentation[/]")
console.print(" [blue]--spellcheck-only - Only perform spellchecking[/]")
console.print()
console.print("For more info:")
console.print(" [blue]./breeze build-docs --help[/]")
console.print()
def _get_parser():
available_packages_list = " * " + "\n * ".join(get_available_packages())
parser = argparse.ArgumentParser(
description='Builds documentation and runs spell checking',
epilog=f"List of supported documentation packages:\n{available_packages_list}",
)
parser.formatter_class = argparse.RawTextHelpFormatter
parser.add_argument(
'--disable-checks', dest='disable_checks', action='store_true', help='Disables extra checks'
)
parser.add_argument(
"--package-filter",
action="append",
help=(
"Filter specifying for which packages the documentation is to be built. Wildcard are supported."
),
)
parser.add_argument('--docs-only', dest='docs_only', action='store_true', help='Only build documentation')
parser.add_argument(
'--spellcheck-only', dest='spellcheck_only', action='store_true', help='Only perform spellchecking'
)
parser.add_argument(
'--for-production',
dest='for_production',
action='store_true',
help='Builds documentation for official release i.e. all links point to stable version',
)
parser.add_argument(
"-j",
"--jobs",
dest='jobs',
type=int,
default=0,
help=(
"""\
Number of parallel processes that will be spawned to build the docs.
If passed 0, the value will be determined based on the number of CPUs.
"""
),
)
parser.add_argument(
"-v",
"--verbose",
dest='verbose',
action='store_true',
help=(
'Increases the verbosity of the script i.e. always displays a full log of '
'the build process, not just when it encounters errors'
),
)
return parser
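# Illustrative invocations (package names and job counts are examples only):
#   ./build_docs.py --package-filter apache-airflow --spellcheck-only
#   ./build_docs.py --docs-only -j 4 -v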
class BuildSpecification(NamedTuple):
"""Specification of single build."""
package_name: str
for_production: bool
verbose: bool
class BuildDocsResult(NamedTuple):
"""Result of building documentation."""
package_name: str
log_file_name: str
errors: List[DocBuildError]
class SpellCheckResult(NamedTuple):
"""Result of spellcheck."""
package_name: str
log_file_name: str
errors: List[SpellingError]
def perform_docs_build_for_single_package(build_specification: BuildSpecification) -> BuildDocsResult:
"""Performs single package docs build."""
builder = AirflowDocsBuilder(
package_name=build_specification.package_name, for_production=build_specification.for_production
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Building documentation")
result = BuildDocsResult(
package_name=build_specification.package_name,
errors=builder.build_sphinx_docs(
verbose=build_specification.verbose,
),
log_file_name=builder.log_build_filename,
)
return result
def perform_spell_check_for_single_package(build_specification: BuildSpecification) -> SpellCheckResult:
"""Performs single package spell check."""
builder = AirflowDocsBuilder(
package_name=build_specification.package_name, for_production=build_specification.for_production
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Checking spelling started")
result = SpellCheckResult(
package_name=build_specification.package_name,
errors=builder.check_spelling(
verbose=build_specification.verbose,
),
log_file_name=builder.log_spelling_filename,
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Checking spelling completed")
return result
def build_docs_for_packages(
current_packages: List[str],
docs_only: bool,
spellcheck_only: bool,
for_production: bool,
jobs: int,
verbose: bool,
) -> Tuple[Dict[str, List[DocBuildError]], Dict[str, List[SpellingError]]]:
"""Builds documentation for all packages and combines errors."""
all_build_errors: Dict[str, List[DocBuildError]] = defaultdict(list)
all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list)
with with_group("Cleaning documentation files"):
for package_name in current_packages:
console.print(f"[blue]{package_name:60}:[/] Cleaning files")
builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production)
builder.clean_files()
if jobs > 1:
run_in_parallel(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
jobs,
spellcheck_only,
verbose,
)
else:
run_sequentially(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
spellcheck_only,
verbose,
)
return all_build_errors, all_spelling_errors
def run_sequentially(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
spellcheck_only,
verbose,
):
"""Run both - spellcheck and docs build sequentially without multiprocessing"""
if not spellcheck_only:
for package_name in current_packages:
build_result = perform_docs_build_for_single_package(
build_specification=BuildSpecification(
package_name=package_name,
for_production=for_production,
verbose=verbose,
)
)
if build_result.errors:
all_build_errors[package_name].extend(build_result.errors)
print_build_output(build_result)
if not docs_only:
for package_name in current_packages:
spellcheck_result = perform_spell_check_for_single_package(
build_specification=BuildSpecification(
package_name=package_name,
for_production=for_production,
verbose=verbose,
)
)
if spellcheck_result.errors:
all_spelling_errors[package_name].extend(spellcheck_result.errors)
print_spelling_output(spellcheck_result)
def run_in_parallel(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
jobs,
spellcheck_only,
verbose,
):
"""Run both - spellcheck and docs build sequentially without multiprocessing"""
with multiprocessing.Pool(processes=jobs) as pool:
if not spellcheck_only:
run_docs_build_in_parallel(
all_build_errors=all_build_errors,
for_production=for_production,
current_packages=current_packages,
verbose=verbose,
pool=pool,
)
if not docs_only:
run_spell_check_in_parallel(
all_spelling_errors=all_spelling_errors,
for_production=for_production,
current_packages=current_packages,
verbose=verbose,
pool=pool,
)
def print_build_output(result: BuildDocsResult):
"""Prints output of docs build job."""
with with_group(f"{TEXT_RED}Output for documentation build {result.package_name}{TEXT_RESET}"):
console.print()
console.print(f"[blue]{result.package_name:60}: " + "#" * 80)
with open(result.log_file_name) as output:
for line in output.read().splitlines():
console.print(f"{result.package_name:60} {line}")
console.print(f"[blue]{result.package_name:60}: " + "#" * 80)
def run_docs_build_in_parallel(
all_build_errors: Dict[str, List[DocBuildError]],
for_production: bool,
current_packages: List[str],
verbose: bool,
pool,
):
"""Runs documentation building in parallel."""
doc_build_specifications: List[BuildSpecification] = []
with with_group("Scheduling documentation to build"):
for package_name in current_packages:
console.print(f"[blue]{package_name:60}:[/] Scheduling documentation to build")
doc_build_specifications.append(
BuildSpecification(
package_name=package_name,
for_production=for_production,
verbose=verbose,
)
)
with with_group("Running docs building"):
console.print()
result_list = pool.map(perform_docs_build_for_single_package, doc_build_specifications)
for result in result_list:
if result.errors:
all_build_errors[result.package_name].extend(result.errors)
print_build_output(result)
def print_spelling_output(result: SpellCheckResult):
"""Prints output of spell check job."""
with with_group(f"{TEXT_RED}Output for spelling check: {result.package_name}{TEXT_RESET}"):
console.print()
console.print(f"[blue]{result.package_name:60}: " + "#" * 80)
with open(result.log_file_name) as output:
for line in output.read().splitlines():
console.print(f"{result.package_name:60} {line}")
console.print(f"[blue]{result.package_name:60}: " + "#" * 80)
console.print()
def run_spell_check_in_parallel(
all_spelling_errors: Dict[str, List[SpellingError]],
for_production: bool,
current_packages: List[str],
verbose: bool,
pool,
):
"""Runs spell check in parallel."""
spell_check_specifications: List[BuildSpecification] = []
with with_group("Scheduling spell checking of documentation"):
for package_name in current_packages:
console.print(f"[blue]{package_name:60}:[/] Scheduling spellchecking")
spell_check_specifications.append(
BuildSpecification(package_name=package_name, for_production=for_production, verbose=verbose)
)
with with_group("Running spell checking of documentation"):
console.print()
result_list = pool.map(perform_spell_check_for_single_package, spell_check_specifications)
for result in result_list:
if result.errors:
all_spelling_errors[result.package_name].extend(result.errors)
print_spelling_output(result)
def display_packages_summary(
build_errors: Dict[str, List[DocBuildError]], spelling_errors: Dict[str, List[SpellingError]]
):
"""Displays a summary that contains information on the number of errors in each packages"""
packages_names = {*build_errors.keys(), *spelling_errors.keys()}
tabular_data = [
{
"Package name": f"[blue]{package_name}[/]",
"Count of doc build errors": len(build_errors.get(package_name, [])),
"Count of spelling errors": len(spelling_errors.get(package_name, [])),
}
for package_name in sorted(packages_names, key=lambda k: k or '')
]
console.print("#" * 20, " Packages errors summary ", "#" * 20)
console.print(tabulate(tabular_data=tabular_data, headers="keys"))
console.print("#" * 50)
def print_build_errors_and_exit(
build_errors: Dict[str, List[DocBuildError]],
spelling_errors: Dict[str, List[SpellingError]],
) -> None:
"""Prints build errors and exists."""
if build_errors or spelling_errors:
if build_errors:
display_errors_summary(build_errors)
console.print()
if spelling_errors:
display_spelling_error_summary(spelling_errors)
console.print()
console.print("The documentation has errors.")
display_packages_summary(build_errors, spelling_errors)
console.print()
console.print(CHANNEL_INVITATION)
sys.exit(1)
else:
console.print("[green]Documentation build is successful[/]")
def main():
"""Main code"""
args = _get_parser().parse_args()
available_packages = get_available_packages()
docs_only = args.docs_only
spellcheck_only = args.spellcheck_only
disable_checks = args.disable_checks
package_filters = args.package_filter
for_production = args.for_production
with with_group("Available packages"):
for pkg in sorted(available_packages):
console.print(f" - {pkg}")
if package_filters:
console.print("Current package filters: ", package_filters)
current_packages = process_package_filters(available_packages, package_filters)
with with_group("Fetching inventories"):
# Inventories that could not be retrieved should be built first. This may mean this is a
# new package.
packages_without_inventories = fetch_inventories()
normal_packages, priority_packages = partition(
lambda d: d in packages_without_inventories, current_packages
)
normal_packages, priority_packages = list(normal_packages), list(priority_packages)
jobs = args.jobs if args.jobs != 0 else os.cpu_count()
with with_group(
f"Documentation will be built for {len(current_packages)} package(s) with {jobs} parallel jobs"
):
for pkg_no, pkg in enumerate(current_packages, start=1):
console.print(f"{pkg_no}. {pkg}")
all_build_errors: Dict[Optional[str], List[DocBuildError]] = {}
all_spelling_errors: Dict[Optional[str], List[SpellingError]] = {}
if priority_packages:
# Build priority packages
package_build_errors, package_spelling_errors = build_docs_for_packages(
current_packages=priority_packages,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
for_production=for_production,
jobs=jobs,
verbose=args.verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
# Build normal packages
# If only one inventory is missing, the remaining packages are correct. If we are missing
# two or more inventories, it is better to try to build for all packages as the previous packages
# may have failed as well.
package_build_errors, package_spelling_errors = build_docs_for_packages(
current_packages=current_packages if len(priority_packages) > 1 else normal_packages,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
for_production=for_production,
jobs=jobs,
verbose=args.verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
# Build documentation for some packages again if it can help them.
to_retry_packages = [
package_name
for package_name, errors in package_build_errors.items()
if any(any((m in e.message) for m in ERRORS_ELIGIBLE_TO_REBUILD) for e in errors)
]
if to_retry_packages:
for package_name in to_retry_packages:
if package_name in all_build_errors:
del all_build_errors[package_name]
if package_name in all_spelling_errors:
del all_spelling_errors[package_name]
package_build_errors, package_spelling_errors = build_docs_for_packages(
current_packages=to_retry_packages,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
for_production=for_production,
jobs=jobs,
verbose=args.verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
if not disable_checks:
general_errors = lint_checks.run_all_check()
if general_errors:
all_build_errors[None] = general_errors
dev_index_generator.generate_index(f"{DOCS_DIR}/_build/index.html")
if not package_filters:
_promote_new_flags()
if os.path.exists(PROVIDER_INIT_FILE):
os.remove(PROVIDER_INIT_FILE)
print_build_errors_and_exit(
all_build_errors,
all_spelling_errors,
)
if __name__ == "__main__":
main()
| dhuang/incubator-airflow | docs/build_docs.py | Python | apache-2.0 | 19,953 | 0.002356 |
# from fileios import *
# msg = 'Enter Absolute Path to file: '
# f_name = raw_input(msg).strip()
#
# path = file_data_and_path(f_name)
# if path != None:
# print 'Path:',path
# from Tkinter import Tk
# from tkFileDialog import askopenfilename
#
# Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
# filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
# print(filename)
# def get_filename(file_type):
# while True:
# print('enter ' + file_type + ' filename: ')
# filename = input()
# print(filename)
# try:
# with open(filename, 'r') as f:
# my_file = f.read()
# return my_file
# except FileNotFoundError:
# print('No such file. Check file name and path and try again.')
#
#
# x = get_filename('TEMPLATE')
# print(x)
# -*- coding: utf-8 -*-
"""To add test methods.
"""
# from time import sleep
# from halo import Halo
# from time import time
#
# def rocket_launch():
# #spinner = Halo({'spinner': 'shark'})
# spinner = Halo({
# 'spinner': {
# 'interval': 100,
# 'frames': ['-', '\\', '|', '/', '-']
# }
# })
# spinner.start()
# while(1):
# spinner.text = 'Running... Time Elapsed: {} seconds'.format(time())
# sleep(10)
# break
# spinner.succeed('Rocket launched')
#
# rocket_launch()
from antpcf import *
# bins = np.arange(0.01, 0.201, 0.01)
# atpcf('/Users/rohin/Downloads/DR7-Full.ascii', bins, randfile='/Users/rohin/Downloads/random-DR7-Ful.ascii',permetric='apzdth', parmetric='apdz', weights=True)
# tpcf('/Users/rohin/Downloads/DR3-ns.ascii',bins,randfile='/Users/rohin/Downloads/random-DR3-ns.ascii',weights=True)
# def pmethod():
bins = np.arange(0.002, 0.06, 0.002)
correl = tpcf('./testw.dat',bins,randfile='./testw.dat',weights=True)
# return correl
# pool = multiprocessing.Pool(processes=ncount)
# correl = pool.map(pmethod)
# print correl
# atpcf('./testw.dat',bins,randfile='./testw.dat',permetric='apzdth',parmetric='apdz',method='ls',weights=True)
# blha=readfitsfile('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_CMASS_North.fits','data')
# dr12gcmn, weights = datprep('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits','data','lcdm')
# dat = readfitsfile('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits','data')
# weights = dat['WEIGHT_SYSTOT']
# import pyfits
# dpy = pyfits.open('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits')
# dpyd = dpy[1].data
# wts = dpyd['WEIGHT_SYSTOT']
# print(wts)
# print(min(wts))
# print(max(wts))
# print(dr12gcmn)
# print(weights)
# print (min(weights))
# print(max(weights))
# dr12gls=tpcf('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits',bins,randfile='/Users/rohin/Downloads/random0_DR12v5_LOWZ_South.fits',weights=True)
# Planck run with changed parameters in param.py
# corrdr3milne = tpcf('/Users/rohin/Downloads/DR3-ns.ascii', bins, randfile='/Users/rohin/Downloads/random-DR3-ns.ascii', weights=True, geometry='open', cosmology='lc')
# corrdr3milne = tpcf('/Users/rohin/Downloads/DR3-ns.ascii', bins, weights=True, mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/window.dr72safe0.ply')
# corrdr3milne = tpcf('./testw.dat', bins, weights=True, mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/window.dr72safe0.ply')
| rohinkumar/correlcalc | correlcalc/test.py | Python | mit | 3,556 | 0.003375 |
[] = c
y = []
for [] in x:
BLOCK
[] = []
| zrax/pycdc | tests/input/unpack_empty.py | Python | gpl-3.0 | 45 | 0 |
from django.contrib.auth.models import User
from selectable.base import ModelLookup
from selectable.registry import registry
class UserLookup(ModelLookup):
model = User
search_fields = (
'username__icontains',
'first_name__icontains',
'last_name__icontains',
)
filters = {'is_active': True, }
def get_item_value(self, item):
# Display for currently selected item
return item.get_full_name()
def get_item_label(self, item):
# Display for choice listings
return u"%s (%s)" % (item.username, item.get_full_name())
registry.register(UserLookup) | seanherron/data-inventory | inventory_project/datasets/lookups.py | Python | mit | 625 | 0.0032 |
#!/usr/bin/python
#coding:utf-8
import os
import sys
import re
def usage():
help_info="Usage: %s <recinfo_file> <sendinfo_file>" % sys.argv[0]
print help_info
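# Hypothetical record layout, inferred from the column indices used in main()
# below (whitespace-separated; col 0 = record id shared by both files,
# col 1 = status flag, col 3 = timestamp in the same unit in both files):
#   pkt0001 1 eth0 1498723405123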
def main():
try:
recinfo_file=sys.argv[1]
sendinfo_file=sys.argv[2]
except:
usage()
sys.exit(-1)
if not os.path.exists(recinfo_file):
print "ERROR: recinfo_file does not exists!"
usage()
sys.exit(-1)
if not os.path.exists(sendinfo_file):
print "ERROR: recinfo_file does not exists!"
usage()
sys.exit(-1)
delays = []
cnt = 0
with open(sendinfo_file, 'r') as sf:
sinfo = sf.read()
with open(recinfo_file, 'r') as rf:
rl = rf.readline()
while True:
rl = rf.readline()
if not rl: break
if re.search('#', rl): continue
rl_list = rl.split()
if rl_list[1] == '0': continue
pattern = rl_list[0] + ".*?\n"
result = re.search(pattern, sinfo)
if result:
sl = result.group()
sl_list = sl.split()
delay_time = int(rl_list[3]) - int(sl_list[3])
if delay_time == 0:
print rl_list[0]
delays.append(delay_time)
print(delays)
print "rec number:%d" % len(delays)
print "rec delay max :%d" % max(delays)
print "rec delay min :%d" % min(delays)
print "rec delay avg:%.2f" % (sum(delays)/float(len(delays)),)
if __name__ == "__main__":
sys.exit(main())
| melon-li/tools | netem/statdata/statdelay.py | Python | apache-2.0 | 1,581 | 0.01265 |
#http://www.thelatinlibrary.com/gestafrancorum.html
#prose
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# functions are mostly made by Sarah Otts
def add_to_database(verse_entries, db):
logger.info("Adding {} entries to the database".format(len(verse_entries)))
curs = db.cursor()
curs.execute("DELETE FROM texts WHERE author='Gesta Francorum'")
for i, v in enumerate(verse_entries):
data = curs.execute("SELECT * FROM texts")
curs.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, v["title"], v["book"], "Latin", v["author"], v["date"], v["chapter"], v["verse"],
v["text"], v["link"], "prose"))
def add_entry_to_list(entries, title, book, author, date, chapter, verse, text, txturl):
entry_dict = {"title": title, "book": book, "author": author, "date": date, "chapter": chapter, "verse": verse,
"text": text, "link": txturl}
entries.append(entry_dict)
def get_verses(soup):
# if there's nothing in the paragraph, return an empty array
if len(soup.contents) == 0:
return None
para_text = soup.get_text()
verses = re.split('\[?[0-9]+[A-Z]?\]?|\[[ivx]+\]',
para_text) # "[x]" can contain arabic numerals, lower case roman numerals, or upper case letters
verses = [re.sub(r'^\s+', '', v) for v in verses] # remove whitespace
verses = [re.sub(r'^\n', '', v) for v in verses] # remove \n
verses = filter(lambda x: len(x) > 0, verses)
verses = [v for v in verses]
# print verses
return verses
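# Illustrative behaviour of get_verses on a hypothetical paragraph text:
#   "[1] Lorem ipsum [2] dolor sit" -> ["Lorem ipsum ", "dolor sit"]
# (the empty chunk before "[1]" is dropped by the length filter above)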
def get_name_and_author_of_book(soup, url):
# attempt to get it from the page title
# print soup
pagetitle = soup.title.string
split_title = pagetitle.split(":")
if len(split_title) >= 2:
book = split_title[-1]
# if that doesn't work, get the author from the page title and the
else:
book = soup.p.br.next_sibling
# remove any surrounding spaces
book = re.sub(r'^\s+|\s+$|\n', '', book)
author = "Anonymous" #Gesta Francorum has an anonymous author.
return [book, author]
def get_title_and_date(soup):
title_soup = soup.find('h1')
title = ""
date = ""
if title_soup != None:
title = title_soup.string
else:
pagehead = soup.find('p', class_="pagehead")
if (pagehead is not None):
title = pagehead.find(text=True)
if (pagehead.find('span') is not None):
date = pagehead.find('span').string.replace("(", '').replace(")", '')
else:
h1 = soup.find('h1')
title = h1.string
if date is None or date == "":
date_tag = soup.find('h2', class_='date')
if (not date_tag is None):
date = date_tag.find(text=True).replace('(', '').replace(')', '')
else:
date = ""
date = date.replace(u"\u2013", '-')
title = title.upper()
return [title, date]
def main():
# collection name: Gesta Francorum
gestaURL = 'http://www.thelatinlibrary.com/gestafrancorum.html'
siteURL = 'http://www.thelatinlibrary.com'
gestaMain = urllib.request.urlopen(gestaURL)
soup = BeautifulSoup(gestaMain, "html5lib")
textsUrl = []
# search through soup for prose and links
for a in soup.find_all('a', href=True):
link = a['href']
textsUrl.append("{}/{}".format(siteURL, a['href']))
# remove some unnecessary urls
while ("http://www.thelatinlibrary.com/index.html" in textsUrl):
textsUrl.remove("http://www.thelatinlibrary.com/index.html")
textsUrl.remove("http://www.thelatinlibrary.com/classics.html")
textsUrl.remove("http://www.thelatinlibrary.com/medieval.html")
logger.info("\n".join(textsUrl))
# extract data
# get titles of this collection
title_dict_ges, date_dict_ges = get_title_and_date(soup)
verses = []
for u in textsUrl:
uURL = urllib.request.urlopen(u)
        soup = BeautifulSoup(uURL, "html5lib")  # check PEP 8 for file/function name
book, author = get_name_and_author_of_book(soup, uURL)
date = date_dict_ges
# go through text to find chapters
para = soup.findAll('p')[:-1]
chapter = "1" #Note that chapters aren't integers.
verse = 0
text = ""
for p in para:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
chap_found = False
# in other books, chapters are bold or italicized
potential_chap = p.find('b')
if potential_chap is not None:
chapter = potential_chap.find(text=True)
# Include italicized part in chap name
italic = potential_chap.i
if italic is not None:
chapter += italic.string
chapter = chapter.replace("\n", "")
chapter = chapter.replace(u'\xa0', '')
#Note: Some chapters have Roman numerals as part of the chapter name.
#e.g. Roman numeral is only part of the string and is not capitalized. Needs fixing.
chapnum = {'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X',
'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI'}
if chapter in chapnum:
chapter = chapter.upper() #Roman numerals need to be uppercase
else:
chapter = chapter.title()
verse = 0
continue
# go through text to find verses
if (get_verses(p)):
for i, t in enumerate(get_verses(p)):
verse += 1
text = t
# text = unicode.encode(text, errors="ignore")
# add the entry
add_entry_to_list(verses, title_dict_ges, book, author, date, chapter, verse, text, u)
with sqlite3.connect('texts.db') as db:
# open cursor
curs = db.cursor()
# create the database if it doesn't already exist
curs.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
db.commit()
# put it all in the db
add_to_database(verses,db)
db.commit()
logger.info("Process finished")
if __name__ == '__main__':
    main()  # do this dunder thing for everything else
| oudalab/phyllo | phyllo/extractors/gestafrancDB.py | Python | apache-2.0 | 6,972 | 0.005164 |
# Copyright © 2017 Joe Rogge, Jacob Gasyna and Adele Rehkemper
# This file is part of Rhythm Trainer Pro.
#
# Rhythm Trainer Pro is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rhythm Trainer Pro is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rhythm Trainer Pro. If not, see <http://www.gnu.org/licenses/>.
from Tkinter import *
from PIL import Image, ImageTk
def commandTest():
print "test"
#def makeButtons(sidebar):
#""" place all the buttons in the sidebar"""
#TODO add commands for each button
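# Hedged sketch (not part of the original program): the repeated
# open-image / create-button / place blocks below could be factored into a
# helper like this one. The name, signature and coordinates are illustrative
# assumptions, not the authors' design.
def make_icon_button(parent, icon_path, x, y, command=None):
    """Create an image button from icon_path and place it at (x, y)."""
    image = ImageTk.PhotoImage(Image.open(icon_path))
    button = Button(parent, image=image, border=0, command=command)
    button.image = image  # keep a reference so Tkinter does not drop the image
    button.place(x=x, y=y)
    return button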
root = Tk()
screenWidth = root.winfo_screenwidth()
screenHeight = root.winfo_screenheight()
root.geometry('%dx%d+%d+%d' % (screenWidth, screenHeight, 0, 0))
# main area
mainarea = Canvas(root, bg='#FFA', width=screenWidth-200)
mainarea.pack(expand=True, fill='both', side='right')
# sidebar
sidebar = Frame(root, bg='#FFF', width=200)
sidebar.pack(expand=False, fill='both', side='left', anchor='nw')
# make buttons
wnPath = 'WholeNoteicon.png'
wnImage = Image.open(wnPath)
wn = ImageTk.PhotoImage(wnImage)
wnButton = Button(sidebar, image=wn, border=0)
wnButton.place(x=25, y=0)
wnRestPath = 'WholeResticon.png'
wnRestImage = Image.open(wnRestPath)
wnRest = ImageTk.PhotoImage(wnRestImage)
wnRestButton = Button(sidebar, image=wnRest, border=0)
wnRestButton.place(x=100, y=0)
#hnPath = 'HalfNoteicon.png'
#hnImage = Image.open(hnPath)
#hn = ImageTk.PhotoImage(hnImage)
#hnButton = Button(sidebar, image=hn, border=0)
#hnButton.place(x=25, y=0)
hnRestPath = 'HalfResticon.png'
hnRestImage = Image.open(hnRestPath)
hnRest = ImageTk.PhotoImage(hnRestImage)
hnRestButton = Button(sidebar, image=hnRest, border=0)
hnRestButton.place(x=100, y=75)
qnPath = 'QuarterNoteicon.png'
qnImage = Image.open(qnPath)
qn = ImageTk.PhotoImage(qnImage)
qnButton = Button(sidebar, image=qn, border=0)
qnButton.place(x=25, y=150)
qnRestPath = 'QuarterResticon.png'
qnRestImage = Image.open(qnRestPath)
qnRest = ImageTk.PhotoImage(qnRestImage)
qnRestButton = Button(sidebar, image=qnRest, border=0)
qnRestButton.place(x=100, y=150)
#enPath = 'EighthNoteicon.png'
#enImage = Image.open(enPath)
#en = ImageTk.PhotoImage(enImage)
#enButton = Button(sidebar, image=en, border=0)
#enButton.place(x=25, y=150)
#enRestPath = 'EighthResticon.png'
#enRestImage = Image.open(enRestPath)
#enRest = ImageTk.PhotoImage(enRestImage)
#enRestButton = Button(sidebar, image=enRest, border=0)
#enRestButton.place(x=100, y=150)
snPath = 'SixteenthNoteicon.png'
snImage = Image.open(snPath)
sn = ImageTk.PhotoImage(snImage)
snButton = Button(sidebar, image=sn, border=0)
snButton.place(x=25, y=225)
snRestPath = 'SixteenthResticon.png'
snRestImage = Image.open(snRestPath)
snRest = ImageTk.PhotoImage(snRestImage)
snRestButton = Button(sidebar, image=snRest, border=0)
snRestButton.place(x=100, y=225)
if __name__ == '__main__':
root.mainloop()
| jacobgasyna/Hackathon2017 | basics.py | Python | gpl-3.0 | 3,296 | 0.011229 |
import json
import logging
from logging.config import dictConfig
import threading
import pickle
import redis
import aws
from settings import Settings
def terminate_worker(worker_id, instance, client):
result = aws.terminate_machine(instance)
if result is None or len(result) == 0:
logging.error('could not remove worker %s, remove manually!' % instance)
client.delete(worker_id)
class Consuela(threading.Thread):
""" Manages the termination of machines """
def __init__(self):
with open('logging.json') as jl:
dictConfig(json.load(jl))
logging.info('Consuela: Starting.')
threading.Thread.__init__(self)
self.daemon = True
self.settings = Settings()
self.client = redis.Redis('db')
self.job_pub_sub = self.client.pubsub()
self.job_pub_sub.subscribe(['jobs'])
def run(self):
for item in self.job_pub_sub.listen():
job_id = item['data']
if job_id == 'KILL':
self.job_pub_sub.unsubscribe()
logging.info('Consuela: Stopping.')
return
#
worker_id, worker = self.get_worker(job_id)
if worker and self.client.exists(job_id):
job = pickle.loads(self.client.get(job_id))
if job.state == 'finished' and worker.instance is not None:
if not self.settings.recycle_workers:
logging.info('recycle workers off, %s finished, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
if self.recycle_worker(job_id, job):
logging.info('going to recycle worker %s' % worker.instance)
worker.job_id = None
self.client.set(worker_id, pickle.dumps(worker))
else:
logging.info('no work left for %s, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
elif job.state == 'failed' and worker.instance is not None:
logging.warning('%s finished with failure' % job_id)
if self.settings.auto_remove_failed and not self.settings.recycle_workers:
logging.info('auto-remove on failure enabled, trying to remove %s' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.warning('auto-remove on failure not performed, manually remove %s!' % worker.instance)
elif job.state == 'broken' and worker.instance is not None:
logging.info('Terminating worker with a broken job.')
terminate_worker(worker_id, worker.instance, self.client)
job.state = 'failed'
self.client.set(job_id, pickle.dumps(job))
elif worker_id and worker and worker.instance:
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.debug('no worker found for %s' % job_id)
def get_worker(self, job_id):
for worker_id in [worker_key for worker_key in self.client.keys() if worker_key.startswith('jm-')]: # Redis keys(pattern='*') does not filter at all.
pickled_worker = self.client.get(worker_id)
if pickled_worker is None:
continue
worker = pickle.loads(pickled_worker)
if worker.job_id is not None and worker.job_id == job_id:
return worker_id, worker
return None, None
def recycle_worker(self, job_id, job):
if job.batch_id is None or not self.client.exists(job.batch_id):
logging.info('could not find a "real" batch id for %s' % job.batch_id)
return False
batch = pickle.loads(self.client.get(job.batch_id))
for batch_job_id in pickle.loads(batch.jobs):
logging.debug('have job %s in batch %s' % (batch_job_id, job.batch_id))
if batch_job_id != job_id:
logging.debug('found other job in batch, checking state')
if self.client.exists(batch_job_id):
batch_job = pickle.loads(self.client.get(batch_job_id))
logging.debug('state is %s (for %s)' % (batch_job.state, batch_job_id))
if batch_job.state == 'spawned' or batch_job.state == 'received' or batch_job.state == 'delayed':
return True
return False
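# Illustrative usage sketch (an assumption, not part of the original service):
# Consuela reacts to job ids published on the "jobs" channel, and the sentinel
# "KILL" shuts the listener down.
if __name__ == '__main__':
    manager = Consuela()
    manager.start()
    # A publisher elsewhere would announce state changes and, eventually,
    # stop the manager, e.g.:
    #   redis.Redis('db').publish('jobs', some_job_id)
    #   redis.Redis('db').publish('jobs', 'KILL')
    manager.join()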
| witlox/dcs | controller/ilm/consuela.py | Python | gpl-2.0 | 4,723 | 0.003176 |
from typing import Dict
from urllib.parse import quote
def request_path(env: Dict):
return quote('/' + env.get('PATH_INFO', '').lstrip('/'))
| bugsnag/bugsnag-python | bugsnag/wsgi/__init__.py | Python | mit | 147 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Everyone'), ('private', 'Invited people only'), ('groups', 'Selected group of users')], 'Privacy', required=True,
            help='This group is visible to non-members. '
                 'Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(newref), 'mail_group_id': mail_group_id}, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, [mail_group_id], context=context)
return mail_group_id
def unlink(self, cr, uid, ids, context=None):
groups = self.browse(cr, uid, ids, context=context)
alias_ids = [group.alias_id.id for group in groups if group.alias_id]
menu_ids = [group.menu_id.id for group in groups if group.menu_id]
# Delete mail_group
try:
all_emp_group = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mail', 'group_all_employees')[1]
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in ids:
raise osv.except_osv(_('Warning!'), _('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(mail_group, self).unlink(cr, uid, ids, context=context)
# Cascade-delete mail aliases as well, as they should not exist without the mail group.
self.pool.get('mail.alias').unlink(cr, SUPERUSER_ID, alias_ids, context=context)
# Cascade-delete menu entries as well
self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, menu_ids, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
result = super(mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, ids, context=context)
# if description, name or alias is changed: update client action
if vals.get('description') or vals.get('name') or vals.get('alias_id') or vals.get('alias_name'):
cobj = self.pool.get('ir.actions.client')
            for group in self.browse(cr, uid, ids, context=context):
                action = group.menu_id.action
                new_params = action.params
                new_params['header_description'] = self._generate_header_description(cr, uid, group, context=context)
                cobj.write(cr, SUPERUSER_ID, [action.id], {'params': str(new_params)}, context=context)
# if name is changed: update menu
if vals.get('name'):
mobj = self.pool.get('ir.ui.menu')
mobj.write(cr, SUPERUSER_ID,
[group.menu_id.id for group in self.browse(cr, uid, ids, context=context)],
{'name': vals.get('name')}, context=context)
return result
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggestion of groups if display_groups_suggestions if the
user perference allows it."""
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_groups_suggestions:
return []
else:
return super(mail_group, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(mail_group, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if group.alias_domain and group.alias_name:
headers['List-Id'] = '%s.%s' % (group.alias_name, group.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (group.alias_name, group.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (group.name, group.alias_name, group.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
| ritchyteam/odoo | addons/mail/mail_group.py | Python | agpl-3.0 | 12,895 | 0.004731 |
#
# Copyright (c) 2008--2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug
from spacewalk.server.rhnServer import server_kickstart
# the "exposed" functions
__rhnexport__ = ['reboot']
def reboot(server_id, action_id, data={}):
log_debug(3, action_id)
action_status = rhnFlags.get('action_status')
server_kickstart.update_kickstart_session(server_id, action_id,
action_status, kickstart_state='restarted',
next_action_type=None)
| dmacvicar/spacewalk | backend/server/action_extra_data/reboot.py | Python | gpl-2.0 | 1,085 | 0.003687 |
"""Convenient parallelization of higher order functions.
This module provides two helper functions, with appropriate fallbacks on
Python 2 and on systems lacking support for synchronization mechanisms:
- map_multiprocess
- map_multithread
These helpers work like Python 3's map, with two differences:
- They don't guarantee the order of processing of
the elements of the iterable.
- The underlying process/thread pools chop the iterable into
a number of chunks, so that for very long iterables using
a large value for chunksize can make the job complete much faster
than using the default value of 1.
"""
__all__ = ['map_multiprocess', 'map_multithread']
from contextlib import contextmanager
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool
from pip._vendor.requests.adapters import DEFAULT_POOLSIZE
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from multiprocessing import pool
from typing import Callable, Iterable, Iterator, TypeVar, Union
Pool = Union[pool.Pool, pool.ThreadPool]
S = TypeVar('S')
T = TypeVar('T')
# On platforms without sem_open, multiprocessing[.dummy] Pool
# cannot be created.
try:
import multiprocessing.synchronize # noqa
except ImportError:
LACK_SEM_OPEN = True
else:
LACK_SEM_OPEN = False
# Incredibly large timeout to work around bpo-8296 on Python 2.
TIMEOUT = 2000000
@contextmanager
def closing(pool):
# type: (Pool) -> Iterator[Pool]
"""Return a context manager making sure the pool closes properly."""
try:
yield pool
finally:
# For Pool.imap*, close and join are needed
# for the returned iterator to begin yielding.
pool.close()
pool.join()
pool.terminate()
def _map_fallback(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Make an iterator applying func to each element in iterable.
This function is the sequential fallback either on Python 2
where Pool.imap* doesn't react to KeyboardInterrupt
or when sem_open is unavailable.
"""
return map(func, iterable)
def _map_multiprocess(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Chop iterable into chunks and submit them to a process pool.
For very long iterables using a large value for chunksize can make
the job complete much faster than using the default value of 1.
Return an unordered iterator of the results.
"""
with closing(ProcessPool()) as pool:
return pool.imap_unordered(func, iterable, chunksize)
def _map_multithread(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Chop iterable into chunks and submit them to a thread pool.
For very long iterables using a large value for chunksize can make
the job complete much faster than using the default value of 1.
Return an unordered iterator of the results.
"""
with closing(ThreadPool(DEFAULT_POOLSIZE)) as pool:
return pool.imap_unordered(func, iterable, chunksize)
if LACK_SEM_OPEN:
map_multiprocess = map_multithread = _map_fallback
else:
map_multiprocess = _map_multiprocess
map_multithread = _map_multithread
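# Illustrative usage sketch (an assumption, not part of pip's internal API
# surface): results come back in arbitrary order, so sort them for display.
if __name__ == '__main__':
    def _square(value):
        # type: (int) -> int
        return value * value

    print(sorted(map_multithread(_square, range(10), chunksize=4)))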
| nataddrho/DigiCue-USB | Python3/src/venv/Lib/site-packages/pip/_internal/utils/parallel.py | Python | mit | 3,327 | 0 |
from flask import Flask
from flask_cqlalchemy import CQLAlchemy
app = Flask(__name__)
app.config['CASSANDRA_HOSTS'] = ['127.0.0.1']
app.config['CASSANDRA_KEYSPACE'] = "cqlengine"
app.config['CASSANDRA_SETUP_KWARGS'] = {'protocol_version': 3}
db = CQLAlchemy(app)
class Address(db.UserType):
street = db.columns.Text()
zipcode = db.columns.Integer()
class Users(db.Model):
__keyspace__ = 'cqlengine'
name = db.columns.Text(primary_key=True)
addr = db.columns.UserDefinedType(Address)
| thegeorgeous/flask-cqlalchemy | examples/example_app_udt.py | Python | isc | 509 | 0 |
import numpy as np
def init_network():
network = {}
network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
network['b1'] = np.array([0.1, 0.2, 0.3])
network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
network['b2'] = np.array([0.1, 0.2])
network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
network['b3'] = np.array([0.1, 0.2])
return network
def identity_function(x):
return x
def forward(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = identity_function(a3)
return y
def sigmoid(x):
return 1/(1+np.exp(-x))
network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)
| nobukatsu/deep-learning-from-scratch | ch03/nn-3layer.py | Python | mit | 894 | 0.008949 |
>>> myTuple = (1, 2, 3)
>>> myTuple[1]
2
>>> myTuple[1:3]
(2, 3)
| schmit/intro-python-course | lectures/code/tuples_basics.py | Python | mit | 65 | 0.046154 |
#! /usr/bin/env pypy
"""
Command-line options for translate:
See below
"""
import os
import sys
import py
from rpython.config.config import (to_optparse, OptionDescription, BoolOption,
ArbitraryOption, StrOption, IntOption, Config, ChoiceOption, OptHelpFormatter)
from rpython.config.translationoption import (get_combined_translation_config,
set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform, CACHE_DIR)
# clean up early rpython/_cache
try:
py.path.local(CACHE_DIR).remove()
except Exception:
pass
GOALS = [
("annotate", "do type inference", "-a --annotate", ""),
("rtype", "do rtyping", "-t --rtype", ""),
("pyjitpl", "JIT generation step", "--pyjitpl", ""),
("jittest", "JIT test with llgraph backend", "--pyjittest", ""),
("backendopt", "do backend optimizations", "--backendopt", ""),
("source", "create source", "-s --source", ""),
("compile", "compile", "-c --compile", " (default goal)"),
("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""),
]
def goal_options():
result = []
for name, doc, cmdline, extra in GOALS:
optional = False
if name.startswith('?'):
optional = True
name = name[1:]
yesdoc = doc[0].upper() + doc[1:] + extra
result.append(BoolOption(name, yesdoc, default=False, cmdline=cmdline,
negation=False))
if not optional:
result.append(BoolOption("no_%s" % name, "Don't " + doc, default=False,
cmdline="--no-" + name, negation=False))
return result
translate_optiondescr = OptionDescription("translate", "XXX", [
StrOption("targetspec", "XXX", default='targetpypystandalone',
cmdline=None),
ChoiceOption("opt",
"optimization level", OPT_LEVELS, default=DEFAULT_OPT_LEVEL,
cmdline="--opt -O"),
BoolOption("profile",
"cProfile (to debug the speed of the translation process)",
default=False,
cmdline="--profile"),
BoolOption("pdb",
"Always run pdb even if the translation succeeds",
default=False,
cmdline="--pdb"),
BoolOption("batch", "Don't run interactive helpers", default=False,
cmdline="--batch", negation=False),
IntOption("huge", "Threshold in the number of functions after which "
"a local call graph and not a full one is displayed",
default=100, cmdline="--huge"),
BoolOption("view", "Start the pygame viewer", default=False,
cmdline="--view", negation=False),
BoolOption("help", "show this help message and exit", default=False,
cmdline="-h --help", negation=False),
BoolOption("fullhelp", "show full help message and exit", default=False,
cmdline="--full-help", negation=False),
ArbitraryOption("goals", "XXX",
defaultfactory=list),
# xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile']
ArbitraryOption("skipped_goals", "XXX",
defaultfactory=list),
OptionDescription("goal_options",
"Goals that should be reached during translation",
goal_options()),
])
import optparse
from rpython.tool.ansi_print import ansi_log
log = py.log.Producer("translation")
py.log.setconsumer("translation", ansi_log)
def load_target(targetspec):
log.info("Translating target as defined by %s" % targetspec)
if not targetspec.endswith('.py'):
targetspec += '.py'
thismod = sys.modules[__name__]
sys.modules['translate'] = thismod
specname = os.path.splitext(os.path.basename(targetspec))[0]
sys.path.insert(0, os.path.dirname(targetspec))
mod = __import__(specname)
if 'target' not in mod.__dict__:
raise Exception("file %r is not a valid targetxxx.py." % (targetspec,))
return mod.__dict__
def parse_options_and_load_target():
opt_parser = optparse.OptionParser(usage="%prog [options] [target] [target-specific-options]",
prog="translate",
formatter=OptHelpFormatter(),
add_help_option=False)
opt_parser.disable_interspersed_args()
config = get_combined_translation_config(translating=True)
to_optparse(config, parser=opt_parser, useoptions=['translation.*'])
translateconfig = Config(translate_optiondescr)
to_optparse(translateconfig, parser=opt_parser)
options, args = opt_parser.parse_args()
# set goals and skipped_goals
reset = False
for name, _, _, _ in GOALS:
if name.startswith('?'):
continue
if getattr(translateconfig.goal_options, name):
if name not in translateconfig.goals:
translateconfig.goals.append(name)
if getattr(translateconfig.goal_options, 'no_' + name):
if name not in translateconfig.skipped_goals:
if not reset:
translateconfig.skipped_goals[:] = []
reset = True
translateconfig.skipped_goals.append(name)
if args:
arg = args[0]
args = args[1:]
if os.path.isfile(arg + '.py'):
assert not os.path.isfile(arg), (
"ambiguous file naming, please rename %s" % arg)
translateconfig.targetspec = arg
elif os.path.isfile(arg) and arg.endswith('.py'):
translateconfig.targetspec = arg[:-3]
else:
log.ERROR("Could not find target %r" % (arg, ))
sys.exit(1)
else:
show_help(translateconfig, opt_parser, None, config)
# print the version of the host
# (if it's PyPy, it includes the hg checksum)
log.info(sys.version)
# apply the platform settings
set_platform(config)
targetspec = translateconfig.targetspec
targetspec_dic = load_target(targetspec)
if args and not targetspec_dic.get('take_options', False):
log.WARNING("target specific arguments supplied but will be ignored: %s" % ' '.join(args))
# give the target the possibility to get its own configuration options
# into the config
if 'get_additional_config_options' in targetspec_dic:
optiondescr = targetspec_dic['get_additional_config_options']()
config = get_combined_translation_config(
optiondescr,
existing_config=config,
translating=True)
# show the target-specific help if --help was given
show_help(translateconfig, opt_parser, targetspec_dic, config)
# apply the optimization level settings
set_opt_level(config, translateconfig.opt)
# let the target modify or prepare itself
# based on the config
if 'handle_config' in targetspec_dic:
targetspec_dic['handle_config'](config, translateconfig)
return targetspec_dic, translateconfig, config, args
def show_help(translateconfig, opt_parser, targetspec_dic, config):
if translateconfig.help:
if targetspec_dic is None:
opt_parser.print_help()
print "\n\nDefault target: %s" % translateconfig.targetspec
print "Run '%s --help %s' for target-specific help" % (
sys.argv[0], translateconfig.targetspec)
elif 'print_help' in targetspec_dic:
print "\n\nTarget specific help for %s:\n\n" % (
translateconfig.targetspec,)
targetspec_dic['print_help'](config)
else:
print "\n\nNo target-specific help available for %s" % (
translateconfig.targetspec,)
print "\n\nFor detailed descriptions of the command line options see"
print "http://pypy.readthedocs.org/en/latest/config/commandline.html"
sys.exit(0)
def log_options(options, header="options in effect"):
# list options (xxx filter, filter for target)
log('%s:' % header)
optnames = options.__dict__.keys()
optnames.sort()
for name in optnames:
optvalue = getattr(options, name)
log('%25s: %s' % (name, optvalue))
def log_config(config, header="config used"):
log('%s:' % header)
log(str(config))
for warning in config.get_warnings():
log.WARNING(warning)
def main():
targetspec_dic, translateconfig, config, args = parse_options_and_load_target()
from rpython.translator import translator
from rpython.translator import driver
from rpython.translator.tool.pdbplus import PdbPlusShow
if translateconfig.view:
translateconfig.pdb = True
if translateconfig.profile:
from cProfile import Profile
prof = Profile()
prof.enable()
else:
prof = None
t = translator.TranslationContext(config=config)
pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands
def finish_profiling():
if prof:
prof.disable()
statfilename = 'prof.dump'
log.info('Dumping profiler stats to: %s' % statfilename)
prof.dump_stats(statfilename)
def debug(got_error):
tb = None
if got_error:
import traceback
stacktrace_errmsg = ["Error:\n"]
exc, val, tb = sys.exc_info()
stacktrace_errmsg.extend([" %s" % line for line in traceback.format_tb(tb)])
summary_errmsg = traceback.format_exception_only(exc, val)
block = getattr(val, '__annotator_block', None)
if block:
class FileLike:
def write(self, s):
summary_errmsg.append(" %s" % s)
summary_errmsg.append("Processing block:\n")
t.about(block, FileLike())
log.info(''.join(stacktrace_errmsg))
log.ERROR(''.join(summary_errmsg))
else:
log.event('Done.')
if translateconfig.batch:
log.event("batch mode, not calling interactive helpers")
return
log.event("start debugger...")
if translateconfig.view:
try:
t1 = drv.hint_translator
except (NameError, AttributeError):
t1 = t
from rpython.translator.tool import graphpage
page = graphpage.TranslatorPage(t1, translateconfig.huge)
page.display_background()
pdb_plus_show.start(tb)
try:
drv = driver.TranslationDriver.from_targetspec(targetspec_dic, config, args,
empty_translator=t,
disable=translateconfig.skipped_goals,
default_goal='compile')
log_config(translateconfig, "translate.py configuration")
if config.translation.jit:
if (translateconfig.goals != ['annotate'] and
translateconfig.goals != ['rtype']):
drv.set_extra_goals(['pyjitpl'])
# early check:
from rpython.jit.backend.detect_cpu import getcpuclassname
getcpuclassname(config.translation.jit_backend)
log_config(config.translation, "translation configuration")
pdb_plus_show.expose({'drv': drv, 'prof': prof})
if config.translation.output:
drv.exe_name = config.translation.output
elif drv.exe_name is None and '__name__' in targetspec_dic:
drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s'
# Double check to ensure we are not overwriting the current interpreter
goals = translateconfig.goals
if not goals or 'compile' in goals:
try:
this_exe = py.path.local(sys.executable).new(ext='')
exe_name = drv.compute_exe_name()
samefile = this_exe.samefile(exe_name)
assert not samefile, (
'Output file %s is the currently running '
'interpreter (please move the executable, and '
'possibly its associated libpypy-c, somewhere else '
'before you execute it)' % exe_name)
except EnvironmentError:
pass
try:
drv.proceed(goals)
finally:
drv.timer.pprint()
except SystemExit:
raise
except:
finish_profiling()
debug(True)
raise SystemExit(1)
else:
finish_profiling()
if translateconfig.pdb:
debug(False)
if __name__ == '__main__':
main()
| jptomo/rpython-lang-scheme | rpython/translator/goal/translate.py | Python | mit | 12,703 | 0.001889 |
def phone_num_lists():
"""
Gets a dictionary of 0-9 integer values (as Strings) mapped to their potential Backpage ad manifestations, such as "zer0" or "seven".
Returns:
dictionary of 0-9 integer values mapped to a list of strings containing the key's possible manifestations
"""
all_nums = {}
all_nums['2'] = ['2', 'two']
all_nums['3'] = ['3', 'three']
all_nums['4'] = ['4', 'four', 'fuor']
all_nums['5'] = ['5', 'five', 'fith']
all_nums['6'] = ['6', 'six']
all_nums['7'] = ['7', 'seven', 'sven']
all_nums['8'] = ['8', 'eight']
all_nums['9'] = ['9', 'nine']
all_nums['0'] = ['0', 'zero', 'zer0', 'oh', 'o']
    all_nums['1'] = ['1', 'one', '!', 'l', 'i']
return all_nums
def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
    Magnitude = {
        # note: these literals evaluate to 0; the intended magnitudes are
        # presumably 1000 and 1000000
        'thousand': 000,
        'million': 000000,
    }
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output
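# Hedged example (the helper name and its use are assumptions, not part of the
# original extractor code): resolve one ad token, e.g. "sven" or "zer0", to its
# numeric value as a string using the substitution tables defined above.
def token_to_digit(token):
    """Return the numeric value encoded by token as a string, or None if unknown."""
    subs = phone_text_subs()
    token = token.lower()
    for table in (subs['Small'], subs['Others']):
        if token in table:
            return str(table[token])
    return None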
| usc-isi-i2/etk | etk/data_extractors/htiExtractors/misc.py | Python | mit | 1,773 | 0.0141 |
# Copyright (C) 2005 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Testament - a summary of a revision for signing.
A testament can be defined as "something that serves as tangible
proof or evidence." In bzr we use them to allow people to certify
particular revisions as authentic.
The goal is that if two revisions are semantically equal, then they will
have a byte-for-byte equal testament. We can define different versions of
"semantically equal" by using different testament classes; e.g. one that
includes or ignores file-ids.
We sign a testament rather than the revision XML itself for several reasons.
The most important is that the form in which the revision is stored
internally is designed for that purpose, and contains information which need
not be attested to by the signer. For example the inventory contains the
last-changed revision for a file, but this is not necessarily something the
user cares to sign.
Having unnecessary fields signed makes the signatures brittle when the same
revision is stored in different branches or when the format is upgraded.
Handling upgrades is another motivation for using testaments separate from
the stored revision. We would like to be able to compare a signature
generated from an old-format tree to newer tree, or vice versa. This could
be done by comparing the revisions but that makes it unclear about exactly
what is being compared or not.
Different signing keys might indicate different levels of trust; we can in
the future extend this to allow signatures indicating not just that a
particular version is authentic but that it has other properties.
The signature can be applied to either the full testament or to just a
hash of it.
Testament format 1
~~~~~~~~~~~~~~~~~~
* timestamps are given as integers to avoid rounding errors
* parents given in lexicographical order
* indented-text form similar to log; intended to be human readable
* paths are given with forward slashes
* files are named using paths for ease of comparison/debugging
* the testament uses unix line-endings (\n)
"""
from __future__ import absolute_import
# XXX: At the moment, clients trust that the graph described in a weave
# is accurate, but that's not covered by the testament. Perhaps the best
# fix is when verifying a revision to make sure that every file mentioned
# in the revision has compatible ancestry links.
# TODO: perhaps write timestamp in a more readable form
# TODO: Perhaps these should just be different formats in which inventories/
# revisions can be serialized.
from copy import copy
from bzrlib.osutils import (
contains_whitespace,
contains_linebreaks,
sha_strings,
)
from bzrlib.tree import Tree
class Testament(object):
"""Reduced summary of a revision.
Testaments can be
- produced from a revision
- written to a stream
- loaded from a stream
- compared to a revision
"""
long_header = 'bazaar-ng testament version 1\n'
short_header = 'bazaar-ng testament short form 1\n'
include_root = False
@classmethod
def from_revision(cls, repository, revision_id):
"""Produce a new testament from a historical revision."""
rev = repository.get_revision(revision_id)
tree = repository.revision_tree(revision_id)
return cls(rev, tree)
@classmethod
def from_revision_tree(cls, tree):
"""Produce a new testament from a revision tree."""
rev = tree._repository.get_revision(tree.get_revision_id())
return cls(rev, tree)
def __init__(self, rev, tree):
"""Create a new testament for rev using tree."""
self.revision_id = rev.revision_id
self.committer = rev.committer
self.timezone = rev.timezone or 0
self.timestamp = rev.timestamp
self.message = rev.message
self.parent_ids = rev.parent_ids[:]
if not isinstance(tree, Tree):
raise TypeError("As of bzr 2.4 Testament.__init__() takes a "
"Revision and a Tree.")
self.tree = tree
self.revprops = copy(rev.properties)
if contains_whitespace(self.revision_id):
raise ValueError(self.revision_id)
if contains_linebreaks(self.committer):
raise ValueError(self.committer)
def as_text_lines(self):
"""Yield text form as a sequence of lines.
The result is returned in utf-8, because it should be signed or
hashed in that encoding.
"""
r = []
a = r.append
a(self.long_header)
a('revision-id: %s\n' % self.revision_id)
a('committer: %s\n' % self.committer)
a('timestamp: %d\n' % self.timestamp)
a('timezone: %d\n' % self.timezone)
# inventory length contains the root, which is not shown here
a('parents:\n')
for parent_id in sorted(self.parent_ids):
if contains_whitespace(parent_id):
raise ValueError(parent_id)
a(' %s\n' % parent_id)
a('message:\n')
for l in self.message.splitlines():
a(' %s\n' % l)
a('inventory:\n')
for path, ie in self._get_entries():
a(self._entry_to_line(path, ie))
r.extend(self._revprops_to_lines())
return [line.encode('utf-8') for line in r]
def _get_entries(self):
return ((path, ie) for (path, versioned, kind, file_id, ie) in
self.tree.list_files(include_root=self.include_root))
def _escape_path(self, path):
if contains_linebreaks(path):
raise ValueError(path)
return unicode(path.replace('\\', '/').replace(' ', '\ '))
def _entry_to_line(self, path, ie):
"""Turn an inventory entry into a testament line"""
if contains_whitespace(ie.file_id):
raise ValueError(ie.file_id)
content = ''
content_spacer=''
if ie.kind == 'file':
# TODO: avoid switching on kind
if not ie.text_sha1:
raise AssertionError()
content = ie.text_sha1
content_spacer = ' '
elif ie.kind == 'symlink':
if not ie.symlink_target:
raise AssertionError()
content = self._escape_path(ie.symlink_target)
content_spacer = ' '
l = u' %s %s %s%s%s\n' % (ie.kind, self._escape_path(path),
ie.file_id.decode('utf8'),
content_spacer, content)
return l
def as_text(self):
return ''.join(self.as_text_lines())
def as_short_text(self):
"""Return short digest-based testament."""
return (self.short_header +
'revision-id: %s\n'
'sha1: %s\n'
% (self.revision_id, self.as_sha1()))
def _revprops_to_lines(self):
"""Pack up revision properties."""
if not self.revprops:
return []
r = ['properties:\n']
for name, value in sorted(self.revprops.items()):
if contains_whitespace(name):
raise ValueError(name)
r.append(' %s:\n' % name)
for line in value.splitlines():
r.append(u' %s\n' % line)
return r
def as_sha1(self):
return sha_strings(self.as_text_lines())
class StrictTestament(Testament):
"""This testament format is for use as a checksum in bundle format 0.8"""
long_header = 'bazaar-ng testament version 2.1\n'
short_header = 'bazaar-ng testament short form 2.1\n'
include_root = False
def _entry_to_line(self, path, ie):
l = Testament._entry_to_line(self, path, ie)[:-1]
l += ' ' + ie.revision
l += {True: ' yes\n', False: ' no\n'}[ie.executable]
return l
class StrictTestament3(StrictTestament):
"""This testament format is for use as a checksum in bundle format 0.9+
It differs from StrictTestament by including data about the tree root.
"""
long_header = 'bazaar testament version 3 strict\n'
short_header = 'bazaar testament short form 3 strict\n'
include_root = True
def _escape_path(self, path):
if contains_linebreaks(path):
raise ValueError(path)
if path == '':
path = '.'
return unicode(path.replace('\\', '/').replace(' ', '\ '))
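# Illustrative sketch (an assumption, not part of bzrlib's public API): obtain
# the signable text and its SHA-1 for a revision using the classes above.
def testament_text_and_sha1(repository, revision_id, strict=False):
    """Return (text, sha1) for revision_id, optionally in strict form 3."""
    factory = StrictTestament3 if strict else Testament
    testament = factory.from_revision(repository, revision_id)
    return testament.as_text(), testament.as_sha1()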
| Distrotech/bzr | bzrlib/testament.py | Python | gpl-2.0 | 9,034 | 0.001107 |
# Test reading hdf5 file that I created
import numpy as np
import Starfish
from Starfish.grid_tools import HDF5Interface
myHDF5 = HDF5Interface()
wl = myHDF5.wl
flux = myHDF5.load_flux(np.array([6100, 4.5, 0.0]))
| jason-neal/companion_simulations | misc/starfish_tests/read_HDF5.py | Python | mit | 215 | 0 |
import csv
import django.http
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.frontend.afe import rpc_utils
class CsvEncoder(object):
def __init__(self, request, response):
self._request = request
self._response = response
self._output_rows = []
def _append_output_row(self, row):
self._output_rows.append(row)
def _build_response(self):
response = django.http.HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = (
'attachment; filename=tko_query.csv')
writer = csv.writer(response)
writer.writerows(self._output_rows)
return response
def encode(self):
raise NotImplementedError
class UnhandledMethodEncoder(CsvEncoder):
def encode(self):
return rpc_utils.raw_http_response(
'Unhandled method %s (this indicates a bug)\r\n' %
self._request['method'])
class SpreadsheetCsvEncoder(CsvEncoder):
def _total_index(self, group, num_columns):
row_index, column_index = group['header_indices']
return row_index * num_columns + column_index
def _group_string(self, group):
result = '%s / %s' % (group['pass_count'], group['complete_count'])
if group['incomplete_count'] > 0:
result += ' (%s incomplete)' % group['incomplete_count']
if 'extra_info' in group:
result = '\n'.join([result] + group['extra_info'])
return result
def _build_value_table(self):
value_table = [''] * self._num_rows * self._num_columns
for group in self._response['groups']:
total_index = self._total_index(group, self._num_columns)
value_table[total_index] = self._group_string(group)
return value_table
def _header_string(self, header_value):
return '/'.join(header_value)
def _process_value_table(self, value_table, row_headers):
total_index = 0
for row_index in xrange(self._num_rows):
row_header = self._header_string(row_headers[row_index])
row_end_index = total_index + self._num_columns
row_values = value_table[total_index:row_end_index]
self._append_output_row([row_header] + row_values)
total_index += self._num_columns
def encode(self):
header_values = self._response['header_values']
assert len(header_values) == 2
row_headers, column_headers = header_values
self._num_rows, self._num_columns = (len(row_headers),
len(column_headers))
value_table = self._build_value_table()
first_line = [''] + [self._header_string(header_value)
for header_value in column_headers]
self._append_output_row(first_line)
self._process_value_table(value_table, row_headers)
return self._build_response()
class TableCsvEncoder(CsvEncoder):
def __init__(self, request, response):
super(TableCsvEncoder, self).__init__(request, response)
self._column_specs = request['columns']
def _format_row(self, row_object):
"""Extract data from a row object into a list of strings"""
return [row_object.get(field) for field, name in self._column_specs]
def _encode_table(self, row_objects):
self._append_output_row([column_spec[1] # header row
for column_spec in self._column_specs])
for row_object in row_objects:
self._append_output_row(self._format_row(row_object))
return self._build_response()
def encode(self):
return self._encode_table(self._response)
class GroupedTableCsvEncoder(TableCsvEncoder):
def encode(self):
return self._encode_table(self._response['groups'])
class StatusCountTableCsvEncoder(GroupedTableCsvEncoder):
_PASS_RATE_FIELD = '_test_pass_rate'
def __init__(self, request, response):
super(StatusCountTableCsvEncoder, self).__init__(request, response)
# inject a more sensible field name for test pass rate
for column_spec in self._column_specs:
field, name = column_spec
if name == 'Test pass rate':
column_spec[0] = self._PASS_RATE_FIELD
break
def _format_pass_rate(self, row_object):
result = '%s / %s' % (row_object['pass_count'],
row_object['complete_count'])
incomplete_count = row_object['incomplete_count']
if incomplete_count:
result += ' (%s incomplete)' % incomplete_count
return result
def _format_row(self, row_object):
row_object[self._PASS_RATE_FIELD] = self._format_pass_rate(row_object)
return super(StatusCountTableCsvEncoder, self)._format_row(row_object)
_ENCODER_MAP = {
'get_latest_tests' : SpreadsheetCsvEncoder,
'get_test_views' : TableCsvEncoder,
'get_group_counts' : GroupedTableCsvEncoder,
}
def _get_encoder_class(request):
method = request['method']
if method in _ENCODER_MAP:
return _ENCODER_MAP[method]
if method == 'get_status_counts':
if 'columns' in request:
return StatusCountTableCsvEncoder
return SpreadsheetCsvEncoder
return UnhandledMethodEncoder
def encoder(request, response):
EncoderClass = _get_encoder_class(request)
return EncoderClass(request, response)
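if __name__ == '__main__':
    # Hedged self-check sketch (not part of the original module): exercise the
    # dispatcher with method names taken from _ENCODER_MAP and the
    # get_status_counts special case above.
    assert _get_encoder_class({'method': 'get_test_views'}) is TableCsvEncoder
    assert _get_encoder_class({'method': 'get_status_counts'}) is SpreadsheetCsvEncoder
    assert _get_encoder_class({'method': 'get_status_counts',
                               'columns': []}) is StatusCountTableCsvEncoder
    assert _get_encoder_class({'method': 'unknown'}) is UnhandledMethodEncoder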
| libvirt/autotest | frontend/tko/csv_encoder.py | Python | gpl-2.0 | 5,495 | 0.00364 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.resource import Resource
from content_api import MONGO_PREFIX
class CompaniesResource(Resource):
"""
Company schema
"""
schema = {
"name": {"type": "string", "unique": True, "required": True},
"sd_subscriber_id": {"type": "string"},
"is_enabled": {"type": "boolean", "default": True},
"contact_name": {"type": "string"},
"phone": {"type": "string"},
"country": {"type": "string"},
}
datasource = {"source": "companies", "default_sort": [("name", 1)]}
item_methods = ["GET", "PATCH", "PUT"]
resource_methods = ["GET", "POST"]
mongo_prefix = MONGO_PREFIX
| petrjasek/superdesk-core | content_api/companies/resource.py | Python | agpl-3.0 | 963 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import income_range_view
from google.ads.googleads.v9.services.types import income_range_view_service
from .transports.base import (
IncomeRangeViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import IncomeRangeViewServiceGrpcTransport
class IncomeRangeViewServiceClientMeta(type):
"""Metaclass for the IncomeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[IncomeRangeViewServiceTransport]]
_transport_registry["grpc"] = IncomeRangeViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[IncomeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class IncomeRangeViewServiceClient(metaclass=IncomeRangeViewServiceClientMeta):
"""Service to manage income range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IncomeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IncomeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> IncomeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
IncomeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def income_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified income_range_view string."""
return "customers/{customer_id}/incomeRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_income_range_view_path(path: str) -> Dict[str, str]:
"""Parse a income_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/incomeRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, IncomeRangeViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the income range view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.IncomeRangeViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, IncomeRangeViewServiceTransport):
            # transport is an IncomeRangeViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = IncomeRangeViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_income_range_view(
self,
request: Union[
income_range_view_service.GetIncomeRangeViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> income_range_view.IncomeRangeView:
r"""Returns the requested income range view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetIncomeRangeViewRequest, dict]):
The request object. Request message for
[IncomeRangeViewService.GetIncomeRangeView][google.ads.googleads.v9.services.IncomeRangeViewService.GetIncomeRangeView].
resource_name (:class:`str`):
Required. The resource name of the
income range view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.IncomeRangeView:
An income range view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an income_range_view_service.GetIncomeRangeViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, income_range_view_service.GetIncomeRangeViewRequest
):
request = income_range_view_service.GetIncomeRangeViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_income_range_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("IncomeRangeViewServiceClient",)
| googleads/google-ads-python | google/ads/googleads/v9/services/services/income_range_view_service/client.py | Python | apache-2.0 | 18,971 | 0.001054 |
from django.core.cache import cache
def pytest_runtest_setup(item):
# Clear the cache before every test
cache.clear()
| mozilla/standup | standup/status/tests/conftest.py | Python | bsd-3-clause | 128 | 0 |
from . import common
from .common import *
| richard-willowit/odoo | odoo/tests/__init__.py | Python | gpl-3.0 | 43 | 0 |
'''import datetime
daytime.MINYEAR = 1901
daytime.MAXYEAR = 2000
print(daytime.MAXYEAR)'''
import calendar
count = 0
year = 1901
endYear = 2001
month = 12
for x in range (year, endYear):
for y in range (1, month+1):
if calendar.weekday(x,y,1) == calendar.SUNDAY:
count = count+1
print("Count: " + str(count))
| DarrenBellew/CloudCompDT228-3 | Lab3/CountingSundays.py | Python | mit | 328 | 0.021341 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RAFT."""
# pylint:skip-file
import tensorflow as tf
def create_update_Conv2d(c_in, c_out, k_size):
kernel_scale = 1.0 / 3.0
if isinstance(k_size, list) or isinstance(k_size, tuple):
bias_scale = c_out / (3.0 * c_in * k_size[0] * k_size[1])
else:
bias_scale = c_out / (3.0 * c_in * k_size * k_size)
return tf.keras.layers.Conv2D(
filters=c_out,
kernel_size=k_size,
kernel_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=kernel_scale, mode='fan_in'),
bias_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=bias_scale, mode='fan_in'))
class ConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128, **kwargs):
super(ConvGRU, self).__init__(**kwargs)
self.convz = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convr = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convq = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
def call(self, input_tensor):
h, x = input_tensor
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz(pad_hx))
r = tf.math.sigmoid(self.convr(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq(pad_q))
h = (1 - z) * h + z * q
return h
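# Hedged shape-check sketch (not part of the original module): one ConvGRU step
# on zero tensors with the default hidden (128) and input (192 + 128) widths.
def _example_conv_gru_step():
  gru = ConvGRU()
  h = tf.zeros([1, 32, 32, 128])
  x = tf.zeros([1, 32, 32, 192 + 128])
  return gru([h, x])  # updated hidden state, shape [1, 32, 32, 128]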
class SepConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convr1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convq1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convz2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convr2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convq2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
def call(self, input_tensor):
h, x = input_tensor
# horizontal
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [0, 0], [2, 2], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz1(pad_hx))
r = tf.math.sigmoid(self.convr1(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq1(pad_q))
h = (1 - z) * h + z * q
# vertical
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [2, 2], [0, 0], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz2(pad_hx))
r = tf.math.sigmoid(self.convr2(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq2(pad_q))
h = (1 - z) * h + z * q
return h
class FlowHead(tf.keras.layers.Layer):
def __init__(self, hidden_dim=256, input_dim=128, **kwargs):
super(FlowHead, self).__init__(**kwargs)
self.conv1 = create_update_Conv2d(
c_in=input_dim, c_out=hidden_dim, k_size=3)
self.conv2 = create_update_Conv2d(c_in=hidden_dim, c_out=2, k_size=3)
def call(self, x):
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
conv = tf.nn.relu(self.conv1(tf.pad(x, paddings)))
return self.conv2(tf.pad(conv, paddings))
class BasicMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(BasicMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=256, k_size=1)
self.convc2 = create_update_Conv2d(c_in=256, c_out=192, k_size=3)
self.convf1 = create_update_Conv2d(c_in=2, c_out=128, k_size=7)
self.convf2 = create_update_Conv2d(c_in=128, c_out=64, k_size=3)
self.conv = create_update_Conv2d(c_in=64 + 192, c_out=128 - 2, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
cor = tf.nn.relu(self.convc2(tf.pad(cor, paddings)))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class SmallMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(SmallMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=96, k_size=1)
self.convf1 = create_update_Conv2d(c_in=96, c_out=64, k_size=7)
self.convf2 = create_update_Conv2d(c_in=64, c_out=32, k_size=3)
self.conv = create_update_Conv2d(c_in=32, c_out=80, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class BasicUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=128, **kwargs):
super(BasicUpdateBlock, self).__init__(**kwargs)
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim=256, input_dim=hidden_dim)
if args.convex_upsampling:
self.mask = tf.keras.Sequential(
[create_update_Conv2d(c_in=128, c_out=256, k_size=3),
tf.keras.layers.ReLU(),
create_update_Conv2d(c_in=256, c_out=64 * 9, k_size=1)
])
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
net = self.gru([net, inp])
delta_flow = self.flow_head(net)
if self.args.convex_upsampling:
# Scale mask to balance gradients.
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_net = tf.pad(net, paddings)
mask = .25 * self.mask(pad_net)
else:
mask = None
return net, mask, delta_flow
class SmallUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=96, **kwargs):
super(SmallUpdateBlock, self).__init__(**kwargs)
self.encoder = SmallMotionEncoder(args)
self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82 + 64)
self.flow_head = FlowHead(hidden_dim=128, input_dim=hidden_dim)
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
net = self.gru([net, inp])
delta_flow = self.flow_head(net)
return net, None, delta_flow
| google-research/google-research | smurf/smurf_models/raft_update.py | Python | apache-2.0 | 8,034 | 0.003112 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# keyboard_widget.py
#
# Copyright © 2012 Linux Mint (QT version)
# Copyright © 2013 Manjaro (QT version)
# Copyright © 2013-2015 Antergos (GTK version)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Keyboard widget that shows keyboard layout and variant types to the user """
from gi.repository import Gtk, GObject
import cairo
import subprocess
import math
def unicode_to_string(raw):
""" U+ , or +U+ ... to string """
if raw[0:2] == "U+":
return chr(int(raw[2:], 16))
elif raw[0:2] == "+U":
return chr(int(raw[3:], 16))
return ""
class KeyboardWidget(Gtk.DrawingArea):
__gtype_name__ = 'KeyboardWidget'
kb_104 = {
"extended_return": False,
"keys": [
(0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd),
(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x2b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28),
(0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35),
()]
}
kb_105 = {
"extended_return": True,
"keys": [
(0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd),
(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x2b),
(0x54, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35),
()]
}
kb_106 = {
"extended_return": True,
"keys": [
(0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe),
(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29),
(0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36),
()]
}
def __init__(self):
Gtk.DrawingArea.__init__(self)
self.set_size_request(460, 130)
self.codes = []
self.layout = "us"
self.variant = ""
self.font = "Helvetica"
self.space = 6
self.kb = None
def set_layout(self, layout):
self.layout = layout
def set_font(self):
""" Font depends on the keyboard layout """
# broken: ad (Andorra), lk (Sri Lanka), brai (Braille)
# ?!?: us:chr
self.font = "Helvetica"
# Load fonts from ttf-aboriginal-sans package
# us:chr
if self.variant == "chr":
self.font = "Aboriginal Sans"
# Load fonts from:
# ttf-indic-otf, ttf-khmer, ttf-lohit-fonts, ttf-myanmar3
# ttf-thaana-fonts, ttf-tlwg
# Font: Akaash
if self.layout == "bd":
self.font = "Akaash"
# Font: Gari
if self.layout == "np" or self.layout == "in":
self.font = "Gargi"
# Font: KhmerOS
if self.layout == "kh":
self.font = "KhmerOS"
# Font: Bengali
if self.variant == "ben_probhat" or self.variant == "ben":
self.font = "Lohit Bengali"
# Font: Padmaa
if self.variant == "guj": # not all keys
self.font = "Padmaa"
# Font: Punjabi
if self.variant == "guru" or self.variant == "jhelum":
self.font = "Lohit Punjabi"
# Font: Kannada
if self.variant == "kan":
self.font = "Lohit Kannada"
# Font: Malayalam
if self.variant == "mal" or self.variant == "mal_lalitha":
self.font = "Malayalam"
# Font: Tamil
if self.variant == "tam_keyboard_with_numerals" or self.variant == "tam":
self.font = "Lohit Tamil"
# Font: TSCu Times
lst = ["tam_TAB", "tam_TSCII", "tam_unicode"]
for i in lst:
if self.variant == i:
self.font = "TSCu_Times"
# Font: Telugu
if self.variant == "tel":
self.font = "Lohit Telugu"
# Font: Oriya
lst = ["af", "ara", "am", "cn", "ge", "gr", "gn", "ir", "iq", "ie", "il", "la", "ma", "pk", "lk", "sy"]
for i in lst:
if self.layout == i:
self.font = "Oriya"
lst = ["geo", "urd-phonetic3", "urd-phonetic", "urd-winkeys"]
for i in lst:
if self.variant == i:
self.font = "Oriya"
if self.variant == "ori":
self.font = "Lohit Oriya"
# Font: Mv Boli
if self.layout == "mv":
self.font = "MVBoli"
# Font: Myanmar
if self.layout == "mm":
self.font = "Myanmar3"
# Font: Tlwg
if self.layout == "th":
self.font = "Tlwg Mono"
def set_variant(self, variant):
self.variant = variant
self.load_codes()
self.load_info()
self.set_font()
# Force repaint
self.queue_draw()
def load_info(self):
kbl_104 = ["us", "th"]
kbl_106 = ["jp"]
# Most keyboards are 105 key so default to that
if self.layout in kbl_104:
self.kb = self.kb_104
elif self.layout in kbl_106:
self.kb = self.kb_106
elif self.kb != self.kb_105:
self.kb = self.kb_105
@staticmethod
def rounded_rectangle(cr, x, y, width, height, aspect=1.0):
corner_radius = height / 10.0
radius = corner_radius / aspect
degrees = math.pi / 180.0
cr.new_sub_path()
cr.arc(x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)
cr.arc(x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees)
cr.arc(x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees)
cr.arc(x + radius, y + radius, radius, 180 * degrees, 270 * degrees)
cr.close_path()
cr.set_source_rgb(0.5, 0.5, 0.5)
cr.fill_preserve()
cr.set_source_rgba(0.2, 0.2, 0.2, 0.5)
cr.set_line_width(2)
cr.stroke()
def do_draw(self, cr):
""" The 'cr' variable is the current Cairo context """
# alloc = self.get_allocation()
# real_width = alloc.width
# real_height = alloc.height
width = 460
# height = 130
usable_width = width - 6
key_w = (usable_width - 14 * self.space) / 15
# Set background color to transparent
cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
cr.paint()
cr.set_source_rgb(0.84, 0.84, 0.84)
cr.set_line_width(2)
cr.rectangle(0, 0, 640, 640)
cr.stroke()
cr.set_source_rgb(0.22, 0.22, 0.22)
rx = 3
space = self.space
w = usable_width
kw = key_w
# Use this to show real widget size (useful when debugging this widget)
# cr.rectangle(0, 0, real_width, real_height)
def draw_row(row, sx, sy, last_end=False):
x = sx
y = sy
keys = row
rw = w - sx
i = 0
for k in keys:
rect = (x, y, kw, kw)
if i == len(keys) - 1 and last_end:
rect = (rect[0], rect[1], rw, rect[3])
self.rounded_rectangle(cr, rect[0], rect[1], rect[2], rect[3])
px = rect[0] + 5
py = rect[1] + rect[3] - (rect[3] / 4)
if len(self.codes) > 0:
# Draw lower character
cr.set_source_rgb(1.0, 1.0, 1.0)
cr.select_font_face(self.font, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
cr.set_font_size(10)
cr.move_to(px, py)
cr.show_text(self.regular_text(k))
px = rect[0] + 5
py = rect[1] + (rect[3] / 3)
# Draw upper character
cr.set_source_rgb(0.82, 0.82, 0.82)
cr.select_font_face(self.font, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
cr.set_font_size(8)
cr.move_to(px, py)
cr.show_text(self.shift_text(k))
rw = rw - space - kw
x = x + space + kw
i += 1
return x, rw
x = 6
y = 6
keys = self.kb["keys"]
ext_return = self.kb["extended_return"]
first_key_w = 0
rows = 4
remaining_x = [0, 0, 0, 0]
remaining_widths = [0, 0, 0, 0]
for i in range(0, rows):
if first_key_w > 0:
first_key_w *= 1.375
if self.kb == self.kb_105 and i == 3:
first_key_w = kw * 1.275
self.rounded_rectangle(cr, 6, y, first_key_w, kw)
x = 6 + first_key_w + space
else:
first_key_w = kw
x, rw = draw_row(keys[i], x, y, i == 1 and not ext_return)
remaining_x[i] = x
remaining_widths[i] = rw
if i != 1 and i != 2:
self.rounded_rectangle(cr, x, y, rw, kw)
x = .5
y = y + space + kw
if ext_return:
# rx = rx * 2
x1 = remaining_x[1]
y1 = 6 + kw * 1 + space * 1
w1 = remaining_widths[1]
x2 = remaining_x[2]
y2 = 6 + kw * 2 + space * 2
# this is some serious crap... but it has to be so
# maybe one day keyboards won't look like this...
# one can only hope
degrees = math.pi / 180.0
cr.new_sub_path()
cr.move_to(x1, y1 + rx)
cr.arc(x1 + rx, y1 + rx, rx, 180 * degrees, -90 * degrees)
cr.line_to(x1 + w1 - rx, y1)
cr.arc(x1 + w1 - rx, y1 + rx, rx, -90 * degrees, 0)
cr.line_to(x1 + w1, y2 + kw - rx)
cr.arc(x1 + w1 - rx, y2 + kw - rx, rx, 0 * degrees, 90 * degrees)
cr.line_to(x2 + rx, y2 + kw)
cr.arc(x2 + rx, y2 + kw - rx, rx, 90 * degrees, 180 * degrees)
cr.line_to(x2, y1 + kw)
cr.line_to(x1 + rx, y1 + kw)
cr.arc(x1 + rx, y1 + kw - rx, rx, 90 * degrees, 180 * degrees)
cr.close_path()
cr.set_source_rgb(0.5, 0.5, 0.5)
cr.fill_preserve()
cr.set_source_rgba(0.2, 0.2, 0.2, 0.5)
cr.set_line_width(2)
cr.stroke()
else:
x = remaining_x[2]
# Changed .5 to 6 because return key was out of line
y = 6 + kw * 2 + space * 2
self.rounded_rectangle(cr, x, y, remaining_widths[2], kw)
def regular_text(self, index):
try:
return self.codes[index - 1][0]
except IndexError:
return " "
def shift_text(self, index):
try:
return self.codes[index - 1][1]
except IndexError:
return " "
def ctrl_text(self, index):
try:
return self.codes[index - 1][2]
except IndexError:
return " "
def alt_text(self, index):
try:
return self.codes[index - 1][3]
except IndexError:
return " "
def load_codes(self):
if self.layout is None:
return
variant_param = ""
if self.variant:
variant_param = "-variant {0}".format(self.variant)
cmd = "/usr/bin/ckbcomp -model pc106 -layout {0} {1} -compact".format(self.layout,
variant_param)
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=None)
cfile = pipe.communicate()[0].decode("utf-8").split('\n')
# Clear current codes
del self.codes[:]
for line in cfile:
if line[:7] != "keycode":
continue
codes = line.split('=')[1].strip().split(' ')
plain = unicode_to_string(codes[0])
shift = unicode_to_string(codes[1])
ctrl = unicode_to_string(codes[2])
alt = unicode_to_string(codes[3])
if ctrl == plain:
ctrl = ""
if alt == plain:
alt = ""
self.codes.append((plain, shift, ctrl, alt))
GObject.type_register(KeyboardWidget)
| manjaro/thus | thus/misc/keyboard_widget.py | Python | gpl-3.0 | 13,129 | 0.001371 |
"""
This package contains algorithms for extracting document representations from their raw
bag-of-word counts.
"""
# bring model classes directly into package namespace, to save some typing
from .hdpmodel import HdpModel
from .ldamodel import LdaModel
from .lsimodel import LsiModel
from .tfidfmodel import TfidfModel
from .rpmodel import RpModel
from .logentropy_model import LogEntropyModel
from .word2vec import Word2Vec
from .doc2vec import Doc2Vec
from .ldamulticore import LdaMulticore
from .phrases import Phrases
from . import wrappers
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
"""
Remap feature ids to new values.
Given a mapping between old ids and new ids (some old ids may be missing = these
features are to be discarded), this will wrap a corpus so that iterating over
`VocabTransform[corpus]` returns the same vectors but with the new ids.
Old features that have no counterpart in the new ids are discarded. This
can be used to filter vocabulary of a corpus "online"::
>>> old2new = dict((oldid, newid) for newid, oldid in enumerate(ids_you_want_to_keep))
>>> vt = VocabTransform(old2new)
>>> for vec_with_new_ids in vt[corpus_with_old_ids]:
>>> ...
"""
def __init__(self, old2new, id2token=None):
# id2word = dict((newid, oldid2word[oldid]) for oldid, newid in old2new.iteritems())
self.old2new = old2new
self.id2token = id2token
def __getitem__(self, bow):
"""
Return representation with the ids transformed.
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
return sorted((self.old2new[oldid], weight) for oldid, weight in bow if oldid in self.old2new)
#endclass VocabTransform
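# Hedged example (not part of gensim): keep only two old ids and remap them to a
# compact range, as described in the VocabTransform docstring above.
def _vocab_transform_example():
    old2new = {0: 0, 2: 1}  # keep old ids 0 and 2, drop everything else
    vt = VocabTransform(old2new)
    corpus_with_old_ids = [[(0, 1.0), (1, 2.0), (2, 3.0)]]
    return [vec for vec in vt[corpus_with_old_ids]]  # [[(0, 1.0), (1, 3.0)]]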
| krishna11888/ai | third_party/gensim/gensim/models/__init__.py | Python | gpl-2.0 | 1,920 | 0.004688 |
#!/usr/bin/env python
import numpy as np
import pandas as pd
def build_allele_dict():
""" Take a sheet and build a dictionary with:
[gene][allele] = count
"""
fname = '/home/jfear/mclab/cegs_sem_sd_paper/from_matt/DSRP_and_CEGS_haps_1-6-15.xlsx'
data = pd.ExcelFile(fname)
dspr = data.parse('DSRP_haps')
f1 = data.parse('CEGS_haps')
data
| McIntyre-Lab/papers | fear_sem_sd_2015/scripts/haplotype_freqs.py | Python | lgpl-3.0 | 358 | 0.005587 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'django_sprinkler',
version = '0.4',
packages = ["django_sprinkler", ],
include_package_data = True,
license = 'BSD License',
description = 'Home Automation Python Project Django app meant to control watering',
long_description = README,
url = 'http://blog.digitalhigh.es',
author = 'Javier Pardo Blasco(jpardobl)',
author_email = 'jpardo@digitalhigh.es',
extras_require = {
"json": "simplejson"
},
install_requires = (
"Django==1.5",
"simplejson==2.6.2",
"pyparsing",
"hautomation_restclient",
"astral",
"pytz",
),
# test_suite='test_project.tests.runtests',
# tests_require=("selenium", "requests"),
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| jpardobl/django_sprinkler | setup.py | Python | bsd-3-clause | 1,456 | 0.021291 |
# coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
china = json.loads(open('china.json', 'r').read()) # slow
new_provs = []
new_citys = []
for prov in china['children']:
new_provs.append(prov['name'])
for city in prov['children']:
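        # Skip placeholder rows: 市辖区 ("municipal districts"), 县 ("counties"),
        # 省直辖县级行政区划 ("province-administered county-level divisions").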
if city['name'] not in [u'市辖区', u'县', u'省直辖县级行政区划']:
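            # Strip a trailing 市 ("city") so only the bare city name is kept.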
if city['name'][-1] == '市':
new_citys.append(city['name'][:-1])
else:
new_citys.append(city['name'])
print new_citys
with open('citys.json', 'w') as f:
    f.write(json.dumps(new_citys, ensure_ascii=False, indent=4))
| phyng/phyip | geodata/provinces_script.py | Python | mit | 637 | 0.004926 |
#!/usr/bin/python
import numpy as np
import os
import sys
from keras.layers import Activation, Dense, Input
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
NUM_OF_HIDDEN_NEURONS = 100
QNETWORK_NAME = 'online_network'
TARGETNET_NAME = 'target_network'
TAU = 0.0001 # soft update / low pass filter
class QNetworks:
def __init__(self, num_of_actions, num_of_states, num_of_hidden_neurons=NUM_OF_HIDDEN_NEURONS, tau=TAU):
self.NUM_OF_ACTIONS = num_of_actions
self.NUM_OF_HIDDEN_NEURONS = num_of_hidden_neurons
self.NUM_OF_STATES = num_of_states
self.TAU = tau
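        # Note: both networks below are restored from the online-network files;
        # weights saved under TARGETNET_NAME by save_models() are not reloaded here.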
self.online_net = self.init_model(QNETWORK_NAME)
self.target_net = self.init_model(QNETWORK_NAME)
def do_soft_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = self.TAU*weights[i] + (1.0-self.TAU)*target_weights[i]
self.target_net.set_weights(target_weights)
return
def do_hard_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = weights[i]
self.target_net.set_weights(target_weights)
return
def get_weights(self):
# get weights of the online Q network
return self.online_net.get_weights()
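    # Hedged usage sketch (not part of the original module), e.g.:
    #   nets = QNetworks(num_of_actions=4, num_of_states=8)
    #   nets.do_hard_update()   # start the target from the online weights
    #   nets.do_soft_update()   # low-pass filter online weights into the target
    #   weights = nets.get_weights()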
def init_model(self, net_name):
model = Sequential()
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS, input_shape=(self.NUM_OF_STATES,)))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_ACTIONS))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
filename = net_name+'/'+net_name
if os.path.isfile(filename+str(0)+'.txt'):
weights = model.get_weights()
for i in xrange(len(weights)):
loaded_weights = np.loadtxt(filename+str(i)+'.txt')
weights[i] = loaded_weights
model.set_weights(weights)
else:
print 'No model', filename, 'found. Creating a new model.'
return model
def save_models(self):
weights = self.online_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(QNETWORK_NAME+'/'+QNETWORK_NAME+str(i)+'.txt', weights[i])
weights = self.target_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(TARGETNET_NAME+'/'+TARGETNET_NAME+str(i)+'.txt', weights[i])
print("Saved models to disk.") | 356255531/SpikingDeepRLControl | code/EnvBo/Q-Learning/Testing_Arm_4points/q_networks.py | Python | gpl-3.0 | 3,008 | 0.002992 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import lxml.html
import re
import urllib2
from urlparse import urlparse, urlunparse
from django.core.urlresolvers import reverse
from desktop.lib.view_util import format_duration_in_millis
from desktop.lib import i18n
from django.utils.html import escape
from filebrowser.views import location_to_url
from hadoop import job_tracker
from hadoop import confparse
from hadoop.api.jobtracker.ttypes import JobNotFoundException
import hadoop.api.jobtracker.ttypes as ttypes
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
LOGGER = logging.getLogger(__name__)
def can_view_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-view-job', '')
return acl == '*' or username in acl.split(',')
def can_modify_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-modify-job', '')
return acl == '*' or username in acl.split(',')
def get_acls(job):
if job.is_mr2:
return job.acls
else:
return job.full_job_conf
class JobLinkage(object):
"""
A thin representation of a job, without much of the details.
Its purpose is to wrap a JobID to allow us to get further
information from Hadoop, without instantiating a full Job object
(which requires talking to Hadoop).
"""
def __init__(self, jobtracker, jobid):
"""
JobLinkage(jobtracker, jobid) -> JobLinkage
The jobid is the jobid string (not the thrift jobid)
"""
self._jobtracker = jobtracker
self.jobId = jobid
self.jobId_short = "_".join(jobid.split("_")[-2:])
self.is_mr2 = False
def get_task(self, task_id):
"""Retrieve a TaskInProgress from hadoop."""
ttask = self._jobtracker.get_task(
self._jobtracker.thriftjobid_from_string(self.jobId),
self._jobtracker.thrifttaskid_from_string(task_id))
return Task(ttask, self._jobtracker)
class Job(JobLinkage):
"""
Creates a Job instance pulled from the job tracker Thrift interface.
"""
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
@staticmethod
def from_id(jt, jobid, is_finished=False):
"""
Returns a Job instance given a job tracker interface and an id. The job tracker interface is typically
located in request.jt.
"""
try:
thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException:
try:
thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException, e:
raise PopupException(_("Could not find job with id %(jobid)s.") % {'jobid': jobid}, detail=e)
return Job(jt, thriftjob)
@staticmethod
def from_thriftjob(jt, thriftjob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.
The job tracker interface is typically located in request.jt
"""
return Job(jt, thriftjob)
def __init__(self, jt, thriftJob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that
job tracker interface. The job tracker interface is typically located in request.jt
"""
JobLinkage.__init__(self, jt, thriftJob.jobID.asString)
self.jt = jt
self.job = thriftJob
self.tasks = []
if self.job.tasks is not None:
self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)
self.task_map = dict( (task.taskId, task) for task in self.tasks )
self._counters = None
self._conf_keys = None
self._full_job_conf = None
self._init_attributes()
self.is_retired = hasattr(thriftJob, 'is_retired')
self.is_mr2 = False
self.applicationType = 'MR2'
@property
def counters(self):
if self.is_retired:
self._counters = {}
elif self._counters is None:
rollups = self.jt.get_job_counter_rollups(self.job.jobID)
# We get back a structure with counter lists for maps, reduces, and total
# and we need to invert this
def aggregate_counters(ctrs_from_jt, key, target):
for group in ctrs_from_jt.groups:
if group.name not in target:
target[group.name] = {
'name': group.name,
'displayName': group.displayName,
'counters': {}
}
agg_counters = target[group.name]['counters']
for counter in group.counters.itervalues():
if counter.name not in agg_counters:
agg_counters[counter.name] = {
'name': counter.name,
'displayName': counter.displayName,
}
agg_counters[counter.name][key] = counter.value
self._counters = {}
aggregate_counters(rollups.mapCounters, "map", self._counters)
aggregate_counters(rollups.reduceCounters, "reduce", self._counters)
aggregate_counters(rollups.jobCounters, "total", self._counters)
return self._counters
@property
def conf_keys(self):
if self._conf_keys is None:
self._initialize_conf_keys()
return self._conf_keys
@property
def full_job_conf(self):
if self._full_job_conf is None:
self._initialize_conf_keys()
return self._full_job_conf
def _init_attributes(self):
self.queueName = i18n.smart_unicode(self.job.profile.queueName)
self.jobName = i18n.smart_unicode(self.job.profile.name)
self.user = i18n.smart_unicode(self.job.profile.user)
self.mapProgress = self.job.status.mapProgress
self.reduceProgress = self.job.status.reduceProgress
self.setupProgress = self.job.status.setupProgress
self.cleanupProgress = self.job.status.cleanupProgress
if self.job.desiredMaps == 0:
maps_percent_complete = 0
else:
maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))
self.desiredMaps = self.job.desiredMaps
if self.job.desiredReduces == 0:
reduces_percent_complete = 0
else:
reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))
self.desiredReduces = self.job.desiredReduces
self.maps_percent_complete = maps_percent_complete
self.finishedMaps = self.job.finishedMaps
self.finishedReduces = self.job.finishedReduces
self.reduces_percent_complete = reduces_percent_complete
self.startTimeMs = self.job.startTime
self.startTimeFormatted = format_unixtime_ms(self.job.startTime)
self.launchTimeMs = self.job.launchTime
self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)
self.finishTimeMs = self.job.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)
self.status = self.job.status.runStateAsString
self.priority = self.job.priorityAsString
self.jobFile = self.job.profile.jobFile
finishTime = self.job.finishTime
if finishTime == 0:
finishTime = datetime.datetime.now()
else:
finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)
self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)
diff = int(finishTime.strftime("%s")) * 1000 - self.startTimeMs
self.durationFormatted = format_duration_in_millis(diff)
self.durationInMillis = diff
def kill(self):
self.jt.kill_job(self.job.jobID)
def get_task(self, id):
try:
return self.task_map[id]
except:
return JobLinkage.get_task(self, id)
def filter_tasks(self, task_types=None, task_states=None, task_text=None):
"""
Filters the tasks of the job.
Pass in task_type and task_state as sets; None for "all".
task_text is used to search in the state, mostRecentState, and the ID.
"""
assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
def is_good_match(t):
if task_types is not None:
if t.task.taskID.taskTypeAsString.lower() not in task_types:
return False
if task_states is not None:
if t.state.lower() not in task_states:
return False
if task_text is not None:
tt_lower = task_text.lower()
if tt_lower not in t.state.lower() and tt_lower not in t.mostRecentState.lower() and tt_lower not in t.task.taskID.asString.lower():
return False
return True
return [ t for t in self.tasks if is_good_match(t) ]
def _initialize_conf_keys(self):
if self.is_retired:
self._conf_keys = {}
self._full_job_conf = {}
else:
conf_keys = [
'mapred.mapper.class',
'mapred.reducer.class',
'mapred.input.format.class',
'mapred.output.format.class',
'mapred.input.dir',
'mapred.output.dir',
]
jobconf = get_jobconf(self.jt, self.jobId)
self._full_job_conf = jobconf
self._conf_keys = {}
for k, v in jobconf.iteritems():
if k in conf_keys:
self._conf_keys[dots_to_camel_case(k)] = v
class TaskList(object):
@staticmethod
def select(jt, jobid, task_types, task_states, text, count, offset):
"""
select(jt, jobid, task_types, task_states, text, count, offset) -> TaskList
Retrieve a TaskList from Hadoop according to the given criteria.
task_types is a set of job_tracker.VALID_TASK_TYPES. A value to None means everything.
task_states is a set of job_tracker.VALID_TASK_STATES. A value to None means everything.
"""
assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
if task_types is None:
task_types = job_tracker.VALID_TASK_TYPES
if task_states is None:
task_states = job_tracker.VALID_TASK_STATES
tjobid = jt.thriftjobid_from_string(jobid)
thrift_list = jt.get_task_list(tjobid, task_types, task_states, text, count, offset)
return TaskList.from_thriftTaskList(thrift_list, jt)
@staticmethod
def from_thriftTaskList(thrift_task_list, jobtracker):
"""TaskList.from_thriftTaskList(thrift_task_list, jobtracker) -> TaskList
"""
if thrift_task_list is None:
return None
return TaskList(thrift_task_list, jobtracker)
def __init__(self, tasklist, jobtracker):
self.__tasklist = tasklist # The thrift task list
self.__jt = jobtracker
self.__init_attributes()
def __init_attributes(self):
self.__tasksSoFar = [ Task(t, self.__jt) for t in self.__tasklist.tasks ]
self.__nTotalTasks = self.__tasklist.numTotalTasks
def __iter__(self):
return self.__tasksSoFar.__iter__()
def __len__(self):
return len(self.__tasksSoFar)
def __getitem__(self, key):
return self.__tasksSoFar[key]
@property
def tasks(self):
return self.__tasksSoFar
@property
def numTotalTasks(self):
return self.__nTotalTasks
class Task(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
def __init__(self, task, jt):
self.task = task
self.jt = jt
self._init_attributes()
self.attempt_map = {}
for id, attempt in self.task.taskStatuses.iteritems():
ta = TaskAttempt(attempt, task=self)
self.attempt_map[id] = ta
@property
def attempts(self):
return self.attempt_map.values()
def _init_attributes(self):
self.taskType = self.task.taskID.taskTypeAsString
self.taskId = self.task.taskID.asString
self.taskId_short = "_".join(self.taskId.split("_")[-2:])
self.startTimeMs = self.task.startTime
self.startTimeFormatted = format_unixtime_ms(self.task.startTime)
self.execStartTimeMs = self.task.execStartTime
self.execStartTimeFormatted = format_unixtime_ms(self.task.execStartTime)
self.execFinishTimeMs = self.task.execFinishTime
self.execFinishTimeFormatted = format_unixtime_ms(self.task.execFinishTime)
self.state = self.task.state
assert self.state in job_tracker.VALID_TASK_STATES
self.progress = self.task.progress
self.taskId = self.task.taskID.asString
self.jobId = self.task.taskID.jobID.asString
self.taskAttemptIds = self.task.taskStatuses.keys()
self.mostRecentState = self.task.mostRecentState
self.diagnosticMap = self.task.taskDiagnosticData
self.counters = self.task.counters
self.failed = self.task.failed
self.complete = self.task.complete
self.is_mr2 = False
def get_attempt(self, id):
"""
Returns a TaskAttempt for a given id.
"""
return self.attempt_map[id]
class TaskAttempt(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve task["foo"] as task.foo.
"""
return getattr(self, item)
def __init__(self, task_attempt, task):
assert task_attempt is not None
self.task_attempt = task_attempt
self.task = task
self._init_attributes();
def _init_attributes(self):
self.taskType = self.task_attempt.taskID.taskID.taskTypeAsString
self.attemptId = self.task_attempt.taskID.asString
self.attemptId_short = "_".join(self.attemptId.split("_")[-2:])
self.startTimeMs = self.task_attempt.startTime
self.startTimeFormatted = format_unixtime_ms(self.task_attempt.startTime)
self.finishTimeMs = self.task_attempt.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.task_attempt.finishTime)
self.state = self.task_attempt.stateAsString.lower()
self.taskTrackerId = self.task_attempt.taskTracker
self.phase = self.task_attempt.phaseAsString
self.progress = self.task_attempt.progress
self.outputSize = self.task_attempt.outputSize
self.shuffleFinishTimeMs = self.task_attempt.shuffleFinishTime
self.shuffleFinishTimeFormatted = format_unixtime_ms(self.task_attempt.shuffleFinishTime)
self.sortFinishTimeMs = self.task_attempt.sortFinishTime
self.sortFinishTimeFormatted = format_unixtime_ms(self.task_attempt.sortFinishTime)
self.mapFinishTimeMs = self.task_attempt.mapFinishTime # DO NOT USE, NOT VALID IN 0.20
self.mapFinishTimeFormatted = format_unixtime_ms(self.task_attempt.mapFinishTime)
self.counters = self.task_attempt.counters
self.is_mr2 = False
def get_tracker(self):
try:
tracker = Tracker.from_name(self.task.jt, self.taskTrackerId)
return tracker
except ttypes.TaskTrackerNotFoundException, e:
LOGGER.warn("Tracker %s not found: %s" % (self.taskTrackerId, e))
if LOGGER.isEnabledFor(logging.DEBUG):
all_trackers = self.task.jt.all_task_trackers()
for t in all_trackers.trackers:
LOGGER.debug("Available tracker: %s" % (t.trackerName,))
raise ttypes.TaskTrackerNotFoundException(
_("Cannot look up TaskTracker %(id)s.") % {'id': self.taskTrackerId})
def get_task_log(self):
"""
get_task_log(task_id) -> (stdout_text, stderr_text, syslog_text)
Retrieve the task log from the TaskTracker, at this url:
http://<tracker_host>:<port>/tasklog?taskid=<attempt_id>
Optional query string:
&filter=<source> : where <source> is 'syslog', 'stdout', or 'stderr'.
&start=<offset> : specify the start offset of the log section, when using a filter.
&end=<offset> : specify the end offset of the log section, when using a filter.
"""
tracker = self.get_tracker()
url = urlunparse(('http',
'%s:%s' % (tracker.host, tracker.httpPort),
'tasklog',
None,
'attemptid=%s' % (self.attemptId,),
None))
LOGGER.info('Retrieving %s' % (url,))
try:
data = urllib2.urlopen(url)
except urllib2.URLError:
raise urllib2.URLError(_("Cannot retrieve logs from TaskTracker %(id)s.") % {'id': self.taskTrackerId})
et = lxml.html.parse(data)
log_sections = et.findall('body/pre')
logs = [section.text or '' for section in log_sections]
if len(logs) < 3:
LOGGER.warn('Error parsing task attempt log for %s at "%s". Found %d (not 3) log sections' %
(self.attemptId, url, len(log_sections)))
err = _("Hue encountered an error while retrieving logs from '%s'.") % (url,)
logs += [err] * (3 - len(logs))
return logs
class Tracker(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo.
"""
return getattr(self, item)
@staticmethod
def from_name(jt, trackername):
return Tracker(jt.task_tracker(trackername))
def __init__(self, thrifttracker):
self.tracker = thrifttracker
self._init_attributes();
def _init_attributes(self):
self.trackerId = self.tracker.trackerName
self.httpPort = self.tracker.httpPort
self.host = self.tracker.host
self.lastSeenMs = self.tracker.lastSeen
self.lastSeenFormatted = format_unixtime_ms(self.tracker.lastSeen)
self.totalVirtualMemory = self.tracker.totalVirtualMemory
self.totalPhysicalMemory = self.tracker.totalPhysicalMemory
self.availableSpace = self.tracker.availableSpace
self.failureCount = self.tracker.failureCount
self.mapCount = self.tracker.mapCount
self.reduceCount = self.tracker.reduceCount
self.maxMapTasks = self.tracker.maxMapTasks
self.maxReduceTasks = self.tracker.maxReduceTasks
self.taskReports = self.tracker.taskReports
self.is_mr2 = False
class Cluster(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
def __init__(self, jt):
self.status = jt.cluster_status()
self._init_attributes();
def _init_attributes(self):
self.mapTasksInProgress = self.status.mapTasks
self.reduceTasksInProgress = self.status.reduceTasks
self.maxMapTasks = self.status.maxMapTasks
self.maxReduceTasks = self.status.maxReduceTasks
self.usedHeapMemory = self.status.usedMemory
self.maxHeapMemory = self.status.maxMemory
self.clusterStartTimeMs = self.status.startTime
self.clusterStartTimeFormatted = format_unixtime_ms(self.status.startTime)
self.identifier = self.status.identifier
self.taskTrackerExpiryInterval = self.status.taskTrackerExpiryInterval
self.totalJobSubmissions = self.status.totalSubmissions
self.state = self.status.stateAsString
self.numActiveTrackers = self.status.numActiveTrackers
self.activeTrackerNames = self.status.activeTrackerNames
self.numBlackListedTrackers = self.status.numBlacklistedTrackers
self.blacklistedTrackerNames = self.status.blacklistedTrackerNames
self.hostname = self.status.hostname
self.httpPort = self.status.httpPort
class LinkJobLogs(object):
@classmethod
def _make_hdfs_links(cls, log):
escaped_logs = escape(log)
return re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
@classmethod
def _make_mr_links(cls, log):
escaped_logs = escape(log)
return re.sub('(job_[0-9_]+(/|\.)?)', LinkJobLogs._replace_mr_link, escaped_logs)
@classmethod
def _make_links(cls, log):
escaped_logs = escape(log)
hdfs_links = re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
return re.sub('(job_[0-9_]+(/|\.)?)', LinkJobLogs._replace_mr_link, hdfs_links)
@classmethod
def _replace_hdfs_link(self, match):
try:
return '<a href="%s" target="_blank">%s</a>' % (location_to_url(match.group(0), strict=False), match.group(0))
except:
return match.group(0)
@classmethod
def _replace_mr_link(self, match):
try:
return '<a href="%s" target="_blank">%s</a>' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))
except:
return match.group(0)
def get_jobconf(jt, jobid):
"""
Returns a dict representation of the jobconf for the job corresponding
to jobid. filter_keys is an optional list of configuration keys to filter on.
"""
jid = jt.thriftjobid_from_string(jobid)
  # This will throw if the jobconf can't be found
xml_data = jt.get_job_xml(jid)
return confparse.ConfParse(xml_data)
def format_unixtime_ms(unixtime):
"""
Format a unix timestamp in ms to a human readable string
"""
if unixtime:
return str(datetime.datetime.fromtimestamp(unixtime/1000).strftime("%x %X %Z"))
else:
return ""
DOTS = re.compile("\.([a-z])")
def dots_to_camel_case(dots):
"""
Takes a string delimited with periods and returns a camel-case string.
  Example: dots_to_camel_case("foo.bar.baz") returns "fooBarBaz"
"""
def return_upper(match):
return match.groups()[0].upper()
return str(DOTS.sub(return_upper, dots))
def get_path(hdfs_url):
"""
Returns the path component of an HDFS url.
"""
# urlparse is lame, and only "uses_netloc" for a certain
# set of protocols. So we replace hdfs with gopher:
if hdfs_url.startswith("hdfs://"):
gopher_url = "gopher://" + hdfs_url[7:]
path = urlparse(gopher_url)[2] # path
return path
else:
return hdfs_url
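# Illustrative only (not part of the original module):
#   get_path("hdfs://namenode:8020/user/hue/out") -> "/user/hue/out"
#   dots_to_camel_case("mapred.output.dir") -> "mapredOutputDir"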
| kalahbrown/HueBigSQL | apps/jobbrowser/src/jobbrowser/models.py | Python | apache-2.0 | 22,026 | 0.009262 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from mounts import Mounts
import datetime
import threading
import os
import time
import sys
import signal
import traceback
import threading
def thread_for_binary(self,args):
self.logger.log("Thread for binary is called",True)
time.sleep(5)
self.logger.log("Waited in thread for 5 seconds",True)
self.child = subprocess.Popen(args,stdout=subprocess.PIPE)
self.logger.log("Binary subprocess Created",True)
class FreezeError(object):
def __init__(self):
self.errorcode = None
self.fstype = None
self.path = None
def __str__(self):
return "errorcode:" + str(self.errorcode) + " fstype:" + str(self.fstype) + " path" + str(self.path)
class FreezeResult(object):
def __init__(self):
self.errors = []
def __str__(self):
error_str = ""
for error in self.errors:
error_str+=(str(error)) + "\n"
return error_str
class FreezeHandler(object):
def __init__(self,logger):
        # sig_handle valid values (0: nothing done, 1: freeze succeeded, 2: freeze failed)
self.sig_handle = 0
self.child= None
self.logger=logger
def sigusr1_handler(self,signal,frame):
self.logger.log('freezed',False)
self.sig_handle=1
def sigchld_handler(self,signal,frame):
self.logger.log('some child process terminated')
if(self.child is not None and self.child.poll() is not None):
self.logger.log("binary child terminated",True)
self.sig_handle=2
def startproc(self,args):
binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])
binary_thread.start()
for i in range(0,33):
if(self.sig_handle==0):
self.logger.log("inside while with sig_handle "+str(self.sig_handle))
time.sleep(2)
else:
break
self.logger.log("Binary output for signal handled: "+str(self.sig_handle))
return self.sig_handle
def signal_receiver(self):
signal.signal(signal.SIGUSR1,self.sigusr1_handler)
signal.signal(signal.SIGCHLD,self.sigchld_handler)
class FsFreezer:
def __init__(self, patching, logger):
"""
"""
self.patching = patching
self.logger = logger
try:
self.mounts = Mounts(patching = self.patching, logger = self.logger)
except Exception as e:
errMsg='Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg,True,'Warning')
self.logger.log(str(e), True)
self.mounts = None
self.frozen_items = set()
self.unfrozen_items = set()
self.freeze_handler = FreezeHandler(self.logger)
def should_skip(self, mount):
if((mount.fstype == 'ext3' or mount.fstype == 'ext4' or mount.fstype == 'xfs' or mount.fstype == 'btrfs') and mount.type != 'loop'):
return False
else:
return True
def freeze_safe(self,timeout):
self.root_seen = False
error_msg=''
try:
freeze_result = FreezeResult()
freezebin=os.path.join(os.getcwd(),os.path.dirname(__file__),"safefreeze/bin/safefreeze")
args=[freezebin,str(timeout)]
arg=[]
for mount in self.mounts.mounts:
self.logger.log("fsfreeze mount :" + str(mount.mount_point), True)
if(mount.mount_point == '/'):
self.root_seen = True
self.root_mount = mount
elif(mount.mount_point and not self.should_skip(mount)):
args.append(str(mount.mount_point))
if(self.root_seen):
args.append('/')
self.logger.log("arg : " + str(args),True)
self.freeze_handler.signal_receiver()
self.logger.log("proceeded for accepting signals", True)
self.logger.enforce_local_flag(False)
sig_handle=self.freeze_handler.startproc(args)
if(sig_handle != 1):
if (self.freeze_handler.child is not None):
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line,encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
error_msg="freeze failed for some mount"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
except Exception as e:
self.logger.enforce_local_flag(True)
error_msg='freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
return freeze_result
def thaw_safe(self):
thaw_result = FreezeResult()
unable_to_sleep = False
if(self.freeze_handler.child is None):
self.logger.log("child already completed", True)
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
elif(self.freeze_handler.child.poll() is None):
self.logger.log("child process still running")
self.freeze_handler.child.send_signal(signal.SIGUSR1)
for i in range(0,30):
if(self.freeze_handler.child.poll() is None):
self.logger.log("child still running sigusr1 sent")
time.sleep(1)
else:
break
self.logger.enforce_local_flag(True)
self.logger.log("Binary output after process end: ", True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
if(self.freeze_handler.child.returncode!=0):
error_msg = 'snapshot result inconsistent as child returns with failure'
thaw_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
else:
self.logger.log("Binary output after process end when no thaw sent: ", True)
if(self.freeze_handler.child.returncode==2):
error_msg = 'Unable to execute sleep'
thaw_result.errors.append(error_msg)
unable_to_sleep = True
else:
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
self.logger.enforce_local_flag(True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
self.logger.log(error_msg, True, 'Error')
self.logger.enforce_local_flag(True)
return thaw_result, unable_to_sleep
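# --- Hedged usage sketch (not part of the original handler) ---
# Illustrates the intended freeze -> snapshot -> thaw flow.  The `patching`
# object and the stub logger below are hypothetical stand-ins for the objects
# the Azure extension runtime normally supplies; their interface is assumed
# from the calls made above (log(msg, local, level) and enforce_local_flag(flag)).
class _ExampleLogger(object):
    def log(self, msg, local=False, level='Info'):
        print(level + ': ' + str(msg))
    def enforce_local_flag(self, flag):
        pass
def _example_freeze_thaw(patching, timeout=60):
    freezer = FsFreezer(patching=patching, logger=_ExampleLogger())
    freeze_result = freezer.freeze_safe(timeout)
    if len(freeze_result.errors) == 0:
        # ... take the VM snapshot while the filesystems are frozen ...
        thaw_result, unable_to_sleep = freezer.thaw_safe()
        return thaw_result, unable_to_sleep
    return freeze_result, False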
| soumyanishan/azure-linux-extensions | VMBackup/main/fsfreezer.py | Python | apache-2.0 | 8,407 | 0.00904 |
import xbmcaddon
MainBase = 'http://164.132.106.213/data/home/home.txt'
addon = xbmcaddon.Addon('plugin.video.sneek') | gypogypo/plugin.video.sneek | _Edit.py | Python | gpl-3.0 | 121 | 0.008264 |
### Simple IAN model for use with Neural Photo Editor
# This model is a simplified version of the Introspective Adversarial Network that does not
# make use of Multiscale Dilated Convolutional blocks, Ternary Adversarial Loss, or an
# autoregressive RGB-Beta layer. It's designed to be sleeker and to run on laptop GPUs with <1GB of memory.
from math import sqrt
import os
import sys
import numpy as np
import lasagne.layers
from lasagne.layers import batch_norm as BN
from lasagne.layers import ConcatLayer as CL
from lasagne.layers import DenseLayer as DL
from lasagne.layers import ElemwiseSumLayer as ESL
from lasagne.layers import NonlinearityLayer as NL
from lasagne.layers import SliceLayer as SL
from lasagne.layers import TransposedConv2DLayer as TC2D
from lasagne.init import Normal as initmethod
from lasagne.nonlinearities import elu
from lasagne.nonlinearities import rectify as relu
from lasagne.nonlinearities import LeakyRectify as lrelu
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from gan.util.layers import GaussianSampleLayer,MinibatchLayer
CFG = {
'batch_size': 128,
'learning_rate': {
0: 0.0002,
},
'optimizer': 'Adam',
'beta1': 0.5,
'update_ratio': 1,
'decay_rate': 0,
'reg': 1e-5,
'momentum': 0.9,
'shuffle': True,
'dims': (64,64),
'n_channels': 3,
'batches_per_chunk': 64,
'max_epochs': 250,
'checkpoint_every_nth': 1,
'num_latents': 100,
'recon_weight': 3.0,
'feature_weight': 1.0,
'dg_weight': 1.0,
'dd_weight': 1.0,
'agr_weight': 1.0,
'ags_weight': 1.0,
'n_shuffles': 1,
'ortho': 1e-3,
}
def get_model(interp=False, dnn=True):
if dnn:
import lasagne.layers.dnn
from lasagne.layers.dnn import Conv2DDNNLayer as C2D
from theano.sandbox.cuda.basic_ops import (
as_cuda_ndarray_variable,
host_from_gpu,
gpu_contiguous,
HostFromGpu,
gpu_alloc_empty,
)
from theano.sandbox.cuda.dnn import (
GpuDnnConvDesc,
GpuDnnConv,
GpuDnnConvGradI,
dnn_conv,
dnn_pool,
)
from gan.util.layers import DeconvLayer
else:
from lasagne.layers import Conv2DLayer as C2D
dims, n_channels = tuple(CFG['dims']), CFG['n_channels']
shape = (None, n_channels)+dims
l_in = lasagne.layers.InputLayer(shape=shape)
l_enc_conv1 = C2D(
incoming = l_in,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv1'
)
l_enc_conv2 = BN(C2D(
incoming = l_enc_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv2'
),name = 'bnorm2')
l_enc_conv3 = BN(C2D(
incoming = l_enc_conv2,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv3'
),name = 'bnorm3')
l_enc_conv4 = BN(C2D(
incoming = l_enc_conv3,
num_filters = 1024,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv4'
),name = 'bnorm4')
l_enc_fc1 = BN(DL(
incoming = l_enc_conv4,
num_units = 1000,
W = initmethod(0.02),
nonlinearity = elu,
name = 'enc_fc1'
),
name = 'bnorm_enc_fc1')
l_enc_mu,l_enc_logsigma = [BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_mu'),name='mu_bnorm'),
BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_logsigma'),name='ls_bnorm')]
l_Z = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z')
l_dec_fc2 = BN(DL(
incoming = l_Z,
num_units = 1024*16,
nonlinearity = relu,
W=initmethod(0.02),
name='l_dec_fc2'),
name = 'bnorm_dec_fc2')
l_unflatten = lasagne.layers.ReshapeLayer(
incoming = l_dec_fc2,
shape = ([0],1024,4,4),
)
if dnn:
l_dec_conv1 = BN(DeconvLayer(
incoming = l_unflatten,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv1'
),name = 'bnorm_dc1')
l_dec_conv2 = BN(DeconvLayer(
incoming = l_dec_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv2'
),name = 'bnorm_dc2')
l_dec_conv3 = BN(DeconvLayer(
incoming = l_dec_conv2,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv3'
),name = 'bnorm_dc3')
l_out = DeconvLayer(
incoming = l_dec_conv3,
num_filters = 3,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
b = None,
nonlinearity = lasagne.nonlinearities.tanh,
name = 'dec_out'
)
else:
l_dec_conv1 = SL(SL(BN(TC2D(
incoming = l_unflatten,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv1'
),name = 'bnorm_dc1'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_dec_conv2 = SL(SL(BN(TC2D(
incoming = l_dec_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv2'
),name = 'bnorm_dc2'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_dec_conv3 = SL(SL(BN(TC2D(
incoming = l_dec_conv2,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv3'
),name = 'bnorm_dc3'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_out = SL(SL(TC2D(
incoming = l_dec_conv3,
num_filters = 3,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
b = None,
nonlinearity = lasagne.nonlinearities.tanh,
name = 'dec_out'
),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
# l_in,num_filters=1,filter_size=[5,5],stride=[2,2],crop=[1,1],W=dc.W,b=None,nonlinearity=None)
minibatch_discrim = MinibatchLayer(lasagne.layers.GlobalPoolLayer(l_enc_conv4), num_kernels=500,name='minibatch_discrim')
l_discrim = DL(incoming = minibatch_discrim,
num_units = 1,
nonlinearity = lasagne.nonlinearities.sigmoid,
b = None,
W=initmethod(),
name = 'discrimi')
return {'l_in': l_in,
'l_out': l_out,
'l_mu': l_enc_mu,
'l_ls': l_enc_logsigma,
'l_Z': l_Z,
'l_introspect': [l_enc_conv1, l_enc_conv2,l_enc_conv3,l_enc_conv4],
'l_discrim': l_discrim}
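# --- Hedged usage sketch (not part of the original repository) ---
# Shows one way the returned layer dictionary could be turned into Theano
# expressions with Lasagne; the variable names and this minimal setup are
# assumptions, not the project's actual training code.
def _example_expressions():
    net = get_model(dnn=False)  # CPU-friendly variant without cuDNN deconvs
    X = T.tensor4('X')
    recon = lasagne.layers.get_output(net['l_out'], {net['l_in']: X})
    p_real = lasagne.layers.get_output(net['l_discrim'], {net['l_in']: X})
    params = lasagne.layers.get_all_params(net['l_out'], trainable=True)
    return recon, p_real, params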
| spellrun/Neural-Photo-Editor | gan/models/ian_simple.py | Python | mit | 8,119 | 0.045695 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
import unittest
from citest.base import (
ExecutionContext,
JsonSnapshotHelper)
from citest.json_predicate.path_predicate_helpers import PathEqPredicate
import citest.json_predicate as jp
_LETTER_DICT = {'a':'A', 'b':'B', 'z':'Z'}
_NUMBER_DICT = {'a':1, 'b':2, 'three':3}
_MIXED_DICT = {'a':'A', 'b':2, 'x':'X'}
_COMPOSITE_DICT = {'letters': _LETTER_DICT, 'numbers': _NUMBER_DICT}
_LETTER_ARRAY = ['a', 'b', 'c']
_NUMBER_ARRAY = [1, 2, 3]
_DICT_ARRAY = [{}, _LETTER_DICT, _NUMBER_DICT, _COMPOSITE_DICT]
_MULTI_ARRAY = [_LETTER_DICT, _NUMBER_DICT, _LETTER_DICT, _NUMBER_DICT]
class JsonMapPredicateTest(unittest.TestCase):
def assertEqual(self, expect, have, msg=''):
JsonSnapshotHelper.AssertExpectedValue(expect, have, msg)
def _try_map(self, context, pred, obj, expect_ok, expect_map_result=None,
dump=False, min=1):
"""Helper function for invoking finder and asserting the result.
Args:
pred: The jp.ValuePredicate to map.
obj: The object to apply the predicate to.
expect_ok: Whether we expect apply to succeed or not.
expect_map_result: If not None, then the expected
jp.MapPredicateResult from apply().
dump: If True then print the filter_result to facilitate debugging.
"""
map_result = jp.MapPredicate(pred, min=min)(context, obj)
if dump:
print('MAP_RESULT:\n{0}\n'.format(
JsonSnapshotHelper.ValueToEncodedJson(map_result)))
if expect_map_result:
self.assertEqual(expect_map_result, map_result)
error_msg = '{expect_ok} != {ok}\n{map_result}'.format(
expect_ok=expect_ok, ok=map_result.__nonzero__(),
map_result=map_result)
self.assertEqual(expect_ok, map_result.__nonzero__(), error_msg)
def test_map_predicate_good_1(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aA_attempt = jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))
expect_result = jp.MapPredicateResult(
valid=True, pred=aA,
obj_list=[_LETTER_DICT], all_results=[aA_attempt.result],
good_map=[aA_attempt],
bad_map=[])
self._try_map(context, aA, _LETTER_DICT, True, expect_result)
def test_map_predicate_bad(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT], all_results=[aA(context, _NUMBER_DICT)],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))],
good_map=[])
self._try_map(context, aA, _NUMBER_DICT, False, expect_result)
def test_map_predicate_good_and_bad_min_1(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aa_number_attempt = jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))
aa_letter_attempt = jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))
expect_result = jp.MapPredicateResult(
valid=True, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aa_number_attempt.result, aa_letter_attempt.result],
good_map=[aa_letter_attempt],
bad_map=[aa_number_attempt])
self._try_map(context, aA, [_NUMBER_DICT, _LETTER_DICT],
True, expect_result)
def test_map_predicate_good_and_bad_min_2(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aA(context, _NUMBER_DICT), aA(context, _LETTER_DICT)],
good_map=[jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))])
self._try_map(
context, aA, [_NUMBER_DICT, _LETTER_DICT], False, expect_result, min=2)
def test_map_predicate_good_and_bad_min_indirect(self):
context = ExecutionContext(min=2)
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aA(context, _NUMBER_DICT), aA(context, _LETTER_DICT)],
good_map=[jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))])
self._try_map(
context, aA, [_NUMBER_DICT, _LETTER_DICT], False, expect_result,
min=lambda x: x['min'])
def test_map_not_found(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aa_composite_attempt = jp.ObjectResultMapAttempt(
_COMPOSITE_DICT, aA(context, _COMPOSITE_DICT))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_COMPOSITE_DICT], all_results=[aa_composite_attempt.result],
bad_map=[aa_composite_attempt],
good_map=[])
self._try_map(context, aA, _COMPOSITE_DICT, False, expect_result)
def test_object_filter_cases(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, _LETTER_DICT, True)
self._try_map(context, aA, _COMPOSITE_DICT, False)
self._try_map(context, aA, _NUMBER_DICT, False)
self._try_map(context, aA, _MULTI_ARRAY, True)
self._try_map(context, aA, [_COMPOSITE_DICT, _COMPOSITE_DICT], False)
self._try_map(context, aA, _MIXED_DICT, True)
AandB = jp.AND([PathEqPredicate('a', 'A'),
PathEqPredicate('b', 'B')])
self._try_map(context, AandB, _LETTER_DICT, True)
self._try_map(context, AandB, _COMPOSITE_DICT, False)
self._try_map(context, AandB, _NUMBER_DICT, False)
self._try_map(context, AandB, _MULTI_ARRAY, True)
self._try_map(context, AandB, _MIXED_DICT, False)
def test_none_bad(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, None, False)
def test_none_good(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, None, True, min=0)
if __name__ == '__main__':
unittest.main()
| google/citest | tests/json_predicate/map_predicate_test.py | Python | apache-2.0 | 7,327 | 0.003276 |
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
import warnings
import attr
from attr.validators import instance_of, optional
from bitstring import pack
from ._utils import (read_prefixed_data, read_string, build_string,
build_header, ParseFailure, SerialisationFailure)
unicode = type(u"")
@attr.s
class Failure(object):
reason = attr.ib(default=None)
@attr.s
class Disconnect(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(14, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class PingRESP(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(13, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class PingREQ(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(12, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class UnsubACK(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(11, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier=packet_identifier)
@attr.s
class Unsubscribe(object):
packet_identifier = attr.ib(validator=instance_of(int))
topics = attr.ib(validator=instance_of(list))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(10, (False, False, True, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
for topic in self.topics:
if not isinstance(topic, unicode):
raise SerialisationFailure(self, "Topics must be Unicode")
b.append(build_string(topic))
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, True, False):
raise ParseFailure(cls, "Bad flags")
topics = []
packet_identifier = data.read('uint:16')
while not data.bitpos == len(data):
topics.append(read_string(data))
if len(topics) == 0:
raise ParseFailure(cls, "Must contain a payload.")
return cls(packet_identifier=packet_identifier, topics=topics)
@attr.s
class PubCOMP(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(7, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubREL(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(6, (False, False, True, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, True, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubREC(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(5, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubACK(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(4, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class Publish(object):
duplicate = attr.ib(validator=instance_of(bool))
qos_level = attr.ib(validator=instance_of(int))
retain = attr.ib(validator=instance_of(bool))
topic_name = attr.ib(validator=instance_of(unicode))
payload = attr.ib(validator=instance_of(bytes))
packet_identifier = attr.ib(validator=optional(instance_of(int)),
default=None)
def serialise(self):
"""
Assemble this into an on-wire message.
"""
flags = [self.duplicate]
if self.qos_level == 0:
flags.extend([False, False])
elif self.qos_level == 1:
flags.extend([False, True])
elif self.qos_level == 2:
flags.extend([True, False])
else:
raise SerialisationFailure(self, "QoS must be 0, 1, or 2")
flags.append(self.retain)
payload = self._make_payload()
header = build_header(3, flags, len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Topic Name
b.append(build_string(self.topic_name))
if self.packet_identifier:
if self.qos_level > 0:
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
else:
raise SerialisationFailure(self, "Packet Identifier on non-QoS 1/2 packet")
else:
if self.qos_level > 0:
raise SerialisationFailure(self, "QoS level > 0 but no Packet Identifier")
# Payload (bytes)
b.append(self.payload)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
total_length = len(data)
duplicate = flags[0]
if flags[1:3] == (False, False):
qos_level = 0
elif flags[1:3] == (False, True):
qos_level = 1
elif flags[1:3] == (True, False):
qos_level = 2
elif flags[1:3] == (True, True):
raise ParseFailure(cls, "Invalid QoS value")
retain = flags[3]
topic_name = read_string(data)
if qos_level in [1, 2]:
packet_identifier = data.read('uint:16')
else:
packet_identifier = None
payload = data.read(total_length - data.bitpos).bytes
return cls(duplicate=duplicate, qos_level=qos_level, retain=retain,
topic_name=topic_name, packet_identifier=packet_identifier,
payload=payload)
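# --- Hedged usage sketch (not part of the original module) ---
# Serialising a minimal QoS 0 PUBLISH; the topic and payload are arbitrary.
# No packet identifier is given, matching the QoS 0 rules enforced above.
def _example_publish_bytes():
    pub = Publish(duplicate=False, qos_level=0, retain=False,
                  topic_name=u"example/topic", payload=b"hello")
    return pub.serialise()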
@attr.s
class SubACK(object):
packet_identifier = attr.ib(validator=instance_of(int))
return_codes = attr.ib(validator=instance_of(list))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(9, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
for code in self.return_codes:
b.append(pack('uint:8', code).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return_codes = []
packet_identifier = data.read('uint:16')
while not data.bitpos == len(data):
return_code = data.read('uint:8')
return_codes.append(return_code)
return cls(packet_identifier=packet_identifier,
return_codes=return_codes)
@attr.s
class SubscriptionTopicRequest(object):
topic_filter = attr.ib(validator=instance_of(unicode))
max_qos = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message part.
"""
b = []
# Topic filter, as UTF-8
b.append(build_string(self.topic_filter))
# Reserved section + max QoS
b.append(pack('uint:6, uint:2', 0, self.max_qos).bytes)
return b"".join(b)
@attr.s
class Subscribe(object):
packet_identifier = attr.ib(validator=instance_of(int))
topic_requests = attr.ib(validator=instance_of(list))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(8, (False, False, True, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
for request in self.topic_requests:
b.append(request.serialise())
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, True, False):
raise ParseFailure(cls, "Bad flags")
pairs = []
packet_identifier = data.read('uint:16')
def parse_pair():
topic_filter = read_string(data)
reserved = data.read("uint:6")
max_qos = data.read("uint:2")
if reserved:
raise ParseFailure(cls, "Data in QoS Reserved area")
if max_qos not in [0, 1, 2]:
raise ParseFailure(cls, "Invalid QoS")
pairs.append(SubscriptionTopicRequest(topic_filter=topic_filter,
max_qos=max_qos))
parse_pair()
while not data.bitpos == len(data):
parse_pair()
return cls(packet_identifier=packet_identifier, topic_requests=pairs)
@attr.s
class ConnACK(object):
session_present = attr.ib(validator=instance_of(bool))
return_code = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(2, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Flags -- 7 bit reserved + Session Present flag
b.append(pack('uint:7, bool', 0, self.session_present).bytes)
# Return code
b.append(pack('uint:8', self.return_code).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Take an on-wire message and turn it into an instance of this class.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
reserved = data.read(7).uint
if reserved:
raise ParseFailure(cls, "Reserved flag used.")
built = cls(session_present=data.read(1).bool,
return_code=data.read(8).uint)
# XXX: Do some more verification, re conn flags
if not data.bitpos == len(data):
# There's some wacky stuff going on here -- data they included, but
# didn't put flags for, maybe?
warnings.warn(("Quirky server CONNACK -- packet length was "
"%d bytes but only had %d bytes of useful data") % (
data.bitpos, len(data)))
return built
@attr.s
class ConnectFlags(object):
username = attr.ib(validator=instance_of(bool), default=False)
password = attr.ib(validator=instance_of(bool), default=False)
will = attr.ib(validator=instance_of(bool), default=False)
will_retain = attr.ib(validator=instance_of(bool), default=False)
will_qos = attr.ib(validator=instance_of(int), default=False)
clean_session = attr.ib(validator=instance_of(bool), default=False)
reserved = attr.ib(validator=instance_of(bool), default=False)
def serialise(self):
"""
Assemble this into an on-wire message portion.
"""
return pack(
'bool, bool, bool, uint:2, bool, bool, bool',
self.username, self.password, self.will_retain, self.will_qos,
self.will, self.clean_session, self.reserved).bytes
@classmethod
def deserialise(cls, data):
built = cls(
username=data.read(1).bool,
password=data.read(1).bool,
will_retain=data.read(1).bool,
will_qos=data.read(2).uint,
will=data.read(1).bool,
clean_session=data.read(1).bool,
reserved=data.read(1).bool
)
# XXX: Do some more conformance checking here
# Need to worry about invalid flag combinations
if built.reserved:
# MQTT-3.1.2-3, reserved flag must not be used
raise ParseFailure(cls, "Reserved flag in CONNECT used")
return built
@attr.s
class Connect(object):
client_id = attr.ib(validator=instance_of(unicode))
flags = attr.ib(validator=instance_of(ConnectFlags))
keep_alive = attr.ib(validator=instance_of(int), default=0)
will_topic = attr.ib(validator=optional(instance_of(unicode)),
default=None)
will_message = attr.ib(validator=optional(instance_of(bytes)),
default=None)
username = attr.ib(validator=optional(instance_of(unicode)),
default=None)
password = attr.ib(validator=optional(instance_of(unicode)),
default=None)
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(1, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Protocol name (MQTT)
b.append(build_string(u"MQTT"))
# Protocol Level (4 == 3.1.1)
b.append(pack('uint:8', 4).bytes)
# CONNECT flags
b.append(self.flags.serialise())
# Keep Alive time
b.append(pack('uint:16', self.keep_alive).bytes)
# Client ID
b.append(build_string(self.client_id))
if self.flags.will:
b.append(build_string(self.will_topic))
# Will message is a uint16 prefixed bytestring
b.append(pack('uint:16', len(self.will_message)).bytes)
b.append(self.will_message)
if self.flags.username:
b.append(build_string(self.username))
# Technically this should be binary data but we will only accept UTF-8
if self.flags.password:
b.append(build_string(self.password))
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
protocol = read_string(data)
if protocol != u"MQTT":
print(protocol)
raise ParseFailure(cls, "Bad protocol name")
protocol_level = data.read('uint:8')
if protocol_level != 4:
raise ParseFailure(cls, "Bad protocol level")
flags = ConnectFlags.deserialise(data.read(8))
# Keep alive, in seconds
keep_alive = data.read('uint:16')
# The client ID
client_id = read_string(data)
if flags.will:
# MQTT-3.1.3-10, topic must be UTF-8
will_topic = read_string(data)
will_message = read_prefixed_data(data)
else:
will_topic = None
will_message = None
# Username
if flags.username:
username = read_string(data)
else:
username = None
# Password
if flags.password:
password = read_string(data)
else:
password = None
if not data.bitpos == len(data):
# There's some wacky stuff going on here -- data they included, but
# didn't put flags for, maybe?
warnings.warn(("Quirky client CONNECT -- packet length was "
"%d bytes but only had %d bytes of useful data") % (
data.bitpos, len(data)))
# The event
return cls(flags=flags, keep_alive=keep_alive, client_id=client_id,
will_topic=will_topic, will_message=will_message,
username=username, password=password)
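# --- Hedged usage sketch (not part of the original module) ---
# Building a minimal clean-session CONNECT packet; the client id and
# keep-alive value are illustrative only.
def _example_connect_bytes():
    flags = ConnectFlags(clean_session=True)
    return Connect(client_id=u"example-client", flags=flags,
                   keep_alive=60).serialise()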
| NinjaMSP/crossbar | crossbar/adapter/mqtt/_events.py | Python | agpl-3.0 | 21,416 | 0.000654 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""6D kinematic reachability space of a robot's manipulators.
.. image:: ../../images/databases/kinematicreachability.jpg
:width: 640
.. image:: ../../images/databases/kinematicreachability_side.jpg
:width: 640
`[source] <../_modules/openravepy/databases/kinematicreachability.html>`_
**Running the Generator**
.. code-block:: bash
openrave.py --database kinematicreachability --robot=robots/barrettsegway.robot.xml
**Showing the Reachability** (uses mayavi2)
.. code-block:: bash
openrave.py --database kinematicreachability --robot=robots/barrettsegway.robot.xml --show
Description
-----------
This is the reachability when counting the total number of configurations possible at each pose.
Command-line
------------
.. shell-block:: openrave.py --database kinematicreachability --help
Class Definitions
-----------------
"""
from __future__ import with_statement # for python 2.5
__author__ = 'Rosen Diankov'
__copyright__ = 'Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)'
__license__ = 'Apache License, Version 2.0'
if not __openravepy_build_doc__:
from numpy import *
else:
from numpy import array
from ..openravepy_int import RaveFindDatabaseFile, IkParameterization, rotationMatrixFromQArray, poseFromMatrix
from ..openravepy_ext import transformPoints, quatArrayTDist
from .. import metaclass, pyANN
from ..misc import SpaceSamplerExtra
from . import DatabaseGenerator
from . import convexdecomposition, inversekinematics
import numpy
import time
import os.path
from os import makedirs
from heapq import nsmallest # for nth smallest element
from optparse import OptionParser
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
class ReachabilityModel(DatabaseGenerator):
"""Computes the robot manipulator's reachability space (stores it in 6D) and
offers several functions to use it effectively in planning."""
class QuaternionKDTree(metaclass.AutoReloader):
"""Artificially add more weight to the X,Y,Z translation dimensions"""
def __init__(self, poses,transmult):
self.numposes = len(poses)
self.transmult = transmult
self.itransmult = 1/transmult
searchposes = array(poses)
searchposes[:,4:] *= self.transmult # take translation errors more seriously
allposes = r_[searchposes,searchposes]
allposes[self.numposes:,0:4] *= -1
self.nnposes = pyANN.KDTree(allposes)
def kSearch(self,poses,k,eps):
"""returns distance squared"""
poses[:,4:] *= self.transmult
# neighs,dists = self.nnposes.kSearch(poses,k,eps)
neighs,dists = zip(*[self.nnposes.kSearch(pose,k,eps) for pose in poses])
neighs[neighs>=self.numposes] -= self.numposes
poses[:,4:] *= self.itransmult
return neighs,dists
def kFRSearch(self,pose,radiussq,k,eps):
"""returns distance squared"""
pose[4:] *= self.transmult
neighs,dists,kball = self.nnposes.kFRSearch(pose,radiussq,k,eps)
neighs[neighs>=self.numposes] -= self.numposes
pose[4:] *= self.itransmult
return neighs,dists,kball
def kFRSearchArray(self,poses,radiussq,k,eps):
"""returns distance squared"""
poses[:,4:] *= self.transmult
neighs,dists,kball = self.nnposes.kFRSearchArray(poses,radiussq,k,eps)
neighs[neighs>=self.numposes] -= self.numposes
poses[:,4:] *= self.itransmult
return neighs,dists,kball
xyzdelta = None # the sampling discretization of the XYZ space
reachabilitystats = None # Nx8 array of all the poses that are reachable. The first 7 columns are the quaternion and translation, the last column is the number of IK solutions present
    reachability3d = None # a KxKxK voxelized map that represents the density of solutions for each XYZ point. The higher the density, the more rotations the arm can be solved for. Use xyzdelta to convert from a 3D point to a voxel index.
def __init__(self,robot):
DatabaseGenerator.__init__(self,robot=robot)
self.ikmodel = inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
self.reachabilitystats = None
self.reachability3d = None
self.reachabilitydensity3d = None
self.pointscale = None
self.xyzdelta = None
self.quatdelta = None
self.kdtree6d = None
self.kdtree3d = None
def clone(self,envother):
clone = DatabaseGenerator.clone(self,envother)
return clone
def has(self):
return len(self.reachabilitydensity3d) > 0 and len(self.reachability3d) > 0 and len(self.reachabilitystats) > 0
def getversion(self):
return 5
def save(self):
try:
self.SaveHDF5()
except ImportError:
log.warn('python h5py library not found, will not be able to speedup database access')
self.SavePickle()
def load(self):
try:
if not self.ikmodel.load():
self.ikmodel.autogenerate()
try:
return self.LoadHDF5()
except ImportError:
log.warn('python h5py library not found, will not be able to speedup database access')
return self.LoadPickle()
except Exception, e:
log.warn(e)
return False
def SavePickle(self):
DatabaseGenerator.save(self,(self.reachabilitystats,self.reachabilitydensity3d,self.reachability3d, self.pointscale,self.xyzdelta,self.quatdelta))
def LoadPickle(self):
params = DatabaseGenerator.load(self)
if params is None:
return False
self.reachabilitystats,self.reachabilitydensity3d,self.reachability3d,self.pointscale,self.xyzdelta,self.quatdelta = params
return self.has()
def SaveHDF5(self):
import h5py
filename=self.getfilename(False)
log.info(u'saving model to %s',filename)
try:
makedirs(os.path.split(filename)[0])
except OSError:
pass
f=h5py.File(filename,'w')
try:
f['version'] = self.getversion()
f['reachabilitystats'] = self.reachabilitystats
f['reachabilitydensity3d'] = self.reachabilitydensity3d
f['reachability3d'] = self.reachability3d
f['pointscale'] = self.pointscale
f['xyzdelta'] = self.xyzdelta
f['quatdelta'] = self.quatdelta
finally:
f.close()
def LoadHDF5(self):
import h5py
filename = self.getfilename(True)
if len(filename) == 0:
return False
self._CloseDatabase()
try:
f=h5py.File(filename,'r')
if f['version'].value != self.getversion():
log.error('version is wrong %s!=%s ',f['version'],self.getversion())
return False
self.reachabilitystats = f['reachabilitystats']
self.reachabilitydensity3d = f['reachabilitydensity3d']
self.reachability3d = f['reachability3d']
self.pointscale = f['pointscale'].value
self.xyzdelta = f['xyzdelta'].value
self.quatdelta = f['quatdelta'].value
self._databasefile = f
f = None
return self.has()
except Exception,e:
            log.debug('LoadHDF5 for %s: %s',filename,e)
return False
finally:
if f is not None:
f.close()
def getfilename(self,read=False):
return RaveFindDatabaseFile(os.path.join('robot.'+self.robot.GetKinematicsGeometryHash(), 'reachability.' + self.manip.GetStructureHash() + '.pp'),read)
def autogenerateparams(self,options=None):
maxradius=None
translationonly=False
xyzdelta=None
quatdelta=None
usefreespace=False
if options is not None:
if options.maxradius is not None:
maxradius = options.maxradius
if options.xyzdelta is not None:
xyzdelta=options.xyzdelta
if options.quatdelta is not None:
quatdelta=options.quatdelta
usefreespace=options.usefreespace
if self.robot.GetKinematicsGeometryHash() == 'e829feb384e6417bbf5bd015f1c6b49a' or self.robot.GetKinematicsGeometryHash() == '22548f4f2ecf83e88ae7e2f3b2a0bd08': # wam 7dof
if maxradius is None:
maxradius = 1.1
elif self.robot.GetKinematicsGeometryHash() == 'e3b4168a72a78fa2c37dc414cabb933a': # pr2
if xyzdelta is None:
xyzdelta = 0.03
if quatdelta is None:
quatdelta = 0.2
return maxradius,translationonly,xyzdelta,quatdelta,usefreespace
def getOrderedArmJoints(self):
return [j for j in self.robot.GetDependencyOrderedJoints() if j.GetJointIndex() in self.manip.GetArmIndices()]
@staticmethod
def getManipulatorLinks(manip):
links = manip.GetChildLinks()
# add the links connecting to the base link.... although this reduces the freespace of the arm, it is better to have than not (ie waist on humanoid)
tobasejoints = manip.GetRobot().GetChain(0,manip.GetBase().GetIndex())
dofindices = [arange(joint.GetDOFIndex(),joint.GetDOFIndex()+joint.GetDOF()) for joint in tobasejoints if joint.GetDOFIndex() >= 0 and not joint.IsStatic()]
tobasedofs = hstack(dofindices) if len(dofindices) > 0 else array([],int)
robot = manip.GetRobot()
joints = robot.GetJoints()
for jindex in r_[manip.GetArmIndices(),tobasedofs]:
joint = joints[jindex]
if joint.GetFirstAttached() and not joint.GetFirstAttached() in links:
links.append(joint.GetFirstAttached())
if joint.GetSecondAttached() and not joint.GetSecondAttached() in links:
links.append(joint.GetSecondAttached())
# don't forget the rigidly attached links
for link in links[:]:
for newlink in link.GetRigidlyAttachedLinks():
if not newlink in links:
links.append(newlink)
return links
def generatepcg(self,maxradius=None,translationonly=False,xyzdelta=None,quatdelta=None,usefreespace=False):
"""Generate producer, consumer, and gatherer functions allowing parallelization
"""
if not self.ikmodel.load():
self.ikmodel.autogenerate()
        # disable every body but the target and robot
if xyzdelta is None:
xyzdelta=0.04
if quatdelta is None:
quatdelta=0.5
self.kdtree3d = self.kdtree6d = None
with self.robot:
Tbase = self.manip.GetBase().GetTransform()
Tbaseinv = linalg.inv(Tbase)
Trobot=dot(Tbaseinv,self.robot.GetTransform())
self.robot.SetTransform(Trobot) # set base link to global origin
maniplinks = self.getManipulatorLinks(self.manip)
for link in self.robot.GetLinks():
link.Enable(link in maniplinks)
# the axes' anchors are the best way to find the max radius
            # the best estimate of arm length is to sum up the distances between the anchors of consecutive joints in the chain
armjoints = self.getOrderedArmJoints()
baseanchor = armjoints[0].GetAnchor()
eetrans = self.manip.GetEndEffectorTransform()[0:3,3]
armlength = 0
for j in armjoints[::-1]:
armlength += sqrt(sum((eetrans-j.GetAnchor())**2))
eetrans = j.GetAnchor()
if maxradius is None:
maxradius = armlength+xyzdelta*sqrt(3.0)*1.05
allpoints,insideinds,shape,self.pointscale = self.UniformlySampleSpace(maxradius,delta=xyzdelta)
qarray = SpaceSamplerExtra().sampleSO3(quatdelta=quatdelta)
rotations = [eye(3)] if translationonly else rotationMatrixFromQArray(qarray)
self.xyzdelta = xyzdelta
self.quatdelta = 0
if not translationonly:
# for rotations, get the average distance to the nearest rotation
neighdists = []
for q in qarray:
neighdists.append(nsmallest(2,quatArrayTDist(q,qarray))[1])
self.quatdelta = mean(neighdists)
log.info('radius: %f, xyzsamples: %d, quatdelta: %f, rot samples: %d, freespace: %d',maxradius,len(insideinds),self.quatdelta,len(rotations),usefreespace)
self.reachabilitydensity3d = zeros(prod(shape))
self.reachability3d = zeros(prod(shape))
self.reachabilitystats = []
def producer():
T = eye(4)
for i,ind in enumerate(insideinds):
T[0:3,3] = allpoints[ind]+baseanchor
if mod(i,1000)==0:
log.info('%s/%d', i,len(insideinds))
yield ind,T
def consumer(ind,T):
with self.robot:
self.robot.SetTransform(Trobot)
reachabilitystats = []
numvalid = 0
numrotvalid = 0
T = array(T)
for rotation in rotations:
T[0:3,0:3] = rotation
if usefreespace:
solutions = self.manip.FindIKSolutions(T,0)
if solutions is not None:
reachabilitystats.append(r_[poseFromMatrix(T),len(solutions)])
numvalid += len(solutions)
numrotvalid += 1
else:
solution = self.manip.FindIKSolution(T,0)
if solution is not None:
reachabilitystats.append(r_[poseFromMatrix(T),1])
numvalid += 1
numrotvalid += 1
return ind,reachabilitystats, numvalid, numrotvalid
def gatherer(ind=None,reachabilitystats=None,numvalid=None,numrotvalid=None):
if ind is not None:
self.reachabilitystats += reachabilitystats
self.reachabilitydensity3d[ind] = numvalid/float(len(rotations))
self.reachability3d[ind] = numrotvalid/float(len(rotations))
else:
self.reachability3d = reshape(self.reachability3d,shape)
self.reachabilitydensity3d = reshape(self.reachabilitydensity3d,shape)
self.reachabilitystats = array(self.reachabilitystats)
return producer, consumer, gatherer, len(insideinds)
def show(self,showrobot=True,contours=[0.01,0.1,0.2,0.5,0.8,0.9,0.99],opacity=None,figureid=1, xrange=None,options=None):
try:
mlab = __import__('enthought.mayavi.mlab',fromlist=['mlab'])
except ImportError:
mlab = __import__('mayavi.mlab',fromlist=['mlab'])
mlab.figure(figureid,fgcolor=(0,0,0), bgcolor=(1,1,1),size=(1024,768))
mlab.clf()
log.info('max reachability: %r',numpy.max(self._GetValue(self.reachability3d)))
if options is not None:
reachability3d = minimum(self._GetValue(self.reachability3d)*options.showscale,1.0)
else:
reachability3d = minimum(self._GetValue(self.reachability3d),1.0)
reachability3d[0,0,0] = 1 # have at least one point be at the maximum
if xrange is None:
offset = array((0,0,0))
src = mlab.pipeline.scalar_field(reachability3d)
else:
offset = array((xrange[0]-1,0,0))
src = mlab.pipeline.scalar_field(r_[zeros((1,)+reachability3d.shape[1:]),reachability3d[xrange,:,:],zeros((1,)+reachability3d.shape[1:])])
for i,c in enumerate(contours):
mlab.pipeline.iso_surface(src,contours=[c],opacity=min(1,0.7*c if opacity is None else opacity[i]))
#mlab.pipeline.volume(mlab.pipeline.scalar_field(reachability3d*100))
if showrobot:
with self.robot:
Tbase = self.manip.GetBase().GetTransform()
Tbaseinv = linalg.inv(Tbase)
self.robot.SetTransform(dot(Tbaseinv,self.robot.GetTransform()))
baseanchor = self.getOrderedArmJoints()[0].GetAnchor()
trimesh = self.env.Triangulate(self.robot)
v = self.pointscale[0]*(trimesh.vertices-tile(baseanchor,(len(trimesh.vertices),1)))+self.pointscale[1]
mlab.triangular_mesh(v[:,0]-offset[0],v[:,1]-offset[1],v[:,2]-offset[2],trimesh.indices,color=(0.5,0.5,0.5))
mlab.show()
def UniformlySampleSpace(self,maxradius,delta):
nsteps = floor(maxradius/delta)
X,Y,Z = mgrid[-nsteps:nsteps,-nsteps:nsteps,-nsteps:nsteps]
allpoints = c_[X.flat,Y.flat,Z.flat]*delta
insideinds = flatnonzero(sum(allpoints**2,1)<maxradius**2)
return allpoints,insideinds,X.shape,array((1.0/delta,nsteps))
def ComputeNN(self,translationonly=False):
if translationonly:
if self.kdtree3d is None:
self.kdtree3d = pyANN.KDTree(self._GetValue(self.reachabilitystats)[:,4:7])
return self.kdtree3d
else:
if self.kdtree6d is None:
self.kdtree6d = self.QuaternionKDTree(self._GetValue(self.reachabilitystats)[:,0:7],5.0)
return self.kdtree6d
@staticmethod
def CreateOptionParser():
parser = DatabaseGenerator.CreateOptionParser()
parser.description='Computes the reachability region of a robot manipulator and python pickles it into a file.'
parser.usage='openrave.py --database kinematicreachability [options]'
parser.add_option('--maxradius',action='store',type='float',dest='maxradius',default=None,
help='The max radius of the arm to perform the computation')
parser.add_option('--xyzdelta',action='store',type='float',dest='xyzdelta',default=None,
                              help='The discretization of the XYZ space, in meters (default=0.04)')
parser.add_option('--quatdelta',action='store',type='float',dest='quatdelta',default=None,
                              help='The discretization of the quaternion space (default=0.5)')
parser.add_option('--usefreespace',action='store_true',dest='usefreespace',default=False,
help='If set, will record the number of IK solutions that exist for every transform rather than just finding one. More useful map, but much slower to produce')
parser.add_option('--showscale',action='store',type='float',dest='showscale',default=1.0,
help='Scales the reachability by this much in order to show colors better (default=%default)')
return parser
@staticmethod
def InitializeFromParser(Model=None,parser=None,*args,**kwargs):
if Model is None:
Model = lambda robot: ReachabilityModel(robot=robot)
if parser is None:
parser = ReachabilityModel.CreateOptionParser()
return DatabaseGenerator.InitializeFromParser(Model,parser,*args,**kwargs)
def run(*args,**kwargs):
"""Command-line execution of the example. ``args`` specifies a list of the arguments to the script.
"""
ReachabilityModel.RunFromParser(Model = lambda robot: ReachabilityModel(robot=robot), parser = ReachabilityModel.CreateOptionParser(), *args,**kwargs)
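# Hedged usage sketch (not part of the original module): programmatic use is
# assumed to mirror the other openravepy database generators -- load a cached
# reachability database if one exists, otherwise generate and save it.
def example_load_or_generate(robot):
    model = ReachabilityModel(robot=robot)
    if not model.load():
        model.autogenerate()
    return model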
| vitan/openrave | python/databases/kinematicreachability.py | Python | lgpl-3.0 | 20,281 | 0.015926 |
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestIDCT(TestCase):
def testInvalidParam(self):
self.assertConfigureFails(IDCT(), { 'inputSize': 0, 'outputSize': 2 })
self.assertConfigureFails(IDCT(), { 'inputSize': 6, 'outputSize': 0 })
def testRegression(self):
# values from Matlab/Octave
inputArray = [ 0.89442718, -0.60150099, -0.12078822, -0.37174806, 0.82789522]
expected = [ 0, 0, 1, 0, 1 ]
self.assertAlmostEqualVector(IDCT(outputSize=len(expected), inputSize = len(inputArray))(inputArray), expected, 1e-6)
def testLifteringRegression(self):
        # DCT III and liftering computed using the PLP and RASTA matlab toolbox.
        # A big tolerance is necessary due to the smoothing caused by the smaller number of bins in the DCT domain.
inputArray = [ 1.89736652, 0.95370573, 3.39358997, -3.35009956]
expected = [1, 1, 0, 0, 1]
self.assertAlmostEqualVector(IDCT(inputSize=len(inputArray),
outputSize=len(expected),
dctType = 3,
liftering = 22)(inputArray), expected, 1e0)
def testZero(self):
self.assertEqualVector(IDCT(outputSize=10)(zeros(5)), zeros(10))
def testInvalidInput(self):
self.assertComputeFails(IDCT(), []) # = testEmpty
self.assertComputeFails(IDCT(outputSize = 2, inputSize = 1), [ 0, 2, 4 ])
suite = allTests(TestIDCT)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| carthach/essentia | test/src/unittests/standard/test_idct.py | Python | agpl-3.0 | 2,364 | 0.015651 |
#!/usr/bin/env python3
import os
import sys
src_dir = os.path.abspath('src/')
sys.path.append(src_dir)
sys.ps1 = ''
sys.ps2 = ''
import id003
import termutils as t
import time
import logging
import configparser
import threading
import serial.tools.list_ports
from serial.serialutil import SerialException
from collections import OrderedDict
X_SIZE, Y_SIZE = t.get_size()
CONFIG_FILE = 'bv.ini'
CONFIG = configparser.ConfigParser()
CONFIG.read(CONFIG_FILE)
def get_denoms():
denom = 0
for k in CONFIG['bv.denom_inhibit']:
if CONFIG['bv.denom_inhibit'].getboolean(k):
denom |= id003.DENOMS[k]
return [denom, 0]
def get_security():
sec = 0
for k in CONFIG['bv.security']:
if CONFIG['bv.security'].getboolean(k):
sec |= id003.DENOMS[k]
return [sec, 0]
def get_directions():
dir = 0
for k in CONFIG['bv.direction']:
if CONFIG['bv.direction'].getboolean(k):
dir |= id003.DIRECTIONS[k]
return [dir]
def get_optional():
opt = 0
for k in CONFIG['bv.optional']:
if CONFIG['bv.optional'].getboolean(k):
opt |= id003.OPTIONS[k]
return [opt, 0]
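# --- Hedged example of the bv.ini layout this tool expects (assumed) ---
# Only the section names and the 'bv.direction'/'bv.optional' keys are taken
# from the code in this file; the denomination keys must match id003.DENOMS
# and every value shown is illustrative.
#
# [bv.denom_inhibit]
# denom_1 = yes
# denom_2 = yes
#
# [bv.security]
# denom_1 = no
# denom_2 = no
#
# [bv.direction]
# fa = no
# fb = no
# bb = no
# ba = no
#
# [bv.optional]
# power_recovery = no
# auto_retry = no
# 24_char_barcode = no
# near_full = yes
# entrance_event = yes
# encryption = no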
def kb_loop(bv, stdout_lock, bv_lock):
global CONFIG
print("Press Q at any time to quit, or H for help")
while True:
with stdout_lock:
opt = t.get_key(0.1)
if opt is not None:
opt = opt.lower()
if opt == b'q':
bv.bv_on = False
with open(CONFIG_FILE, 'w') as f:
CONFIG.write(f)
return
elif opt == b'h':
print("Q - Quit\n" "H - Help\n" "S - Settings menu\n"
"R - Reset and initialize bill validator\n"
"P - Pause bill validator\n" "M - Stop polling "
"and return to main menu")
elif opt == b'm':
return
elif opt == b's':
with stdout_lock:
logging.debug("Entered settings menu from status poll")
settings()
logging.debug("Exited settings menu")
bv.bv_status = None # print current status after returning
t.wipe()
elif opt == b'r':
with bv_lock:
logging.debug("Sending reset command")
status = None
while status != id003.ACK:
bv.send_command(id003.RESET)
status, data = bv.read_response()
time.sleep(0.2)
logging.debug("Received ACK")
if bv.req_status()[0] == id003.INITIALIZE:
denom = get_denoms()
sec = get_security()
dir = get_directions()
opt = get_optional()
logging.info("Initializing bill validator")
bv.initialize(denom, sec, dir, opt)
bv.bv_status = None
elif opt == b'p':
print("Not implemented yet")
def poll_loop(bv, stdout_lock, bv_lock, interval=0.2):
denom = get_denoms()
sec = get_security()
dir = get_directions()
opt = get_optional()
print("Please connect bill validator.")
bv.power_on(denom, sec, dir, opt)
if bv.init_status == id003.POW_UP:
logging.info("BV powered up normally.")
elif bv.init_status == id003.POW_UP_BIA:
logging.info("BV powered up with bill in acceptor.")
elif bv.init_status == id003.POW_UP_BIS:
logging.info("BV powered up with bill in stacker.")
while True:
poll_start = time.time()
if not bv.bv_on:
return
with bv_lock:
status, data = bv.req_status()
if (status, data) != bv.bv_status and status in bv.bv_events:
if stdout_lock.acquire(timeout=0.5):
bv.bv_events[status](data)
stdout_lock.release()
bv.bv_status = (status, data)
wait = interval - (time.time() - poll_start)
if wait > 0.0:
time.sleep(wait)
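# Hedged wiring sketch (assumed; the tool's actual entry point is not shown in
# this excerpt): kb_loop and poll_loop are meant to run concurrently against
# one BV object, sharing a stdout lock and a serial-port lock, roughly:
#
#   stdout_lock = threading.Lock()
#   bv_lock = threading.Lock()
#   poller = threading.Thread(target=poll_loop, args=(bv, stdout_lock, bv_lock))
#   poller.start()
#   kb_loop(bv, stdout_lock, bv_lock)
#   poller.join()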
def display_header(text):
t.set_pos(0, 0)
print(text.center(X_SIZE), end='')
print('=' * X_SIZE, end='')
def display_menu(menu, prompt='>>>', header='', info=''):
if len(menu) > Y_SIZE - 5:
raise ValueError("Too many menu options")
# print the header
t.wipe()
display_header(header)
# print the menu items
for k, v in menu.items():
print("{}) {}".format(k, v))
# print prompt and info
print(prompt, end=' ')
x, y = t.get_pos()
print('\n\n' + info)
t.set_pos(x, y)
# get user's choice
k = None
while k not in menu:
k = input('')
t.set_pos(x, y)
print(' ' * (X_SIZE - x), end='')
t.set_pos(x, y)
return k
def settings():
global CONFIG
t.wipe()
settings_menu = OrderedDict()
settings_menu['e'] = "Denomination enable/inhibit"
settings_menu['s'] = "Denomination security"
settings_menu['d'] = "Direction enable/inhibit"
settings_menu['o'] = "Optional functions"
settings_menu['b'] = "Bar code ticket options"
settings_menu['q'] = "Back"
choice = display_menu(settings_menu, '>>>', "Settings",
"Changes will take effect next time bill validator is initialized")
if choice == 'e':
denom_settings()
elif choice == 's':
security_settings()
elif choice == 'd':
direction_settings()
elif choice == 'o':
opt_settings()
elif choice == 'b':
t.wipe()
print("Barcode settings not available.")
input("Press enter to go back")
return
def opt_settings():
global CONFIG
t.wipe()
display_header("Optional function settings")
opts = dict()
set_opts = OrderedDict()
opt_txt = {
'power_recovery': "Power recovery:\t\t\t\t",
'auto_retry': "Auto-retry operaton:\t\t\t",
'24_char_barcode': "Accept 24-character barcodes:\t\t",
'near_full': "Stacker nearly full event:\t\t",
'entrance_event': "Entrance sensor event:\t\t\t",
'encryption': "Encryption:\t\t\t\t",
}
for i, k in enumerate(CONFIG['bv.optional'].keys()):
opt_enabled = CONFIG['bv.optional'].getboolean(k)
opts[i] = k
set_opts[k] = opt_enabled
print(opt_txt[k], end='')
start_x, start_y = t.get_pos()
if opt_enabled:
print('X')
else:
print('_')
print("\n\n_ = disabled, X = enabled")
print("\nPress Enter to save and go back, or Esc to go back without saving")
t.set_pos(start_x, 3)
max_opt = len(CONFIG['bv.optional']) - 1
cur_opt = 0
while True:
x, y = t.get_pos()
c = t.getch()
if c == b'\xe0H' and cur_opt > 0:
# up
t.set_pos(x, y-1)
cur_opt -= 1
elif c == b'\xe0P' and cur_opt < max_opt:
# down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'\t' and cur_opt == max_opt:
# wrap around to first option
t.set_pos(x, 3)
cur_opt = 0
elif c == b'\t':
# next option, same as down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'X' or c == b'x':
set_opts[opts[cur_opt]] = True
print('X', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b' ':
set_opts[opts[cur_opt]] = False
print('_', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b'\r':
# save and go back
CONFIG['bv.optional'] = set_opts
return
elif c == b'\x1b':
# escape, go back without saving
return
def direction_settings():
global CONFIG
t.wipe()
display_header("Direction ihibit settings")
opts = dict()
set_opts = OrderedDict()
for i, k in enumerate(CONFIG['bv.direction'].keys()):
dir_enabled = CONFIG['bv.direction'].getboolean(k)
opts[i] = k
set_opts[k] = dir_enabled
if k == 'fa':
print("Front side up, left side in:\t\t", end='')
elif k == 'fb':
print("Front side up, right side in:\t\t", end='')
elif k == 'bb':
print("Back side up, left side in:\t\t", end='')
elif k == 'ba':
print("Back side up, right side in:\t\t", end='')
start_x, start_y = t.get_pos()
if dir_enabled:
print('X')
else:
print('_')
print("\n\n_ = enabled, X = inhibited")
print("\nPress Enter to save and go back, or Esc to go back without saving")
t.set_pos(start_x, 3)
max_opt = len(CONFIG['bv.direction']) - 1
cur_opt = 0
while True:
x, y = t.get_pos()
c = t.getch()
if c == b'\xe0H' and cur_opt > 0:
# up
t.set_pos(x, y-1)
cur_opt -= 1
elif c == b'\xe0P' and cur_opt < max_opt:
# down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'\t' and cur_opt == max_opt:
# wrap around to first option
t.set_pos(x, 3)
cur_opt = 0
elif c == b'\t':
# next option, same as down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'X' or c == b'x':
set_opts[opts[cur_opt]] = True
print('X', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b' ':
set_opts[opts[cur_opt]] = False
print('_', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b'\r':
# save and go back
CONFIG['bv.direction'] = set_opts
return
elif c == b'\x1b':
# escape, go back without saving
return
def security_settings():
global CONFIG
t.wipe()
display_header("Denomination security settings")
opts = dict()
set_opts = OrderedDict()
for i, k in enumerate(CONFIG['bv.security'].keys()):
if id003.DENOM_MAP[k] in id003.ESCROW_USA:
denom = id003.ESCROW_USA[id003.DENOM_MAP[k]]
else:
denom = None
denom_enabled = CONFIG['bv.security'].getboolean(k)
opts[i] = k
set_opts[k] = denom_enabled
if denom is not None:
line = k + ' (' + denom + '):\t\t'
else:
line = k + ':\t\t\t'
if denom_enabled:
line += 'X'
else:
line += '_'
print(line)
print("\n\nX = high security, _ = low security")
print("\nPress Enter to save and go back, or Esc to go back without saving")
t.set_pos(25, 3)
max_opt = len(CONFIG['bv.security']) - 1
cur_opt = 0
while True:
x, y = t.get_pos()
c = t.getch()
if c == b'\xe0H' and cur_opt > 0:
# up
t.set_pos(x, y-1)
cur_opt -= 1
elif c == b'\xe0P' and cur_opt < max_opt:
# down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'\t' and cur_opt == max_opt:
# wrap around to first option
t.set_pos(x, 3)
cur_opt = 0
elif c == b'\t':
# next option, same as down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'X' or c == b'x':
set_opts[opts[cur_opt]] = True
print('X', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b' ':
set_opts[opts[cur_opt]] = False
print('_', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b'\r':
# save and go back
CONFIG['bv.security'] = set_opts
return
elif c == b'\x1b':
# escape, go back without saving
return
def denom_settings():
global CONFIG
t.wipe()
display_header("Denomination enable/inhibit settings")
opts = dict()
set_opts = OrderedDict()
for i, k in enumerate(CONFIG['bv.denom_inhibit'].keys()):
if id003.DENOM_MAP[k] in id003.ESCROW_USA:
denom = id003.ESCROW_USA[id003.DENOM_MAP[k]]
else:
denom = None
denom_enabled = CONFIG['bv.denom_inhibit'].getboolean(k)
opts[i] = k # index into this config section
set_opts[k] = denom_enabled # cache settings before writing to config
if denom is not None:
line = k + ' (' + denom + '):\t\t'
else:
line = k + ':\t\t\t'
if denom_enabled:
line += 'X'
else:
line += '_'
print(line)
print("\n\nIf a denom is inhibited through these settings that's not inhibited by the\n"
"appropriate DIP switch on the BV, the BV will go into INHIBIT status.")
print("\nPress Enter to save and go back, or Esc to go back without saving")
t.set_pos(25, 3)
max_opt = len(CONFIG['bv.denom_inhibit']) - 1
cur_opt = 0
while True:
x, y = t.get_pos()
c = t.getch()
if c == b'\xe0H' and cur_opt > 0:
# up
t.set_pos(x, y-1)
cur_opt -= 1
elif c == b'\xe0P' and cur_opt < max_opt:
# down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'\t' and cur_opt == max_opt:
# wrap around to first option
t.set_pos(x, 3)
cur_opt = 0
elif c == b'\t':
# next option, same as down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'X' or c == b'x':
set_opts[opts[cur_opt]] = True
print('X', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b' ':
set_opts[opts[cur_opt]] = False
print('_', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b'\r':
# save and go back
CONFIG['bv.denom_inhibit'] = set_opts
return
elif c == b'\x1b':
# escape, go back without saving
return
def main():
global CONFIG
comport = CONFIG['main']['comport']
poll_interval = float(CONFIG['main']['poll_interval'])
main_menu = OrderedDict()
main_menu['r'] = "Run"
main_menu['s'] = "Settings"
main_menu['c'] = "Select COM port"
main_menu['q'] = "Quit"
choice = display_menu(main_menu, '>>>', "ID-003 protocol analyzer", "Using COM port %s" % comport)
if choice == 'r':
t.wipe()
raw = CONFIG['main'].getboolean('debug')
try:
bv = id003.BillVal(comport, log_raw=raw, threading=True)
except SerialException:
print("Unable to open serial port")
q = 'x'
while q not in 'qm':
q = input("(Q)uit or (M)ain menu? ").lower()
if q == 'q':
return True
elif q == 'm':
return
stdout_lock = threading.Lock()
bv_lock = threading.Lock()
poll_args = (bv, stdout_lock, bv_lock, poll_interval)
poll_thread = threading.Thread(target=poll_loop, args=poll_args)
kb_args = (bv, stdout_lock, bv_lock)
kb_thread = threading.Thread(target=kb_loop, args=kb_args)
poll_thread.start()
while bv.bv_status != (id003.IDLE, b''):
# wait for power-up before starting keyboard loop
continue
kb_thread.start()
kb_thread.join()
if not bv.bv_on:
# kb_thread quit, not main menu
bv.com.close()
return True
else:
# terminate poll thread
bv.bv_on = False
poll_thread.join()
bv.com.close()
del poll_thread
del kb_thread
del bv
return
elif choice == 's':
settings()
return
elif choice == 'c':
t.wipe()
com_menu = OrderedDict()
ports = list(serial.tools.list_ports.comports())
for i, p in enumerate(ports):
com_menu[str(i+1)] = p.description
com_menu['q'] = "Back to main menu"
port = display_menu(com_menu, '>>>', "Select COM port")
if port == 'q':
return
else:
port = int(port) - 1
CONFIG['main']['comport'] = ports[port].device
return
elif choice == 'q':
return True
if __name__ == '__main__':
while not main():
continue
with open(CONFIG_FILE, 'w') as f:
# save configuration on program exit
CONFIG.write(f) | Kopachris/py-id003 | protocol_analyzer.py | Python | bsd-3-clause | 17,902 | 0.005307 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_user
short_description: Manage user accounts and user attributes on a BIG-IP.
description:
- Manage user accounts and user attributes on a BIG-IP.
version_added: "2.4"
options:
full_name:
description:
- Full name of the user.
username_credential:
description:
- Name of the user to create, remove or modify.
required: True
aliases:
- name
password_credential:
description:
- Set the users password to this unencrypted value.
C(password_credential) is required when creating a new account.
shell:
description:
- Optionally set the users shell.
choices:
- bash
- none
- tmsh
partition_access:
description:
- Specifies the administrative partition to which the user has access.
C(partition_access) is required when creating a new account.
Should be in the form "partition:role". Valid roles include
C(acceleration-policy-editor), C(admin), C(application-editor), C(auditor)
C(certificate-manager), C(guest), C(irule-manager), C(manager), C(no-access)
C(operator), C(resource-admin), C(user-manager), C(web-application-security-administrator),
and C(web-application-security-editor). Partition portion of tuple should
be an existing partition or the value 'all'.
state:
description:
- Whether the account should exist or not, taking action if the state is
different from what is stated.
default: present
choices:
- present
- absent
update_password:
description:
- C(always) will allow to update passwords if the user chooses to do so.
C(on_create) will only set the password for newly created users.
default: on_create
choices:
- always
- on_create
notes:
- Requires the requests Python package on the host. This is as easy as
pip install requests
- Requires BIG-IP versions >= 12.0.0
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = '''
- name: Add the user 'johnd' as an admin
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
password_credential: "password"
full_name: "John Doe"
partition_access: "all:admin"
update_password: "on_create"
state: "present"
delegate_to: localhost
- name: Change the user "johnd's" role and shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
partition_access: "NewPartition:manager"
shell: "tmsh"
state: "present"
delegate_to: localhost
- name: Make the user 'johnd' an admin and set to advanced shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
partition_access: "all:admin"
shell: "bash"
state: "present"
delegate_to: localhost
- name: Remove the user 'johnd'
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
state: "absent"
delegate_to: localhost
- name: Update password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "johnd"
password_credential: "newsupersecretpassword"
delegate_to: localhost
# Note that the second time this task runs, it would fail because
# The password has been changed. Therefore, it is recommended that
# you either,
#
# * Put this in its own playbook that you run when you need to
# * Put this task in a `block`
# * Include `ignore_errors` on this task
- name: Change the Admin password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "admin"
password_credential: "NewSecretPassword"
delegate_to: localhost
'''
RETURN = '''
full_name:
description: Full name of the user
returned: changed and success
type: string
sample: "John Doe"
partition_access:
description:
- List of strings containing the user's roles and which partitions they
are applied to. They are specified in the form "partition:role".
returned: changed and success
type: list
sample: "['all:admin']"
shell:
description: The shell assigned to the user account
returned: changed and success
type: string
sample: "tmsh"
'''
from distutils.version import LooseVersion
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'partitionAccess': 'partition_access',
'description': 'full_name',
}
updatables = [
'partition_access', 'full_name', 'shell', 'password_credential'
]
returnables = [
'shell', 'partition_access', 'full_name', 'username_credential'
]
api_attributes = [
'shell', 'partitionAccess', 'description', 'name', 'password'
]
@property
def partition_access(self):
"""Partition access values will require some transformation.
This operates on both user and device returned values.
        If an element is a user-supplied string in the "name:role" format,
        split it and build a dictionary from the two parts. If an element
        is already a dictionary (returned from the device or previously
        processed), drop its nameReference key if present and append the
        dictionary to the result list. A standalone sketch of this
        transformation follows this class.
:returns list of dictionaries
"""
if self._values['partition_access'] is None:
return
result = []
part_access = self._values['partition_access']
for access in part_access:
if isinstance(access, dict):
if 'nameReference' in access:
del access['nameReference']
result.append(access)
else:
result.append(access)
if isinstance(access, str):
acl = access.split(':')
if acl[0].lower() == 'all':
acl[0] = 'all-partitions'
value = dict(
name=acl[0],
role=acl[1]
)
result.append(value)
return result
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
elif api_attribute == 'password':
result[api_attribute] = self._values['password_credential']
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
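# Illustrative sketch (not part of the original module): a standalone restatement of the
# "partition:role" string handling documented in Parameters.partition_access above. The
# partition and role names below are hypothetical examples.
def _partition_access_example():
    raw = ['all:admin', 'Common:guest']
    result = []
    for access in raw:
        name, role = access.split(':')
        if name.lower() == 'all':
            name = 'all-partitions'
        result.append(dict(name=name, role=role))
    assert result == [
        {'name': 'all-partitions', 'role': 'admin'},
        {'name': 'Common', 'role': 'guest'},
    ]
    return result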
class ModuleManager(object):
def __init__(self, client):
self.client = client
def exec_module(self):
if self.is_version_less_than_13():
manager = UnparitionedManager(self.client)
else:
manager = PartitionedManager(self.client)
return manager.exec_module()
def is_version_less_than_13(self):
"""Checks to see if the TMOS version is less than 13
Anything less than BIG-IP 13.x does not support users
on different partitions.
:return: Bool
"""
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('13.0.0'):
return True
else:
return False
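# Illustrative sketch (not part of the original module): the version gate above relies on
# distutils' LooseVersion ordering of TMOS version strings. The versions below are examples only.
def _loose_version_examples():
    assert LooseVersion('12.1.2') < LooseVersion('13.0.0')
    assert not LooseVersion('13.1.0') < LooseVersion('13.0.0')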
class BaseManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
if key == 'password_credential':
new_pass = getattr(self.want, key)
if self.want.update_password == 'always':
changed[key] = new_pass
else:
# We set the shell parameter to 'none' when bigip does
# not return it.
if self.want.shell == 'bash':
self.validate_shell_parameter()
if self.want.shell == 'none' and \
self.have.shell is None:
self.have.shell = 'none'
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def validate_shell_parameter(self):
"""Method to validate shell parameters.
        Raise when the shell attribute is set to 'bash' and none of the
        user's roles is 'admin' or 'resource-admin'.
NOTE: Admin and Resource-Admin roles automatically enable access to
all partitions, removing any other roles that the user might have
had. There are few other roles which do that but those roles,
do not allow bash.
"""
err = "Shell access is only available to " \
"'admin' or 'resource-admin' roles"
permit = ['admin', 'resource-admin']
if self.have is not None:
have = self.have.partition_access
if not any(r['role'] for r in have if r['role'] in permit):
raise F5ModuleError(err)
# This check is needed if we want to modify shell AND
# partition_access attribute.
# This check will also trigger on create.
if self.want.partition_access is not None:
want = self.want.partition_access
if not any(r['role'] for r in want if r['role'] in permit):
raise F5ModuleError(err)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def validate_create_parameters(self):
"""Password credentials and partition access are mandatory,
when creating a user resource.
"""
if self.want.password_credential and \
self.want.update_password != 'on_create':
err = "The 'update_password' option " \
"needs to be set to 'on_create' when creating " \
"a resource with a password."
raise F5ModuleError(err)
if self.want.partition_access is None:
err = "The 'partition_access' option " \
"is required when creating a resource."
raise F5ModuleError(err)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the user")
return True
def create(self):
self.validate_create_parameters()
if self.want.shell == 'bash':
self.validate_shell_parameter()
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
class UnparitionedManager(BaseManager):
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.auth.users.user.create(**params)
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.auth.users.user.load(name=self.want.name)
result.modify(**params)
def read_current_from_device(self):
tmp_res = self.client.api.tm.auth.users.user.load(name=self.want.name)
result = tmp_res.attrs
return Parameters(result)
def exists(self):
return self.client.api.tm.auth.users.user.exists(name=self.want.name)
def remove_from_device(self):
result = self.client.api.tm.auth.users.user.load(name=self.want.name)
if result:
result.delete()
class PartitionedManager(BaseManager):
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.auth.users.user.create(
partition=self.want.partition, **params
)
def _read_one_resource_from_collection(self):
collection = self.client.api.tm.auth.users.get_collection(
requests_params=dict(
params="$filter=partition+eq+'{0}'".format(self.want.partition)
)
)
collection = [x for x in collection if x.name == self.want.name]
if len(collection) == 1:
resource = collection.pop()
return resource
elif len(collection) == 0:
raise F5ModuleError(
"No accounts with the provided name were found"
)
else:
raise F5ModuleError(
"Multiple users with the provided name were found!"
)
def update_on_device(self):
params = self.want.api_params()
try:
resource = self._read_one_resource_from_collection()
resource.modify(**params)
except iControlUnexpectedHTTPError as ex:
# TODO: Patch this in the F5 SDK so that I dont need this check
if 'updated successfully' not in str(ex):
raise F5ModuleError(
"Failed to update the specified user"
)
def read_current_from_device(self):
resource = self._read_one_resource_from_collection()
result = resource.attrs
return Parameters(result)
def exists(self):
collection = self.client.api.tm.auth.users.get_collection(
requests_params=dict(
params="$filter=partition+eq+'{0}'".format(self.want.partition)
)
)
collection = [x for x in collection if x.name == self.want.name]
if len(collection) == 1:
result = True
elif len(collection) == 0:
result = False
else:
raise F5ModuleError(
"Multiple users with the provided name were found!"
)
return result
def remove_from_device(self):
resource = self._read_one_resource_from_collection()
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(
required=True,
aliases=['username_credential']
),
password_credential=dict(
no_log=True,
),
partition_access=dict(
type='list'
),
full_name=dict(),
shell=dict(
choices=['none', 'bash', 'tmsh']
),
update_password=dict(
default='always',
choices=['always', 'on_create']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| mryanlam/f5-ansible | library/bigip_user.py | Python | gpl-3.0 | 18,876 | 0.000371 |
# ----------------------------------------------------------------------
# Copyright (c) 2010-2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
from extensions.sfa.util.xrn import hrn_authfor_hrn
'''
Credential creation and verification utilities.
'''
import os
import sys
import datetime
import dateutil
from extensions.sfa.trust import credential as cred
from extensions.sfa.trust import gid
from extensions.sfa.trust import rights
from extensions.sfa.trust.certificate import Certificate
from extensions.sfa.trust.credential_factory import CredentialFactory
from extensions.sfa.trust.abac_credential import ABACCredential
from extensions.sfa.trust.speaksfor_util import determine_speaks_for_ex
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
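# Illustrative sketch (not part of the original module): naiveUTC() converts an aware datetime
# to UTC and strips the timezone, so a timestamp at UTC+02:00 comes back two hours earlier and
# naive. The timestamp below is arbitrary.
def _naiveUTC_example():
    from dateutil import tz
    aware = datetime.datetime(2014, 6, 1, 12, 0, tzinfo=tz.tzoffset(None, 2 * 3600))
    naive = naiveUTC(aware)
    assert naive == datetime.datetime(2014, 6, 1, 10, 0) and naive.tzinfo is None
    return naive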
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_fileordir is a trusted root cert file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir):
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files?
#print file == CredentialVerifier.CATEDCERTSFNAME
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
#self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
@classmethod
def getCAsFileFromDir(cls, caCerts):
'''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
return comboFullPath
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges, options=None):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
def make_cred(cred_string):
credO = None
try:
credO = CredentialFactory.createCred(credString=cred_string)
except Exception, e:
print(e)
return credO
root_certs = \
[Certificate(filename=root_cert_file) \
for root_cert_file in self.root_cert_files]
caller_gid = gid.GID(string=gid_string)
# Potentially, change gid_string to be the cert of the actual user
# if this is a 'speaks-for' invocation
speaksfor_gid = \
determine_speaks_for_ex(None, \
cred_strings, # May include ABAC speaks_for credential
caller_gid, # Caller cert (may be the tool 'speaking for' user)
options, # May include 'geni_speaking_for' option with user URN
root_certs
)
if caller_gid.get_subject() != speaksfor_gid.get_subject():
# speaksfor_urn = speaksfor_gid.get_urn()
caller_gid = speaksfor_gid
# Remove the abac credentials
cred_strings = [cred_string for cred_string in cred_strings
if CredentialFactory.getType(cred_string) == cred.Credential.SFA_CREDENTIAL_TYPE]
return self.verify(caller_gid,
map(make_cred, cred_strings),
target_urn,
privileges)
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
# target_urn, cred_target_urn)
result = target_urn == cred_target_urn
if result:
# self.logger.debug('Target URNs match.')
pass
return result
def verify_privileges(self, privileges, credential):
''' Return True iff the given credential gives the privilege
to perform ALL of the privileges (actions) in the given list.
In particular, the given list of 'privileges' is really a list
of names of operations. The privileges in credentials are
each turned in to Rights objects (see sfa/trust/rights.py).
And the SFA rights table is used to map from names of privileges
as specified in credentials, to names of operations.'''
result = True
privs = credential.get_privileges()
for priv in privileges:
if not privs.can_perform(priv):
result = False
return result
def verify(self, gid, credentials, target_urn, privileges):
'''Verify that the given Source GID supplied at least one credential
in the given list of credentials that has all the privileges required
in the privileges list on the given target.
IE if any of the supplied credentials has a caller that matches gid
and a target that matches target_urn, and has all the privileges in
the given list, then return the list of credentials that were ok.
Throw an Exception if we fail to verify any credential.'''
# Note that here we treat a list of credentials as being options
# Alternatively could accumulate privileges for example
# The semantics of the list of credentials is under specified.
result = list()
failure = ""
tried_creds = ""
if len(credentials) == 0:
failure = "No credentials found"
for cred in credentials:
if cred is None:
failure = "Credential was unparseable"
continue
if cred.get_cred_type() == cred.SFA_CREDENTIAL_TYPE:
cS = cred.get_gid_caller().get_urn()
elif cred.get_cred_type() == ABACCredential.ABAC_CREDENTIAL_TYPE:
cS = cred.get_summary_tostring()
else:
cS = "Unknown credential type %s" % cred.get_cred_type()
if tried_creds != "":
tried_creds = "%s, %s" % (tried_creds, cS)
else:
tried_creds = cS
if cred.get_cred_type() != cred.SFA_CREDENTIAL_TYPE:
failure = "Not an SFA credential: " + cS
continue
# if not self.verify_source(gid, cred):
# failure = "Cred %s fails: Credential doesn't grant rights to you (%s), but to %s (over object %s)" % (cred.get_gid_caller().get_urn(), gid.get_urn(), cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
# continue
if not self.verify_target(target_urn, cred):
failure = "Cred granting rights to %s on %s fails: It grants permissions over a different target, not %s (URNs dont match)" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), target_urn)
continue
if not self.verify_privileges(privileges, cred):
failure = "Cred for %s over %s doesn't provide sufficient privileges" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
try:
if not cred.verify(self.root_cert_files):
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
continue
except Exception, exc:
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
continue
# If got here it verified
result.append(cred)
if result and result != list():
# At least one credential verified ok and was added to the list
# return that list
return result
else:
# We did not find any credential with sufficient privileges
# Raise an exception.
fault_code = 'Insufficient privileges'
fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
# GCF ticket #120 - do not raise an xmlrpclib Fault here -
# just an Exception. But the caller may want to turn this
# into one
# raise xmlrpclib.Fault(fault_code, fault_string)
raise Exception(fault_string)
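# Illustrative sketch (not part of the original module): typical wiring of the verifier above --
# build it from a directory of trusted root certificates, then check a caller's credentials for
# one operation. The path, URN and privilege name are hypothetical placeholders.
def _credential_verifier_example(gid_string, cred_strings):
    verifier = CredentialVerifier('/usr/share/geni/trusted_roots')
    slice_urn = 'urn:publicid:IDN+example:site+slice+demo'
    return verifier.verify_from_strings(gid_string, cred_strings, slice_urn,
                                        ('listresources',), options=None)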
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
'''Create and Return a Credential object issued by given key/cert for the given caller
and object GID objects, given life in seconds, and given type.
Privileges are determined by type per sfa/trust/rights.py
Privileges are delegatable if requested.'''
# FIXME: Validate args: my gids, >0 life,
# type of cred one I can issue
# and readable key and cert files
if caller_gid is None:
raise ValueError("Missing Caller GID")
if object_gid is None:
raise ValueError("Missing Object GID")
if expiration is None:
raise ValueError("Missing expiration")
naive_expiration = naiveUTC(expiration)
duration = naive_expiration - datetime.datetime.utcnow()
life_secs = duration.seconds + duration.days * 24 * 3600
if life_secs < 1:
raise ValueError("Credential expiration is in the past")
if trusted_roots is None:
raise ValueError("Missing list of trusted roots")
if typename is None or typename.strip() == '':
raise ValueError("Missing credential type")
typename = typename.strip().lower()
if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
raise ValueError("Unknown credential type %s" % typename)
if not os.path.isfile(issuer_keyfile):
raise ValueError("Cant read issuer key file %s" % issuer_keyfile)
if not os.path.isfile(issuer_certfile):
raise ValueError("Cant read issuer cert file %s" % issuer_certfile)
issuer_gid = gid.GID(filename=issuer_certfile)
if not (object_gid.get_urn() == issuer_gid.get_urn() or
(issuer_gid.get_type().find('authority') == 0 and
hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
raise ValueError("Issuer not authorized to issue credential: \
Issuer=%s Target=%s" %
(issuer_gid.get_urn(), object_gid.get_urn()))
ucred = cred.Credential()
# FIXME: Validate the caller_gid and object_gid
# are my user and slice
# Do get_issuer and compare to the issuer cert?
# Or do gid.is_signed_by_cert(issuer_certfile)?
ucred.set_gid_caller(caller_gid)
ucred.set_gid_object(object_gid)
ucred.set_expiration(expiration)
# Use sfa/trust/rights.py to figure out what privileges
# the credential should have.
# user means refresh, resolve, info
# per the privilege_table that lets users do
# remove, update, resolve, list, getcredential,
# listslices, listnodes, getpolicy
# Note that it does not allow manipulating slivers
# And every right is delegatable if any are delegatable (default False)
privileges = rights.determine_rights(typename, None)
privileges.delegate_all_privileges(delegatable)
ucred.set_privileges(privileges)
ucred.encode()
ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
ucred.sign()
try:
ucred.verify(trusted_roots)
except Exception, exc:
raise Exception("Create Credential failed to verify new \
credential from trusted roots: %s" % exc)
return ucred
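# Illustrative sketch (not part of the original module): one way create_credential() might be
# driven -- load the caller and object GIDs from certificate files, choose an expiration, and
# sign with the issuing authority's key and certificate. All paths, the lifetime and the 'slice'
# credential type are hypothetical placeholders.
def _create_slice_credential_example():
    caller = gid.GID(filename='/etc/geni/user_cert.pem')
    slice_gid = gid.GID(filename='/etc/geni/slice_cert.pem')
    expiration = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
    return create_credential(caller, slice_gid, expiration, 'slice',
                             '/etc/geni/authority_key.pem',
                             '/etc/geni/authority_cert.pem',
                             trusted_roots=['/etc/geni/trusted_roots/ca.pem'],
                             delegatable=False)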
| ict-felix/stack | modules/resource/orchestrator/src/credentials/cred_util.py | Python | apache-2.0 | 16,348 | 0.00312 |
#coding=utf-8
"""
Command-line interface utilities for Trigger tools. Intended for re-usable
pieces of code like user prompts, that don't fit in other utils modules.
"""
__author__ = 'Jathan McCollum'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan.mccollum@teamaol.com'
__copyright__ = 'Copyright 2006-2012, AOL Inc.'
import datetime
from fcntl import ioctl
import os
import pwd
from pytz import timezone
import struct
import sys
import termios
import time
import tty
# Exports
__all__ = ('yesno', 'get_terminal_width', 'get_terminal_size', 'Whirlygig',
'NullDevice', 'print_severed_head', 'min_sec', 'pretty_time',
'proceed', 'get_user')
# Functions
def yesno(prompt, default=False, autoyes=False):
"""
Present a yes-or-no prompt, get input, and return a boolean.
The ``default`` argument is ignored if ``autoyes`` is set.
:param prompt:
Prompt text
:param default:
Yes if True; No if False
:param autoyes:
Automatically return True
Default behavior (hitting "enter" returns ``False``)::
>>> yesno('Blow up the moon?')
Blow up the moon? (y/N)
False
Reversed behavior (hitting "enter" returns ``True``)::
>>> yesno('Blow up the moon?', default=True)
Blow up the moon? (Y/n)
True
Automatically return ``True`` with ``autoyes``; no prompt is displayed::
>>> yesno('Blow up the moon?', autoyes=True)
True
"""
if autoyes:
return True
sys.stdout.write(prompt)
if default:
sys.stdout.write(' (Y/n) ')
else:
sys.stdout.write(' (y/N) ')
sys.stdout.flush()
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
tty.setraw(fd)
yn = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSANOW, attr)
print ''
if yn in ('y', 'Y'):
return True
elif yn in ('n', 'N'):
return False
else:
return default
def proceed():
"""Present a proceed prompt. Return ``True`` if Y, else ``False``"""
return raw_input('\nDo you wish to proceed? [y/N] ').lower().startswith('y')
def get_terminal_width():
"""Find and return stdout's terminal width, if applicable."""
try:
width = struct.unpack("hhhh", ioctl(1, termios.TIOCGWINSZ, ' '*8))[1]
except IOError:
width = sys.maxint
return width
def get_terminal_size():
"""Find and return stdouts terminal size as (height, width)"""
rows, cols = os.popen('stty size', 'r').read().split()
return rows, cols
def get_user():
"""Return the name of the current user."""
return pwd.getpwuid(os.getuid())[0]
def print_severed_head():
"""
Prints a demon holding a severed head. Best used when things go wrong, like
production-impacting network outages caused by fat-fingered ACL changes.
Thanks to Jeff Sullivan for this best error message ever.
"""
print r"""
_( (~\
_ _ / ( \> > \
-/~/ / ~\ :; \ _ > /(~\/
|| | | /\ ;\ |l _____ |; ( \/ > >
_\\)\)\)/ ;;; `8o __-~ ~\ d| \ //
///(())(__/~;;\ "88p;. -. _\_;.oP (_._/ /
(((__ __ \\ \ `>,% (\ (\./)8" ;:' i
)))--`.'-- (( ;,8 \ ,;%%%: ./V^^^V' ;. ;.
((\ | /)) .,88 `: ..,,;;;;,-::::::'_::\ ||\ ;[8: ;
)| ~-~ |(|(888; ..``'::::8888oooooo. :\`^^^/,,~--._ |88:: |
|\ -===- /| \8;; ``:. oo.8888888888:`((( o.ooo8888Oo;:;:' |
|_~-___-~_| `-\. ` `o`88888888b` )) 888b88888P""' ;
; ~~~~;~~ "`--_`. b`888888888;(.,"888b888" ..::;-'
; ; ~"-.... b`8888888:::::.`8888. .:;;;''
; ; `:::. `:::OOO:::::::.`OO' ;;;''
: ; `. "``::::::'' .'
; `. \_ /
; ; +: ~~-- `:' -'; ACL LOADS FAILED
`: : .::/
; ;;+_ :::. :..;;; YOU LOSE
;;;;;;,;;;;;;;;,;
"""
def pretty_time(t):
"""
Print a pretty version of timestamp, including timezone info. Expects
the incoming datetime object to have proper tzinfo.
:param t:
A ``datetime.datetime`` object
>>> import datetime
>>> from pytz import timezone
>>> localzone = timezone('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> t = datetime.datetime.now(localzone)
>>> print t
2011-07-19 12:40:30.820920-04:00
>>> print pretty_time(t)
09:40 PDT
>>> t = datetime.datetime(2011,07,20,04,13,tzinfo=localzone)
>>> print t
2011-07-20 04:13:00-05:00
>>> print pretty_time(t)
tomorrow 02:13 PDT
"""
from trigger.conf import settings
localzone = timezone(os.environ.get('TZ', settings.BOUNCE_DEFAULT_TZ))
t = t.astimezone(localzone)
midnight = datetime.datetime.combine(datetime.datetime.now(), datetime.time(tzinfo=localzone))
midnight += datetime.timedelta(1)
if t < midnight:
return t.strftime('%H:%M %Z')
elif t < midnight + datetime.timedelta(1):
return t.strftime('tomorrow %H:%M %Z')
elif t < midnight + datetime.timedelta(6):
return t.strftime('%A %H:%M %Z')
else:
return t.strftime('%Y-%m-%d %H:%M %Z')
def min_sec(secs):
"""
Takes an epoch timestamp and returns string of minutes:seconds.
:param secs:
Timestamp (in seconds)
>>> import time
>>> start = time.time() # Wait a few seconds
>>> finish = time.time()
>>> min_sec(finish - start)
'0:11'
"""
secs = int(secs)
return '%d:%02d' % (secs / 60, secs % 60)
def setup_tty_for_pty(func):
"""
Sets up tty for raw mode while retaining original tty settings and then
starts the reactor to connect to the pty. Upon exiting pty, restores
original tty settings.
:param func:
The callable to run after the tty is ready, such as ``reactor.run``
"""
# Preserve original tty settings
stdin_fileno = sys.stdin.fileno()
old_ttyattr = tty.tcgetattr(stdin_fileno)
try:
# Enter raw mode on the local tty.
tty.setraw(stdin_fileno)
raw_ta = tty.tcgetattr(stdin_fileno)
raw_ta[tty.LFLAG] |= tty.ISIG
raw_ta[tty.OFLAG] |= tty.OPOST | tty.ONLCR
# Pass ^C through so we can abort traceroute, etc.
raw_ta[tty.CC][tty.VINTR] = '\x18' # ^X is the new ^C
# Ctrl-Z is used by a lot of vendors to exit config mode
raw_ta[tty.CC][tty.VSUSP] = 0 # disable ^Z
tty.tcsetattr(stdin_fileno, tty.TCSANOW, raw_ta)
# Execute our callable here
func()
finally:
# Restore original tty settings
tty.tcsetattr(stdin_fileno, tty.TCSANOW, old_ttyattr)
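# Illustrative sketch (not part of the original module): setup_tty_for_pty() wraps any callable
# so the local tty is in raw mode while it runs (e.g. a blocking reactor loop). The callable
# below is a stand-in example and nothing here runs at import time.
def _setup_tty_for_pty_example():
    def interact():
        print 'tty is raw here; hand control to the pty/reactor loop'
    setup_tty_for_pty(interact)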
def update_password_and_reconnect(hostname):
"""
Prompts the user to update their password and reconnect to the target
device
:param hostname: Hostname of the device to connect to.
"""
if yesno('Authentication failed, would you like to update your password?',
default=True):
from trigger import tacacsrc
tacacsrc.update_credentials(hostname)
if yesno('\nReconnect to %s?' % hostname, default=True):
# Replaces the current process w/ same pid
os.execl(sys.executable, sys.executable, *sys.argv)
# Classes
class NullDevice(object):
"""
Used to supress output to ``sys.stdout`` (aka ``print``).
Example::
>>> from trigger.utils.cli import NullDevice
>>> import sys
>>> print "1 - this will print to STDOUT"
1 - this will print to STDOUT
>>> original_stdout = sys.stdout # keep a reference to STDOUT
>>> sys.stdout = NullDevice() # redirect the real STDOUT
>>> print "2 - this won't print"
>>>
>>> sys.stdout = original_stdout # turn STDOUT back on
>>> print "3 - this will print to SDTDOUT"
3 - this will print to SDTDOUT
"""
def write(self, s): pass
class Whirlygig(object):
"""
Prints a whirlygig for use in displaying pending operation in a command-line tool.
Guaranteed to make the user feel warm and fuzzy and be 1000% bug-free.
:param start_msg: The status message displayed to the user (e.g. "Doing stuff:")
:param done_msg: The completion message displayed upon completion (e.g. "Done.")
:param max: Integer of the number of whirlygig repetitions to perform
Example::
>>> Whirlygig("Doing stuff:", "Done.", 12).run()
"""
def __init__(self, start_msg="", done_msg="", max=100):
self.unbuff = os.fdopen(sys.stdout.fileno(), 'w', 0)
self.start_msg = start_msg
self.done_msg = done_msg
self.max = max
self.whirlygig = ['|', '/', '-', '\\']
self.whirl = self.whirlygig[:]
self.first = False
def do_whirl(self, whirl):
if not self.first:
self.unbuff.write(self.start_msg + " ")
self.first = True
self.unbuff.write('\b%s' % whirl.pop(0))
def run(self):
"""Executes the whirlygig!"""
cnt = 1
while cnt <= self.max:
try:
self.do_whirl(self.whirl)
except IndexError:
self.whirl = self.whirlygig[:]
time.sleep(.1)
cnt += 1
print '\b' + self.done_msg
| sysbot/trigger | trigger/utils/cli.py | Python | bsd-3-clause | 9,769 | 0.001843 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns('apps.thirdparty.smart_selects.views',
url(r'^all/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain_all', name='chained_filter_all'),
url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'),
url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<manager>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'),
)
| cidadania/e-cidadania | src/apps/thirdparty/smart_selects/urls.py | Python | apache-2.0 | 1,198 | 0.003339 |
from paddle.trainer_config_helpers import *
settings(learning_rate=1e-4, batch_size=1000)
seq_in = data_layer(name='input', size=200)
labels = data_layer(name='labels', size=5000)
probs = data_layer(name='probs', size=10)
xe_label = data_layer(name='xe-label', size=10)
hidden = fc_layer(input=seq_in, size=4)
outputs(
ctc_layer(
input=seq_in, label=labels),
warp_ctc_layer(
input=seq_in, label=labels, blank=0),
crf_layer(
input=hidden, label=data_layer(
name='crf_label', size=4)),
rank_cost(
left=data_layer(
name='left', size=1),
right=data_layer(
name='right', size=1),
label=data_layer(
name='label', size=1)),
lambda_cost(
input=data_layer(
name='list_feature', size=100),
score=data_layer(
name='list_scores', size=1)),
cross_entropy(
input=probs, label=xe_label),
cross_entropy_with_selfnorm(
input=probs, label=xe_label),
huber_regression_cost(
input=seq_in, label=labels),
huber_classification_cost(
input=data_layer(
name='huber_probs', size=1),
label=data_layer(
name='huber_label', size=1)),
multi_binary_label_cross_entropy(
input=probs, label=xe_label),
sum_cost(input=hidden),
nce_layer(
input=hidden, label=labels))
| lispc/Paddle | python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py | Python | apache-2.0 | 1,402 | 0 |
import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
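# Illustrative sketch (not part of the original plugin): get_quote_from_ref() expects the
# "{book} {chapter}:{verse}" form described in HELP_STR. The references below are sample inputs;
# the first resolves only once the bible JSON has been loaded by plugin_setup().
def _reference_examples():
    ok = get_quote_from_ref("Genesis", "1:3")
    bad = get_quote_from_ref("Genesis", "one:three")  # rejected once the book is found
    return ok, bad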
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
| sentriz/steely | steely/plugins/bible/main.py | Python | gpl-3.0 | 3,384 | 0.001182 |
#!/usr/bin/python
import os,sys,re
#Check the OS Version
RELEASE_FILE = "/etc/redhat-release"
RWM_FILE = "/etc/httpd/conf.modules.d/00-base.conf"
if os.path.isfile(RELEASE_FILE):
f=open(RELEASE_FILE,"r")
rel_list = f.read().split()
if rel_list[2] == "release" and tuple(rel_list[3].split(".")) < ('8','5'):
print("so far good")
else:
raise("Unable to find the OS version")
#Check Apache installed
#TODO
#
#Test if the rewrite module file present
if os.path.isfile(RWM_FILE):
print("re write")
##print sys.version_info
##if sys.version_info < (2,7):
## print "This programm works only with the Python 2.7"###
| sujith7c/py-system-tools | en_mod_rw.py | Python | gpl-3.0 | 636 | 0.031447 |
import logging
import select
import socket
from collections import deque
from amqpsfw import amqp_spec
from amqpsfw.exceptions import SfwException
from amqpsfw.configuration import Configuration
amqpsfw_logger = logging.getLogger('amqpsfw')
log_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s.py.%(funcName)s:%(lineno)d - %(message)s')
log_handler.setFormatter(formatter)
amqpsfw_logger.addHandler(log_handler)
amqpsfw_logger.setLevel(logging.ERROR)
log = logging.getLogger(__name__)
class BufferOut:
def __init__(self):
self.frame_queue = deque()
self.current_frame_bytes = b''
def clear(self):
self.current_frame_bytes = b''
def append_frame(self, x):
self.frame_queue.append(x)
class BufferIn:
def __init__(self):
self.frame_bytes = b''
self.parsed_data_size = 0
def clear(self):
self.__init__()
class Application:
STOPPED = 'STOPPPED'
RUNNING = 'RUNNING'
READ = select.EPOLLIN
WRITE = select.EPOLLOUT
ERROR = (select.EPOLLERR | select.EPOLLPRI | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLRDBAND)
    PRIWRITE = select.EPOLLWRBAND # not used
def __init__(self, ioloop, app_socket=None):
self.buffer_out = BufferOut()
self.buffer_in = BufferIn()
self.ioloop = ioloop
self.status = 'RUNNING'
self.socket = app_socket
self.expected_response_frames = []
self.app_gen = self.processor()
self.config = Configuration()
def start(self):
raise NotImplementedError
def modify_to_read(self):
events = self.READ | self.ERROR
self.ioloop.update_handler(self.socket.fileno(), events)
def modify_to_write(self):
events = self.WRITE | self.READ | self.ERROR
self.ioloop.update_handler(self.socket.fileno(), events)
def write(self, value):
if self.status == self.STOPPED:
raise SfwException('Internal', 'Aplication is stopped')
self.buffer_out.append_frame(value)
self.modify_to_write()
def handler(self, fd, event):
# TODO why == RUNNING is here?
if event & self.WRITE and self.status == 'RUNNING':
self.handle_write()
if event & self.READ and self.status == 'RUNNING':
self.handle_read()
if event & self.ERROR:
self.handle_error(fd)
def handle_error(self, fd):
log.error('Get error on socket: %s', fd)
self.stop()
def handle_read(self):
        # We can't parse the whole buffer in one call: if it held many frames we would loop here
        # until it drained, but for frames like Basic.Ack a response has to be written immediately
        # after the frame arrives. So the data is peeked at (MSG_PEEK) without being removed from
        # the socket buffer, which keeps read events firing; the bytes are only consumed from the
        # socket buffer once everything in the application buffer has been parsed.
payload_size, frame, self.buffer_in.frame_bytes = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if not frame:
self.buffer_in.frame_bytes = self.socket.recv(4096, socket.MSG_PEEK)
if not self.buffer_in.frame_bytes:
self.stop()
payload_size, frame, self.buffer_in.frame_bytes = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if frame:
self.buffer_in.parsed_data_size += (payload_size + 8)
log.debug('IN {}: {}'.format(self.socket.fileno(), frame))
if self.expected_response_frames and not issubclass(type(frame), tuple(self.expected_response_frames)):
log.error('Unexpected frame type: %s', str(frame))
self.stop()
else:
self.expected_response_frames = []
response = self.method_handler(frame)
_, next_frame, _ = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if not next_frame:
# "frame" was last frame in buffer_in so remove already parsed data, do second read without flag
self.socket.recv(self.buffer_in.parsed_data_size)
self.buffer_in.clear()
if response:
# TODO why this try here?
try:
self.app_gen.send(response)
except StopIteration:
pass
def handle_write(self):
if len(self.buffer_out.frame_queue) > 0 and not self.buffer_out.current_frame_bytes:
self.expected_response_frames = self.buffer_out.frame_queue[-1].expected_response_frames
for frame in self.buffer_out.frame_queue:
log.debug('OUT {}: {}'.format(self.socket.fileno(), frame))
self.buffer_out.current_frame_bytes += frame.encoded
self.buffer_out.frame_queue.clear()
if self.buffer_out.current_frame_bytes:
writed_bytes = self.socket.send(self.buffer_out.current_frame_bytes)
self.buffer_out.current_frame_bytes = self.buffer_out.current_frame_bytes[writed_bytes:]
if not self.buffer_out.current_frame_bytes and not len(self.buffer_out.frame_queue):
self.modify_to_read()
if self.expected_response_frames is None:
# TODO why this try here?
try:
self.app_gen.send(None)
except StopIteration:
pass
self.buffer_out.clear()
def sleep(self, duration):
self.modify_to_write()
self.ioloop.current().call_later(duration, next, self.app_gen)
return
def processor(self):
yield
raise NotImplementedError
def stop(self):
log.debug('Stop application')
self.buffer_in.clear()
self.buffer_out.clear()
self.status = self.STOPPED
self.ioloop.stop()
self.socket.close()
def on_hearbeat(self, method):
self.write(amqp_spec.Heartbeat())
def on_connection_close(self, method):
self.write(amqp_spec.Connection.CloseOk())
self.stop()
def on_channel_flow(self, method):
# TODO if active=0 stop sending data
self.write(amqp_spec.Channel.FlowOk(method.active))
def on_channel_close(self, method):
self.write(amqp_spec.Channel.CloseOk())
method_mapper = {
amqp_spec.Heartbeat: on_hearbeat,
amqp_spec.Connection.Close: on_connection_close,
amqp_spec.Channel.Close: on_channel_close,
amqp_spec.Channel.Flow: on_channel_flow
}
def method_handler(self, method):
if type(method) in self.method_mapper:
return self.method_mapper[type(method)](self, method)
else:
return method
| akayunov/amqpsfw | lib/amqpsfw/application.py | Python | mit | 6,761 | 0.002367 |
import telnetlib
from time import sleep
import re
import os
HOST_IPs = [
"172.16.1.253", "172.16.1.254"
]
telnet_password = b"pass_here"
enable_password = b"pass_here"
show_commands_list = [
b"show run",
b"show ip arp",
b"show vlan",
b"show cdp neighbors",
b"show ip interface brief"
b"show interface status",
b"show interface description",
b"show etherchannel summary"
]
for HOST_IP in HOST_IPs:
# Telnet to the device and login
tn = telnetlib.Telnet(HOST_IP)
tn.read_until(b"Password: ")
tn.write(telnet_password + b"\r\n")
sleep(0.5)
# Get host name from prompt and make a directory
host_name = re.sub(
'\r\n',"",tn.read_very_eager().decode('ascii'))[:-1]
if not os.path.exists(host_name):
os.makedirs(host_name)
# Log into enable mode
tn.write(b"enable\r\n")
tn.write(enable_password + b"\r\n")
    # Set terminal length to 0 (disable output paging)
tn.write(b"terminal length 0\r\n")
tn.read_very_eager().decode('ascii')
sleep(1)
# execute show commands and save in the directory created
for show_command in show_commands_list:
tn.write(show_command+b"\r\n")
sleep(1)
out = tn.read_very_eager().decode('ascii')
file_name = re.sub(' ', '_', show_command.decode('ascii'))
with open(host_name+'/'+file_name+'.txt','w') as f:
f.write(out)
# Close telnet connection
tn.close()
| JamesKBowler/networking_scripts | cisco/cisco_telnet_recon.py | Python | mit | 1,465 | 0.006826 |
from __future__ import with_statement
import hashlib
import os
import posixpath
import stat
import re
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
from fabric.context_managers import settings
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>')
class SFTP(object):
"""
SFTP helper class, which is also a facade for ssh.SFTPClient.
"""
def __init__(self, host_string):
self.ftp = connections[host_string].open_sftp()
# Recall that __getattr__ is the "fallback" attribute getter, and is thus
# pretty safe to use for facade-like behavior as we're doing here.
def __getattr__(self, attr):
return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.stat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
def glob(self, path):
from fabric.state import win32
dirpart, pattern = os.path.split(path)
rlist = self.ftp.listdir(dirpart)
names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
ret = [path]
if len(names):
s = '/'
ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
if not win32:
ret = [posixpath.join(dirpart, name) for name in names]
return ret
def walk(self, top, topdown=True, onerror=None, followlinks=False):
from os.path import join
# We may not have read permission for top, in which case we can't get a
# list of the files the directory contains. os.path.walk always
# suppressed the exception then, rather than blow up for a minor reason
# when (say) a thousand readable directories are still left to visit.
# That logic is copied here.
try:
# Note that listdir and error are globals in this module due to
# earlier import-*.
names = self.ftp.listdir(top)
except Exception, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not self.islink(path):
for x in self.walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir "%s"' % path)
else:
self.ftp.mkdir(path)
def get(self, remote_path, local_path, use_sudo, local_is_path, rremote=None, temp_dir=""):
from fabric.api import sudo, hide
# rremote => relative remote path, so get(/var/log) would result in
# this function being called with
# remote_path=/var/log/apache2/access.log and
# rremote=apache2/access.log
rremote = rremote if rremote is not None else remote_path
# Handle format string interpolation (e.g. %(dirname)s)
path_vars = {
'host': env.host_string.replace(':', '-'),
'basename': os.path.basename(rremote),
'dirname': os.path.dirname(rremote),
'path': rremote
}
if local_is_path:
# Naive fix to issue #711
escaped_path = re.sub(r'(%[^()]*\w)', r'%\1', local_path)
local_path = os.path.abspath(escaped_path % path_vars )
            # Ensure we give ssh.SFTPClient a file by prepending and/or
# creating local directories as appropriate.
dirpath, filepath = os.path.split(local_path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, path_vars['basename'])
if output.running:
print("[%s] download: %s <- %s" % (
env.host_string,
_format_local(local_path, local_is_path),
remote_path
))
# Warn about overwrites, but keep going
if local_is_path and os.path.exists(local_path):
msg = "Local file %s already exists and is being overwritten."
warn(msg % local_path)
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(cp) it.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
target_path = posixpath.join(temp_dir, hasher.hexdigest())
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('cp -p "%s" "%s"' % (remote_path, target_path))
# The user should always own the copied file.
sudo('chown %s "%s"' % (env.user, target_path))
# Only root and the user has the right to read the file
sudo('chmod %o "%s"' % (0400, target_path))
remote_path = target_path
try:
# File-like objects: reset to file seek 0 (to ensure full overwrite)
# and then use Paramiko's getfo() directly
getter = self.ftp.get
if not local_is_path:
local_path.seek(0)
getter = self.ftp.getfo
getter(remote_path, local_path)
finally:
# try to remove the temporary file after the download
if use_sudo:
with settings(hide('everything'), cwd=""):
sudo('rm -f "%s"' % remote_path)
# Return local_path object for posterity. (If mutated, caller will want
# to know.)
return local_path
def get_dir(self, remote_path, local_path, use_sudo, temp_dir):
# Decide what needs to be stripped from remote paths so they're all
# relative to the given remote_path
if os.path.basename(remote_path):
strip = os.path.dirname(remote_path)
else:
strip = os.path.dirname(os.path.dirname(remote_path))
# Store all paths gotten so we can return them when done
result = []
# Use our facsimile of os.walk to find all files within remote_path
for context, dirs, files in self.walk(remote_path):
# Normalize current directory to be relative
# E.g. remote_path of /var/log and current dir of /var/log/apache2
# would be turned into just 'apache2'
lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
# Prepend local path to that to arrive at the local mirrored
# version of this directory. So if local_path was 'mylogs', we'd
# end up with 'mylogs/apache2'
lcontext = os.path.join(local_path, lcontext)
# Download any files in current directory
for f in files:
# Construct full and relative remote paths to this file
rpath = posixpath.join(context, f)
rremote = posixpath.join(rcontext, f)
# If local_path isn't using a format string that expands to
# include its remote path, we need to add it here.
if "%(path)s" not in local_path \
and "%(dirname)s" not in local_path:
lpath = os.path.join(lcontext, f)
# Otherwise, just passthrough local_path to self.get()
else:
lpath = local_path
# Now we can make a call to self.get() with specific file paths
# on both ends.
result.append(self.get(rpath, lpath, use_sudo, True, rremote, temp_dir))
return result
def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
local_is_path, temp_dir):
from fabric.api import sudo, hide
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = posixpath.join(remote_path, basename)
if output.running:
print("[%s] put: %s -> %s" % (
env.host_string,
_format_local(local_path, local_is_path),
posixpath.join(pre, remote_path)
))
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(mv) it later.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
remote_path = posixpath.join(temp_dir, hasher.hexdigest())
# Read, ensuring we handle file-like objects correct re: seek pointer
putter = self.ftp.put
if not local_is_path:
old_pointer = local_path.tell()
local_path.seek(0)
putter = self.ftp.putfo
rattrs = putter(local_path, remote_path)
if not local_is_path:
local_path.seek(old_pointer)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
# Cast to octal integer in case of string
if isinstance(lmode, basestring):
lmode = int(lmode, 8)
lmode = lmode & 07777
rmode = rattrs.st_mode
# Only bitshift if we actually got an rmode
if rmode is not None:
rmode = (rmode & 07777)
if lmode != rmode:
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv
# command. (The target path has already been cwd-ified
# elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
else:
self.ftp.chmod(remote_path, lmode)
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
# Revert to original remote_path for return value's sake
remote_path = target_path
return remote_path
def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode, temp_dir):
if os.path.basename(local_path):
strip = os.path.dirname(local_path)
else:
strip = os.path.dirname(os.path.dirname(local_path))
remote_paths = []
for context, dirs, files in os.walk(local_path):
rcontext = context.replace(strip, '', 1)
# normalize pathname separators with POSIX separator
rcontext = rcontext.replace(os.sep, '/')
rcontext = rcontext.lstrip('/')
rcontext = posixpath.join(remote_path, rcontext)
if not self.exists(rcontext):
self.mkdir(rcontext, use_sudo)
for d in dirs:
n = posixpath.join(rcontext, d)
if not self.exists(n):
self.mkdir(n, use_sudo)
for f in files:
local_path = os.path.join(context, f)
n = posixpath.join(rcontext, f)
p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
True, temp_dir)
remote_paths.append(p)
return remote_paths
| jessekl/flixr | venv/lib/python2.7/site-packages/fabric/sftp.py | Python | mit | 12,958 | 0.000772 |
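Both get() and put() above "bounce" files through a deterministic temporary path when sudo is involved: the remote name is simply the SHA-1 of the host string plus the target path, joined under the remote temp directory. A small self-contained sketch of that derivation (host string and paths are invented; Python 2 str, like the module above):

import hashlib
import posixpath

def bounce_path(host_string, target_path, temp_dir='/tmp'):
    # Mirrors the scheme in SFTP.get()/SFTP.put(): hash host + target into
    # a name that is unique per host/file pair (encode to bytes on Python 3).
    hasher = hashlib.sha1()
    hasher.update(host_string)
    hasher.update(target_path)
    return posixpath.join(temp_dir, hasher.hexdigest())

print(bounce_path('deploy@web1.example.com:22', '/etc/nginx/nginx.conf'))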
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from cocoprep.archive_load_data import get_file_name_list, parse_archive_file_name, get_key_value, parse_range
from cocoprep.archive_exceptions import PreprocessingException, PreprocessingWarning
def extract_extremes(input_paths, output_file, functions, instances, dimensions):
"""
Extracts the extreme points from the archives contained in input_paths and outputs them to the output_file in
the following format:
[problem_name] [extreme_point_1] [extreme_point_2]
Assumes the two extreme points are contained in the first two lines of every instance archive. If not, that
instance is skipped.
Performs no kind of sorting or filtering of the problems, therefore if multiple copies of one problem are present
in the input, multiple lines for one problem will be also present in the output.
"""
# Check whether input paths exist
input_files = get_file_name_list(input_paths, ".adat")
if len(input_files) == 0:
raise PreprocessingException('Folder {} does not exist or is empty'.format(input_paths))
# Read the input files one by one and save the result in the output_file
with open(output_file, 'a') as f_out:
for input_file in input_files:
try:
(suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
continue
except PreprocessingWarning as warning:
print('Skipping file {}\n{}'.format(input_file, warning))
continue
print(input_file)
with open(input_file, 'r') as f_in:
extreme1 = None
count = 0
for line in f_in:
if line[0] == '%' and 'instance' in line:
instance = int(get_key_value(line[1:], 'instance').strip(' \t\n\r'))
count = 0
elif count > 1 or (len(line) == 0) or line[0] == '%':
continue
elif count == 0:
extreme1 = line.split()[1:3]
count = 1
elif count == 1:
extreme2 = line.split()[1:3]
count = 2
try:
string = '{}_f{:02d}_i{:02d}_d{:02d}\t'.format(suite_name, function, instance, dimension)
string = string + '\t'.join(extreme1) + '\t' + '\t'.join(extreme2) + '\n'
f_out.write(string)
except ValueError:
print('Skipping instance {} in file {}'.format(instance, input_file))
f_in.close()
f_out.flush()
f_out.close()
if __name__ == '__main__':
"""Extracts information on the two extreme points from the archives of solutions. Results are stored into an output
file.
"""
import timing
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
help='function numbers to be included in the processing of archives')
parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
help='instance numbers to be included in the processing of archives')
parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],
help='dimensions to be included in the processing of archives')
parser.add_argument('output', help='path to the output file')
parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
args = parser.parse_args()
print('Program called with arguments: \ninput folders = {}\noutput file = {}'.format(args.input, args.output))
print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(args.functions, args.instances, args.dimensions))
extract_extremes(args.input, args.output, args.functions, args.instances, args.dimensions)
| PyQuake/earthquakemodels | code/cocobbob/coco/code-preprocessing/archive-update/extract_extremes.py | Python | bsd-3-clause | 4,277 | 0.006079 |
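Each line written by extract_extremes() has the form '<suite>_fFF_iII_dDD' followed by the two extreme points, all tab-separated. A short sketch of reading such a line back (the suite name and numbers are invented):

sample = 'bbob-biobj_f01_i02_d05\t1.25\t-3.0\t0.5\t7.75\n'
fields = sample.split()
problem_name = fields[0]
extreme1 = [float(v) for v in fields[1:3]]
extreme2 = [float(v) for v in fields[3:5]]
print('{}: {} / {}'.format(problem_name, extreme1, extreme2))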
"""distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py,v 1.17 2004/11/10 22:23:14 loewis Exp $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
for line in contents:
f.write(line + "\n")
f.close()
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/distutils/file_util.py | Python | gpl-2.0 | 8,320 | 0.002404 |
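A minimal usage sketch of the helpers above (file names are hypothetical; copy_file() returns the destination name plus a flag saying whether a copy actually happened):

import os
from distutils.file_util import write_file, copy_file, move_file

write_file('notes.txt', ['first line', 'second line'])
dest, copied = copy_file('notes.txt', 'backup.txt', update=1)
print('%s copied=%s' % (dest, copied))        # backup.txt copied=1 on first run
archived = move_file('backup.txt', 'archive.txt')
print('%s exists=%s' % (archived, os.path.exists(archived)))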
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke clang in the system."""
# pylint: disable=invalid-name
import subprocess
from tvm._ffi.base import py_str
import tvm.target
from . import util
def find_clang(required=True):
"""Find clang in system.
Parameters
----------
required : bool
        Whether clang is required; a RuntimeError is raised
        when the compiler cannot be found.
Returns
-------
valid_list : list of str
List of possible paths.
Note
----
    This function first searches for a clang whose major version
    matches the major LLVM version that TVM was built with.
"""
cc_list = []
major = tvm.target.codegen.llvm_version_major(allow_none=True)
if major is not None:
cc_list += ["clang-%d.0" % major]
cc_list += ["clang-%d" % major]
cc_list += ["clang"]
cc_list += ["clang.exe"]
valid_list = [util.which(x) for x in cc_list]
valid_list = [x for x in valid_list if x]
if not valid_list and required:
raise RuntimeError("cannot find clang, candidates are: " + str(cc_list))
return valid_list
def create_llvm(inputs, output=None, options=None, cc=None):
"""Create llvm text ir.
Parameters
----------
inputs : list of str
List of input files name or code source.
output : str, optional
Output file, if it is none
a temporary file is created
options : list
The list of additional options string.
cc : str, optional
The clang compiler, if not specified,
we will try to guess the matched clang version.
Returns
-------
code : str
The generated llvm text IR.
"""
cc = cc if cc else find_clang()[0]
cmd = [cc]
cmd += ["-S", "-emit-llvm"]
temp = util.tempdir()
output = output if output else temp.relpath("output.ll")
inputs = [inputs] if isinstance(inputs, str) else inputs
input_files = []
for i, code in enumerate(inputs):
if util.is_source_path(code):
input_files.append(code)
else:
temp_path = temp.relpath("input%d.cc" % i)
with open(temp_path, "w") as output_file:
output_file.write(code)
input_files.append(temp_path)
if options:
cmd += options
cmd += ["-o", output]
cmd += input_files
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
return open(output).read()
| sxjscience/tvm | python/tvm/contrib/clang.py | Python | apache-2.0 | 3,361 | 0.000595 |
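A short usage sketch for create_llvm() above: pass C source as a string and get LLVM text IR back. It assumes a clang matching TVM's LLVM major version is installed and on the PATH:

from tvm.contrib import clang

c_source = "int add(int a, int b) { return a + b; }"
llvm_ir = clang.create_llvm(c_source, options=["-O2"])
print(llvm_ir.splitlines()[0])   # typically a '; ModuleID = ...' header line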
from bokeh.plotting import figure, output_file, show
p = figure(width=400, height=400)
p.circle(2, 3, radius=.5, alpha=0.5)
output_file('out.html')
show(p)
| Serulab/Py4Bio | code/ch14/basiccircle.py | Python | mit | 157 | 0 |
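For a Jupyter notebook the same figure can be rendered inline by swapping output_file for output_notebook; a small variation on the snippet above, not part of it:

from bokeh.io import output_notebook
from bokeh.plotting import figure, show

output_notebook()
p = figure(width=400, height=400)
p.circle(2, 3, radius=.5, alpha=0.5)
show(p)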
class Optimizer:
def __init__(self, model, params=None):
self.model = model
if params:
self.model.set_params(**params)
self.params = self.model.get_params()
self.__chain = list()
def step(self, name, values, skipped=False):
if not skipped:
self.__chain.append({
'pname': name,
'pvalues': values
})
return self
def solve(self, evaluator):
score = -1
for param in self.__chain:
self.model.set_params(**self.params) # set previous best param
results = [(evaluator(self.model.set_params(**{param['pname']: value})), value)
for value in param['pvalues']]
            # Sort ascending by score so the best (lowest) value comes first.
            results = sorted(results, key=lambda pair: pair[0])
print param['pname']
for result in results:
print result[1], ' : ', result[0]
# update best params
self.params[param['pname']] = results[0][1]
score = results[0][0]
return score
| danielwpz/soybean | src/util/optimizer.py | Python | mit | 1,090 | 0.000917 |
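A usage sketch for the Optimizer above: chain step() calls to declare one parameter grid each, then solve() tunes them greedily in order, keeping the value with the lowest evaluator score before moving on. The estimator and scoring below are illustrative; any scikit-learn model with get_params()/set_params() fits, and the module itself is Python 2:

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = load_iris(return_X_y=True)

def evaluator(model):
    # solve() keeps the lowest score, so negate the cross-validated accuracy.
    return -cross_val_score(model, X, y, cv=3).mean()

# Optimizer is the class defined above (src/util/optimizer.py).
opt = Optimizer(RandomForestClassifier(), params={'random_state': 0})
best_score = (opt.step('n_estimators', [50, 100, 200])
                 .step('max_depth', [None, 5, 10])
                 .solve(evaluator))
print(best_score)
print(opt.params)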
"""
Parent of all (field) classes in Hachoir: Field.
"""
from hachoir_core.compatibility import reversed
from hachoir_core.stream import InputFieldStream
from hachoir_core.error import HachoirError, HACHOIR_ERRORS
from hachoir_core.log import Logger
from hachoir_core.i18n import _
from hachoir_core.tools import makePrintable
from weakref import ref as weakref_ref
class FieldError(HachoirError):
"""
Error raised by a L{Field}.
@see: L{HachoirError}
"""
pass
def joinPath(path, name):
if path != "/":
return "/".join((path, name))
else:
return "/%s" % name
class MissingField(KeyError, FieldError):
def __init__(self, field, key):
KeyError.__init__(self)
self.field = field
self.key = key
def __str__(self):
return 'Can\'t get field "%s" from %s' % (self.key, self.field.path)
def __unicode__(self):
return u'Can\'t get field "%s" from %s' % (self.key, self.field.path)
class Field(Logger):
    # static_size can take three different values: None (no static size), an
    # integer (number of bits), or a function which returns an integer.
    #
    # Such a function receives exactly the same arguments as the constructor,
    # except the first one. Example of function:
# static_size = staticmethod(lambda *args, **kw: args[1])
static_size = None
# Indicate if this field contains other fields (is a field set) or not
is_field_set = False
def __init__(self, parent, name, size=None, description=None):
"""
Set default class attributes, set right address if None address is
given.
@param parent: Parent field of this field
@type parent: L{Field}|None
@param name: Name of the field, have to be unique in parent. If it ends
with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
@type name: str
@param size: Size of the field in bit (can be None, so it
will be computed later)
@type size: int|None
@param address: Address in bit relative to the parent absolute address
@type address: int|None
@param description: Optional string description
@type description: str|None
"""
assert issubclass(parent.__class__, Field)
assert (size is None) or (0 <= size)
self._parent = parent
if not name:
raise ValueError("empty field name")
self._name = name
self._address = parent.nextFieldAddress()
self._size = size
self._description = description
def _logger(self):
return self.path
def createDescription(self):
return ""
def _getDescription(self):
if self._description is None:
try:
self._description = self.createDescription()
if isinstance(self._description, str):
self._description = makePrintable(
self._description, "ISO-8859-1", to_unicode=True)
except HACHOIR_ERRORS, err:
self.error("Error getting description: " + unicode(err))
self._description = ""
return self._description
description = property(_getDescription,
doc="Description of the field (string)")
def __str__(self):
return self.display
def __unicode__(self):
return self.display
def __repr__(self):
return "<%s path=%r, address=%s, size=%s>" % (
self.__class__.__name__, self.path, self._address, self._size)
def hasValue(self):
return self._getValue() is not None
def createValue(self):
raise NotImplementedError()
def _getValue(self):
try:
value = self.createValue()
except HACHOIR_ERRORS, err:
self.error(_("Unable to create value: %s") % unicode(err))
value = None
self._getValue = lambda: value
return value
value = property(lambda self: self._getValue(), doc="Value of field")
def _getParent(self):
return self._parent
parent = property(_getParent, doc="Parent of this field")
def createDisplay(self):
return unicode(self.value)
def _getDisplay(self):
if not hasattr(self, "_Field__display"):
try:
self.__display = self.createDisplay()
except HACHOIR_ERRORS, err:
self.error("Unable to create display: %s" % err)
self.__display = u""
return self.__display
display = property(lambda self: self._getDisplay(),
doc="Short (unicode) string which represents field content")
def createRawDisplay(self):
value = self.value
if isinstance(value, str):
return makePrintable(value, "ASCII", to_unicode=True)
else:
return unicode(value)
def _getRawDisplay(self):
if not hasattr(self, "_Field__raw_display"):
try:
self.__raw_display = self.createRawDisplay()
except HACHOIR_ERRORS, err:
self.error("Unable to create raw display: %s" % err)
self.__raw_display = u""
return self.__raw_display
raw_display = property(lambda self: self._getRawDisplay(),
doc="(Unicode) string which represents raw field content")
def _getName(self):
return self._name
name = property(_getName,
doc="Field name (unique in its parent field set list)")
def _getIndex(self):
if not self._parent:
return None
return self._parent.getFieldIndex(self)
index = property(_getIndex)
def _getPath(self):
if not self._parent:
return '/'
names = []
field = self
while field is not None:
names.append(field._name)
field = field._parent
names[-1] = ''
return '/'.join(reversed(names))
path = property(_getPath,
doc="Full path of the field starting at root field")
def _getAddress(self):
return self._address
address = property(_getAddress,
doc="Relative address in bit to parent address")
def _getAbsoluteAddress(self):
address = self._address
current = self._parent
while current:
address += current._address
current = current._parent
return address
absolute_address = property(_getAbsoluteAddress,
doc="Absolute address (from stream beginning) in bit")
def _getSize(self):
return self._size
size = property(_getSize, doc="Content size in bit")
def _getField(self, name, const):
if name.strip("."):
return None
field = self
for index in xrange(1, len(name)):
field = field._parent
if field is None:
break
return field
def getField(self, key, const=True):
if key:
if key[0] == "/":
if self._parent:
current = self._parent.root
else:
current = self
if len(key) == 1:
return current
key = key[1:]
else:
current = self
for part in key.split("/"):
field = current._getField(part, const)
if field is None:
raise MissingField(current, part)
current = field
return current
raise KeyError("Key must not be an empty string!")
def __getitem__(self, key):
return self.getField(key, False)
def __contains__(self, key):
try:
return self.getField(key, False) is not None
except FieldError:
return False
def _createInputStream(self, **args):
assert self._parent
return InputFieldStream(self, **args)
def getSubIStream(self):
if hasattr(self, "_sub_istream"):
stream = self._sub_istream()
else:
stream = None
if stream is None:
stream = self._createInputStream()
self._sub_istream = weakref_ref(stream)
return stream
def setSubIStream(self, createInputStream):
cis = self._createInputStream
self._createInputStream = lambda **args: createInputStream(cis, **args)
def __nonzero__(self):
"""
Method called by code like "if field: (...)".
Always returns True
"""
return True
def getFieldType(self):
return self.__class__.__name__
| kreatorkodi/repository.torrentbr | plugin.video.yatp/site-packages/hachoir_core/field/field.py | Python | gpl-2.0 | 8,646 | 0.002429 |
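getField() and __getitem__ above resolve slash-separated paths relative to a field: a leading '/' restarts from the root field set, ordinary names look up children, and dotted names climb the parent chain. A hypothetical navigation sketch; it assumes the separate hachoir-parser package, a file hachoir can parse, and invented field names:

from hachoir_parser import createParser

parser = createParser(u'sample.bin')   # root field set (None if unparseable)
header = parser['/header']             # absolute path, resolved from the root
size = header['size'].value            # child lookup plus lazy value creation
parent = header['..']                  # a dotted name walks up one level
print('%s -> %s' % (parent.path, size))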
# coding: utf-8
import os
import sys
from nxdrive.logging_config import get_logger
from nxdrive.utils import safe_long_path
from tests.common_unit_test import UnitTestCase
if sys.platform == 'win32':
import win32api
log = get_logger(__name__)
# Number of chars in path c://.../Nuxeo.. is approx 96 chars
FOLDER_A = 'A' * 90
FOLDER_B = 'B' * 90
FOLDER_C = 'C' * 90
FOLDER_D = 'D' * 50
class TestLongPath(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.local_1 = self.local_client_1
self.remote_1 = self.remote_document_client_1
log.info("Create a folder AAAA... (90 chars) in server")
self.folder_a = self.remote_1.make_folder("/", FOLDER_A)
self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B)
self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C)
self.remote_1.make_file(self.folder_c, "File1.txt", "Sample Content")
def tearDown(self):
log.info("Delete the folder AAA... in server")
self.remote_1.delete(self.folder_a, use_trash=False)
UnitTestCase.tearDown(self)
def test_long_path(self):
self.engine_1.start()
self.wait_sync(wait_for_async=True)
parent_path = os.path.join(self.local_1.abspath('/'),
FOLDER_A, FOLDER_B, FOLDER_C, FOLDER_D)
log.info("Creating folder with path: %s", parent_path)
if sys.platform == 'win32' and not os.path.exists(parent_path):
log.debug('Add \\\\?\\ prefix to path %r', parent_path)
parent_path = safe_long_path(parent_path)
os.makedirs(parent_path)
if sys.platform == 'win32':
log.info("Convert path of FOLDER_D\File2.txt to short path format")
parent_path = win32api.GetShortPathName(parent_path)
new_file = os.path.join(parent_path, "File2.txt")
log.info("Creating file with path: %s", new_file)
with open(new_file, "w") as f:
f.write("Hello world")
self.wait_sync(wait_for_async=True, timeout=45, fail_if_timeout=False)
remote_children_of_c = self.remote_1.get_children_info(self.folder_c)
children_names = [item.name for item in remote_children_of_c]
log.warn("Verify if FOLDER_D is uploaded to server")
self.assertIn(FOLDER_D, children_names)
folder_d = [item.uid for item in remote_children_of_c if item.name == FOLDER_D][0]
remote_children_of_d = self.remote_1.get_children_info(folder_d)
children_names = [item.name for item in remote_children_of_d]
log.warn("Verify if FOLDER_D\File2.txt is uploaded to server")
self.assertIn('File2.txt', children_names)
def test_setup_on_long_path(self):
""" NXDRIVE 689: Fix error when adding a new account when installation
path is greater than 245 characters.
"""
self.engine_1.stop()
self.engine_1.reinit()
# On Mac, avoid permission denied error
self.engine_1.get_local_client().clean_xattr_root()
test_folder_len = 245 - len(str(self.local_nxdrive_folder_1))
test_folder = 'A' * test_folder_len
self.local_nxdrive_folder_1 = os.path.join(self.local_nxdrive_folder_1,
test_folder)
self.assertTrue(len(self.local_nxdrive_folder_1) > 245)
self.manager_1.unbind_all()
self.engine_1 = self.manager_1.bind_server(
self.local_nxdrive_folder_1, self.nuxeo_url, self.user_2,
self.password_2, start_engine=False)
self.engine_1.start()
self.engine_1.stop()
| ssdi-drive/nuxeo-drive | nuxeo-drive-client/tests/test_long_path.py | Python | lgpl-2.1 | 3,664 | 0.000819 |
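The test above leans on Windows extended-length paths: once an absolute path grows past MAX_PATH (260 characters) it must be handed to the Win32 API with a \\?\ prefix, which is what safe_long_path() adds. A minimal sketch of that idea, as an illustration rather than the project's actual implementation:

import os
import sys

def to_long_path(path):
    # Prefix absolute Windows paths so Win32 file APIs accept > 260 chars.
    if sys.platform == 'win32' and not path.startswith('\\\\?\\'):
        path = '\\\\?\\' + os.path.abspath(path)
    return path

deep = os.path.join('C:\\', 'A' * 90, 'B' * 90, 'C' * 90, 'file.txt')
print(to_long_path(deep))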
from cgi import parse_qs, escape, FieldStorage
import time
import shutil
def ping_app(environ, start_response):
status = '200 OK'
output = 'Pong!'
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
def hello_world_app(environ, start_response):
parameters=parse_qs(environ.get('QUERY_STRING', ''))
if 'sleep' in parameters:
time.sleep(5)
if 'subject' in parameters:
subject=escape(parameters['subject'][0])
else:
subject='World'
start_response('200 OK', [('Content-Type', 'text/html;charset=utf-8')])
result=u'<p>Hello, %(subject)s!</p>\n' % {'subject': subject}
for key, value in iter(sorted(environ.iteritems())):
result+='<p>'+html_escape(key)+'='+html_escape(value)+'</p>\n'
    content_length = int(environ.get('CONTENT_LENGTH', 0) or 0)
    if content_length and content_length < 100:
result+='bytes read='+environ['wsgi.input'].read()
return [result.encode('utf-8')]
def file_upload_app(environ, start_response):
result=''
if environ['REQUEST_METHOD'].upper()=='POST':
start_response('200 OK', [('Content-Type', 'text/plain;charset=utf-8')])
try:
fs=FieldStorage(fp=environ['wsgi.input'], environ=environ, keep_blank_values=True, strict_parsing=True)
if fs.list:
count=0
for item in fs.list:
if item.filename:
count+=1
result+='%s: file; %s, %s, %s, %r\n' % (item.name, item.filename, item.type, item.disposition, item.file)
with open('fupl-'+str(count), 'w') as fdst:
shutil.copyfileobj(item.file, fdst, 8192)
if hasattr(item.file, 'close'):
item.file.close()
else:
result+='%s: value; %s\n' % (item.name, item.value)
except Exception as e:
result='multipart data parse failure: '+repr(e)
else:
start_response('200 OK', [('Content-Type', 'text/html;charset=utf-8')])
result='''
<form action="/py" method="post" enctype="multipart/form-data">
Category: <input type="text" name="category" />
Select file(s): <input type="file" name="upload" multiple />
<input type="submit" value="Start upload!" />
</form>'''
return [result]
def html_escape(s):
if not s: return ''
return unicode(s).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace('\'', ''')
| ameyjadiye/nxweb | sample_config/python/hello.py | Python | lgpl-3.0 | 2,478 | 0.01937 |
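Each function above is an ordinary WSGI application, so outside of nxweb it can be exercised with the standard library's reference server (Python 2, like the module). The module name and port below are assumptions:

from wsgiref.simple_server import make_server

from hello import hello_world_app   # the sample module above

httpd = make_server('127.0.0.1', 8080, hello_world_app)
print('Serving on http://127.0.0.1:8080/?subject=WSGI')
httpd.serve_forever()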
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import hashlib
import logging
import os
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
import webob.exc
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum import context as q_context
from quantum.db import agents_db
from quantum.db import agentschedulers_db
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_db
from quantum.db import models_v2
from quantum.db import portsecurity_db
from quantum.db import quota_db # noqa
from quantum.db import securitygroups_db
from quantum.extensions import l3
from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import importutils
from quantum.openstack.common import rpc
from quantum.plugins.nicira.common import config # noqa
from quantum.plugins.nicira.common import exceptions as nvp_exc
from quantum.plugins.nicira.common import metadata_access as nvp_meta
from quantum.plugins.nicira.common import securitygroups as nvp_sec
from quantum.plugins.nicira.extensions import nvp_networkgw as networkgw
from quantum.plugins.nicira.extensions import nvp_qos as ext_qos
from quantum.plugins.nicira import nicira_db
from quantum.plugins.nicira import nicira_networkgw_db as networkgw_db
from quantum.plugins.nicira import nicira_qos_db as qos_db
from quantum.plugins.nicira import nvp_cluster
from quantum.plugins.nicira.nvp_plugin_version import PLUGIN_VERSION
from quantum.plugins.nicira import NvpApiClient
from quantum.plugins.nicira import nvplib
LOG = logging.getLogger("QuantumPlugin")
NVP_NOSNAT_RULES_ORDER = 10
NVP_FLOATINGIP_NAT_RULES_ORDER = 224
NVP_EXTGW_NAT_RULES_ORDER = 255
NVP_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions')
# Provider network extension - allowed network types for the NVP Plugin
class NetworkTypes:
"""Allowed provider network types for the NVP Plugin."""
L3_EXT = 'l3_ext'
STT = 'stt'
GRE = 'gre'
FLAT = 'flat'
VLAN = 'vlan'
def create_nvp_cluster(cluster_opts, concurrent_connections,
nvp_gen_timeout):
# NOTE(armando-migliaccio): remove this block once we no longer
# want to support deprecated options in the nvp config file
# ### BEGIN
config.register_deprecated(cfg.CONF)
# ### END
cluster = nvp_cluster.NVPCluster(**cluster_opts)
api_providers = [ctrl.split(':') + [True]
for ctrl in cluster.nvp_controllers]
cluster.api_client = NvpApiClient.NVPApiHelper(
api_providers, cluster.nvp_user, cluster.nvp_password,
request_timeout=cluster.req_timeout,
http_timeout=cluster.http_timeout,
retries=cluster.retries,
redirects=cluster.redirects,
concurrent_connections=concurrent_connections,
nvp_gen_timeout=nvp_gen_timeout)
return cluster
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
networkgw_db.NetworkGatewayMixin,
qos_db.NVPQoSDbMixin,
nvp_sec.NVPSecurityGroups,
nvp_meta.NvpMetadataAccess,
agentschedulers_db.AgentSchedulerDbMixin):
"""L2 Virtual network plugin.
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["provider", "quotas", "port-security",
"router", "security-group", "nvp-qos",
"network-gateway"]
__native_bulk_support = True
# Map nova zones to cluster for easy retrieval
novazone_cluster_map = {}
port_security_enabled_update = "update_port:port_security_enabled"
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
nvplib.LOG.setLevel(loglevel)
NvpApiClient.LOG.setLevel(loglevel)
# Routines for managing logical ports in NVP
self._port_drivers = {
'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_create_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_create_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_router_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_create_l2_gw_port,
'default': self._nvp_create_port},
'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_delete_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_router_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_delete_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_delete_port,
'default': self._nvp_delete_port}
}
# If no api_extensions_path is provided set the following
if not cfg.CONF.api_extensions_path:
cfg.CONF.set_override('api_extensions_path', NVP_EXT_PATH)
self.nvp_opts = cfg.CONF.NVP
self.cluster = create_nvp_cluster(cfg.CONF,
self.nvp_opts.concurrent_connections,
self.nvp_opts.nvp_gen_timeout)
db.configure_db()
# Extend the fault map
self._extend_fault_map()
# Set up RPC interface for DHCP agent
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver)
# Set this flag to false as the default gateway has not
# been yet updated from the config file
self._is_default_net_gw_in_sync = False
def _ensure_default_network_gateway(self):
if self._is_default_net_gw_in_sync:
return
# Add the gw in the db as default, and unset any previous default
def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
try:
ctx = q_context.get_admin_context()
self._unset_default_network_gateways(ctx)
if not def_l2_gw_uuid:
return
try:
def_network_gw = self._get_network_gateway(ctx,
def_l2_gw_uuid)
except sa_exc.NoResultFound:
# Create in DB only - don't go on NVP
def_gw_data = {'id': def_l2_gw_uuid,
'name': 'default L2 gateway service',
'devices': []}
gw_res_name = networkgw.RESOURCE_NAME.replace('-', '_')
def_network_gw = super(
NvpPluginV2, self).create_network_gateway(
ctx, {gw_res_name: def_gw_data})
# In any case set is as default
self._set_default_network_gateway(ctx, def_network_gw['id'])
# Ensure this method is executed only once
self._is_default_net_gw_in_sync = True
except Exception:
LOG.exception(_("Unable to process default l2 gw service:%s"),
def_l2_gw_uuid)
raise
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
"""Build ip_addresses data structure for logical router port.
No need to perform validation on IPs - this has already been
done in the l3_db mixin class.
"""
ip_addresses = []
for ip in fixed_ips:
if not subnet_ids or (ip['subnet_id'] in subnet_ids):
subnet = self._get_subnet(context, ip['subnet_id'])
ip_prefix = '%s/%s' % (ip['ip_address'],
subnet['cidr'].split('/')[1])
ip_addresses.append(ip_prefix)
return ip_addresses
def _create_and_attach_router_port(self, cluster, context,
router_id, port_data,
attachment_type, attachment,
attachment_vlan=None,
subnet_ids=None):
# Use a fake IP address if gateway port is not 'real'
ip_addresses = (port_data.get('fake_ext_gw') and
['0.0.0.0/31'] or
self._build_ip_address_list(context,
port_data['fixed_ips'],
subnet_ids))
try:
lrouter_port = nvplib.create_router_lport(
cluster, router_id, port_data.get('tenant_id', 'fake'),
port_data.get('id', 'fake'), port_data.get('name', 'fake'),
port_data.get('admin_state_up', True), ip_addresses)
LOG.debug(_("Created NVP router port:%s"), lrouter_port['uuid'])
except NvpApiClient.NvpApiException:
LOG.exception(_("Unable to create port on NVP logical router %s"),
router_id)
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to create logical router port for quantum "
"port id %(port_id)s on router %(router_id)s") %
{'port_id': port_data.get('id'), 'router_id': router_id})
self._update_router_port_attachment(cluster, context, router_id,
port_data, attachment_type,
attachment, attachment_vlan,
lrouter_port['uuid'])
return lrouter_port
def _update_router_port_attachment(self, cluster, context,
router_id, port_data,
attachment_type, attachment,
attachment_vlan=None,
nvp_router_port_id=None):
if not nvp_router_port_id:
nvp_router_port_id = self._find_router_gw_port(context, port_data)
try:
nvplib.plug_router_port_attachment(cluster, router_id,
nvp_router_port_id,
attachment,
attachment_type,
attachment_vlan)
LOG.debug(_("Attached %(att)s to NVP router port %(port)s"),
{'att': attachment, 'port': nvp_router_port_id})
except NvpApiClient.NvpApiException:
# Must remove NVP logical port
nvplib.delete_router_lport(cluster, router_id,
nvp_router_port_id)
LOG.exception(_("Unable to plug attachment in NVP logical "
"router port %(r_port_id)s, associated with "
"Quantum %(q_port_id)s"),
{'r_port_id': nvp_router_port_id,
'q_port_id': port_data.get('id')})
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to plug attachment in router port "
"%(r_port_id)s for quantum port id %(q_port_id)s "
"on router %(router_id)s") %
{'r_port_id': nvp_router_port_id,
'q_port_id': port_data.get('id'),
'router_id': router_id}))
def _get_port_by_device_id(self, context, device_id, device_owner):
"""Retrieve ports associated with a specific device id.
Used for retrieving all quantum ports attached to a given router.
"""
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(
device_id=device_id,
device_owner=device_owner,).all()
def _find_router_subnets_cidrs(self, context, router_id):
"""Retrieve subnets attached to the specified router."""
ports = self._get_port_by_device_id(context, router_id,
l3_db.DEVICE_OWNER_ROUTER_INTF)
# No need to check for overlapping CIDRs
cidrs = []
for port in ports:
for ip in port.get('fixed_ips', []):
cidrs.append(self._get_subnet(context,
ip.subnet_id).cidr)
return cidrs
def _nvp_find_lswitch_for_port(self, context, port_data):
network = self._get_network(context, port_data['network_id'])
network_binding = nicira_db.get_network_binding(
context.session, port_data['network_id'])
max_ports = self.nvp_opts.max_lp_per_overlay_ls
allow_extra_lswitches = False
if (network_binding and
network_binding.binding_type in (NetworkTypes.FLAT,
NetworkTypes.VLAN)):
max_ports = self.nvp_opts.max_lp_per_bridged_ls
allow_extra_lswitches = True
try:
return self._handle_lswitch_selection(self.cluster, network,
network_binding, max_ports,
allow_extra_lswitches)
except NvpApiClient.NvpApiException:
            err_desc = _("An exception occurred while selecting logical "
"switch for the port")
LOG.exception(err_desc)
raise nvp_exc.NvpPluginException(err_msg=err_desc)
def _nvp_create_port_helper(self, cluster, ls_uuid, port_data,
do_port_security=True):
return nvplib.create_lport(cluster, ls_uuid, port_data['tenant_id'],
port_data['id'], port_data['name'],
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'],
port_data[psec.PORTSECURITY],
port_data[ext_sg.SECURITYGROUPS],
port_data[ext_qos.QUEUE])
def _nvp_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NVP platform."""
# FIXME(salvatore-orlando): On the NVP platform we do not really have
# external networks. So if as user tries and create a "regular" VIF
# port on an external network we are unable to actually create.
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
try:
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
lport = self._nvp_create_port_helper(self.cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_quantum_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
if (not port_data['device_owner'] in
(l3_db.DEVICE_OWNER_ROUTER_GW,
l3_db.DEVICE_OWNER_ROUTER_INTF)):
nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
port_data['id'])
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s."), port_data)
except NvpApiClient.NvpApiException:
            msg = (_("An exception occurred while plugging the interface "
"into network:%s") % port_data['network_id'])
LOG.exception(msg)
raise q_exc.QuantumException(message=msg)
def _nvp_delete_port(self, context, port_data):
# FIXME(salvatore-orlando): On the NVP platform we do not really have
# external networks. So deleting regular ports from external networks
# does not make sense. However we cannot raise as this would break
# unit tests.
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
return
nvp_port_id = self._nvp_get_port_id(context, self.cluster,
port_data)
if not nvp_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
nvplib.delete_port(self.cluster,
port_data['network_id'],
nvp_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except q_exc.NotFound:
LOG.warning(_("port %s not found in NVP"), port_data['id'])
def _nvp_delete_router_port(self, context, port_data):
# Delete logical router port
lrouter_id = port_data['device_id']
nvp_port_id = self._nvp_get_port_id(context, self.cluster,
port_data)
if not nvp_port_id:
raise q_exc.PortNotFound(port_id=port_data['id'])
try:
nvplib.delete_peer_router_lport(self.cluster,
lrouter_id,
port_data['network_id'],
nvp_port_id)
except (NvpApiClient.NvpApiException, NvpApiClient.ResourceNotFound):
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.exception(_("Ignoring exception as this means the peer "
"for port '%s' has already been deleted."),
nvp_port_id)
# Delete logical switch port
self._nvp_delete_port(context, port_data)
def _nvp_create_router_port(self, context, port_data):
"""Driver for creating a switch port to be connected to a router."""
# No router ports on external networks!
if self._network_is_external(context, port_data['network_id']):
raise nvp_exc.NvpPluginException(
err_msg=(_("It is not allowed to create router interface "
"ports on external networks as '%s'") %
port_data['network_id']))
try:
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
# Do not apply port security here!
lport = self._nvp_create_port_helper(self.cluster,
selected_lswitch['uuid'],
port_data,
False)
nicira_db.add_quantum_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
LOG.debug(_("_nvp_create_port completed for port %(name)s on "
"network %(network_id)s. The new port id is %(id)s."),
port_data)
except Exception:
# failed to create port in NVP delete port from quantum_db
            LOG.exception(_("An exception occurred while plugging "
"the interface"))
super(NvpPluginV2, self).delete_port(context, port_data["id"])
raise
def _find_router_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not router_id:
raise q_exc.BadRequest(_("device_id field must be populated in "
"order to create an external gateway "
"port for network %s"),
port_data['network_id'])
lr_port = nvplib.find_router_gw_port(context, self.cluster, router_id)
if not lr_port:
raise nvp_exc.NvpPluginException(
err_msg=(_("The gateway port for the router %s "
"was not found on the NVP backend")
% router_id))
return lr_port
def _nvp_create_ext_gw_port(self, context, port_data):
"""Driver for creating an external gateway port on NVP platform."""
# TODO(salvatore-orlando): Handle NVP resource
# rollback when something goes not quite as expected
lr_port = self._find_router_gw_port(context, port_data)
ip_addresses = self._build_ip_address_list(context,
port_data['fixed_ips'])
# This operation actually always updates a NVP logical port
# instead of creating one. This is because the gateway port
# is created at the same time as the NVP logical router, otherwise
# the fabric status of the NVP router will be down.
# admin_status should always be up for the gateway port
# regardless of what the user specifies in quantum
router_id = port_data['device_id']
nvplib.update_router_lport(self.cluster,
router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
ip_addresses)
ext_network = self.get_network(context, port_data['network_id'])
if ext_network.get(pnet.NETWORK_TYPE) == NetworkTypes.L3_EXT:
# Update attachment
self._update_router_port_attachment(
self.cluster, context, router_id, port_data,
"L3GatewayAttachment",
ext_network[pnet.PHYSICAL_NETWORK],
ext_network[pnet.SEGMENTATION_ID],
lr_port['uuid'])
# Set the SNAT rule for each subnet (only first IP)
for cidr in self._find_router_subnets_cidrs(context, router_id):
cidr_prefix = int(cidr.split('/')[1])
nvplib.create_lrouter_snat_rule(
self.cluster, router_id,
ip_addresses[0].split('/')[0],
ip_addresses[0].split('/')[0],
order=NVP_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': cidr})
LOG.debug(_("_nvp_create_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s. "
"NVP port id is %(nvp_port_id)s"),
{'ext_net_id': port_data['network_id'],
'router_id': router_id,
'nvp_port_id': lr_port['uuid']})
def _nvp_delete_ext_gw_port(self, context, port_data):
lr_port = self._find_router_gw_port(context, port_data)
# TODO(salvatore-orlando): Handle NVP resource
# rollback when something goes not quite as expected
try:
# Delete is actually never a real delete, otherwise the NVP
# logical router will stop working
router_id = port_data['device_id']
nvplib.update_router_lport(self.cluster,
router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
['0.0.0.0/31'])
# Delete the SNAT rule for each subnet
for cidr in self._find_router_subnets_cidrs(context, router_id):
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=cidr)
# Reset attachment
self._update_router_port_attachment(
self.cluster, context, router_id, port_data,
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid,
nvp_router_port_id=lr_port['uuid'])
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=_("Logical router resource %s not found "
"on NVP platform") % router_id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to update logical router"
"on NVP Platform"))
LOG.debug(_("_nvp_delete_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s"),
{'ext_net_id': port_data['network_id'],
'router_id': router_id})
def _nvp_create_l2_gw_port(self, context, port_data):
"""Create a switch port, and attach it to a L2 gateway attachment."""
        # FIXME(salvatore-orlando): On the NVP platform we do not really have
        # external networks. So if a user tries to create a "regular" VIF
        # port on an external network we are unable to actually create it.
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
                      port_data['id'])
# No need to actually update the DB state - the default is down
return port_data
try:
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
lport = self._nvp_create_port_helper(self.cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_quantum_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
nvplib.plug_l2_gw_service(
self.cluster,
port_data['network_id'],
lport['uuid'],
port_data['device_id'],
int(port_data.get('gw:segmentation_id') or 0))
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id "
"is %(id)s."), port_data)
except NvpApiClient.NvpApiException:
# failed to create port in NVP delete port from quantum_db
msg = (_("An exception occured while plugging the gateway "
"interface into network:%s") % port_data['network_id'])
LOG.exception(msg)
super(NvpPluginV2, self).delete_port(context, port_data["id"])
raise q_exc.QuantumException(message=msg)
def _nvp_create_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _nvp_delete_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _nvp_get_port_id(self, context, cluster, quantum_port):
"""Return the NVP port uuid for a given quantum port.
First, look up the Quantum database. If not found, execute
a query on NVP platform as the mapping might be missing because
the port was created before upgrading to grizzly.
"""
nvp_port_id = nicira_db.get_nvp_port_id(context.session,
quantum_port['id'])
if nvp_port_id:
return nvp_port_id
# Perform a query to NVP and then update the DB
try:
nvp_port = nvplib.get_port_by_quantum_tag(
cluster,
quantum_port['network_id'],
quantum_port['id'])
if nvp_port:
nicira_db.add_quantum_nvp_port_mapping(
context.session,
quantum_port['id'],
nvp_port['uuid'])
return nvp_port['uuid']
except Exception:
LOG.exception(_("Unable to find NVP uuid for Quantum port %s"),
quantum_port['id'])
def _extend_fault_map(self):
"""Extends the Quantum Fault Map.
Exceptions specific to the NVP Plugin are mapped to standard
HTTP Exceptions.
"""
base.FAULT_MAP.update({nvp_exc.NvpInvalidNovaZone:
webob.exc.HTTPBadRequest,
nvp_exc.NvpNoMorePortsException:
webob.exc.HTTPBadRequest})
def _handle_provider_create(self, context, attrs):
# NOTE(salvatore-orlando): This method has been borrowed from
        # the Open vSwitch plugin, although changed to match NVP specifics.
network_type = attrs.get(pnet.NETWORK_TYPE)
physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
network_type_set = attr.is_attr_set(network_type)
physical_network_set = attr.is_attr_set(physical_network)
segmentation_id_set = attr.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
NetworkTypes.FLAT):
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be specified with "
"flat network type")
elif network_type == NetworkTypes.VLAN:
if not segmentation_id_set:
err_msg = _("Segmentation ID must be specified with "
"vlan network type")
elif (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
# Verify segment is not already allocated
binding = nicira_db.get_network_binding_by_vlanid(
context.session, segmentation_id)
if binding:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
elif network_type == NetworkTypes.L3_EXT:
if (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
err_msg = _("%(net_type_param)s %(net_type_value)s not "
"supported") % {'net_type_param': pnet.NETWORK_TYPE,
'net_type_value': network_type}
if err_msg:
raise q_exc.InvalidInput(error_message=err_msg)
        # TODO(salvatore-orlando): Validate transport zone uuid
# which should be specified in physical_network
def _extend_network_dict_provider(self, context, network, binding=None):
if not binding:
binding = nicira_db.get_network_binding(context.session,
network['id'])
# With NVP plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# phy_uuid as 'provider network' for STT net type
if binding:
network[pnet.NETWORK_TYPE] = binding.binding_type
network[pnet.PHYSICAL_NETWORK] = binding.phy_uuid
network[pnet.SEGMENTATION_ID] = binding.vlan_id
def _handle_lswitch_selection(self, cluster, network,
network_binding, max_ports,
allow_extra_lswitches):
lswitches = nvplib.get_lswitches(cluster, network.id)
try:
# TODO(salvatore-orlando) find main_ls too!
return [ls for ls in lswitches
if (ls['_relations']['LogicalSwitchStatus']
['lport_count'] < max_ports)].pop(0)
except IndexError:
# Too bad, no switch available
LOG.debug(_("No switch has available ports (%d checked)"),
len(lswitches))
if allow_extra_lswitches:
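                # Tag the original lswitch as 'multi_lswitch' (if not tagged
                # already) and create an extra lswitch with the same binding
                # settings to host the additional ports.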
main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
if 'multi_lswitch' not in tag_dict:
tags = main_ls[0]['tags']
tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
nvplib.update_lswitch(cluster,
main_ls[0]['uuid'],
main_ls[0]['display_name'],
network['tenant_id'],
tags=tags)
selected_lswitch = nvplib.create_lswitch(
cluster, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
network_binding.binding_type,
network_binding.phy_uuid,
network_binding.vlan_id,
network.id)
return selected_lswitch
else:
LOG.error(_("Maximum number of logical ports reached for "
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)
def setup_rpc(self):
# RPC support for dhcp
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.dispatcher = NVPRpcCallbacks().create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def create_network(self, context, network):
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
self._ensure_default_security_group(context, tenant_id)
# Process the provider network extension
self._handle_provider_create(context, net_data)
# Replace ATTR_NOT_SPECIFIED with None before sending to NVP
for key, value in network['network'].iteritems():
if value is attr.ATTR_NOT_SPECIFIED:
net_data[key] = None
# FIXME(arosen) implement admin_state_up = False in NVP
if net_data['admin_state_up'] is False:
LOG.warning(_("Network with admin_state_up=False are not yet "
"supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>'))
external = net_data.get(l3.EXTERNAL)
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
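            # NVP does not distinguish flat from vlan networks: both are
            # created as lswitches with the 'bridge' transport type.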
if nvp_binding_type in ('flat', 'vlan'):
nvp_binding_type = 'bridge'
lswitch = nvplib.create_lswitch(
self.cluster, tenant_id, net_data.get('name'),
nvp_binding_type, net_data.get(pnet.PHYSICAL_NETWORK),
net_data.get(pnet.SEGMENTATION_ID),
shared=net_data.get(attr.SHARED))
net_data['id'] = lswitch['uuid']
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
# Ensure there's an id in net_data
net_data['id'] = new_net['id']
# Process port security extension
self._process_network_create_port_security(context, net_data)
# DB Operations for setting the network as external
self._process_l3_create(context, net_data, new_net['id'])
# Process QoS queue extension
if network['network'].get(ext_qos.QUEUE):
new_net[ext_qos.QUEUE] = network['network'][ext_qos.QUEUE]
# Raises if not found
self.get_qos_queue(context, new_net[ext_qos.QUEUE])
self._process_network_queue_mapping(context, new_net)
self._extend_network_qos_queue(context, new_net)
if net_data.get(pnet.NETWORK_TYPE):
net_binding = nicira_db.add_network_binding(
context.session, new_net['id'],
net_data.get(pnet.NETWORK_TYPE),
net_data.get(pnet.PHYSICAL_NETWORK),
net_data.get(pnet.SEGMENTATION_ID, 0))
self._extend_network_dict_provider(context, new_net,
net_binding)
self._extend_network_port_security_dict(context, new_net)
self._extend_network_dict_l3(context, new_net)
self.schedule_network(context, new_net)
return new_net
def delete_network(self, context, id):
external = self._network_is_external(context, id)
# Before deleting ports, ensure the peer of a NVP logical
# port with a patch attachment is removed too
port_filter = {'network_id': [id],
'device_owner': ['network:router_interface']}
router_iface_ports = self.get_ports(context, filters=port_filter)
for port in router_iface_ports:
nvp_port_id = self._nvp_get_port_id(
context, self.cluster, port)
if nvp_port_id:
port['nvp_port_id'] = nvp_port_id
else:
LOG.warning(_("A nvp lport identifier was not found for "
"quantum port '%s'"), port['id'])
super(NvpPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
try:
if 'nvp_port_id' in port:
nvplib.delete_peer_router_lport(self.cluster,
port['device_id'],
port['network_id'],
port['nvp_port_id'])
except (TypeError, KeyError,
NvpApiClient.NvpApiException,
NvpApiClient.ResourceNotFound):
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.warning(_("Ignoring exception as this means the peer for "
"port '%s' has already been deleted."),
                            port['id'])
# Do not go to NVP for external networks
if not external:
try:
lswitch_ids = [ls['uuid'] for ls in
nvplib.get_lswitches(self.cluster, id)]
nvplib.delete_networks(self.cluster, id, lswitch_ids)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
except q_exc.NotFound:
LOG.warning(_("Did not found lswitch %s in NVP"), id)
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
            # go to the plugin DB and fetch the network
network = self._get_network(context, id)
# if the network is external, do not go to NVP
if not self._network_is_external(context, id):
# verify the fabric status of the corresponding
# logical switch(es) in nvp
try:
lswitches = nvplib.get_lswitches(self.cluster, id)
nvp_net_status = constants.NET_STATUS_ACTIVE
quantum_status = network.status
for lswitch in lswitches:
relations = lswitch.get('_relations')
if relations:
lswitch_status = relations.get(
'LogicalSwitchStatus')
# FIXME(salvatore-orlando): Being unable to fetch
# logical switch status should be an exception.
if (lswitch_status and
not lswitch_status.get('fabric_status',
None)):
nvp_net_status = constants.NET_STATUS_DOWN
break
LOG.debug(_("Current network status:%(nvp_net_status)s; "
"Status in Quantum DB:%(quantum_status)s"),
{'nvp_net_status': nvp_net_status,
'quantum_status': quantum_status})
if nvp_net_status != network.status:
# update the network status
network.status = nvp_net_status
except q_exc.NotFound:
network.status = constants.NET_STATUS_ERROR
except Exception:
err_msg = _("Unable to get logical switches")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
# Don't do field selection here otherwise we won't be able
# to add provider networks fields
net_result = self._make_network_dict(network, None)
self._extend_network_dict_provider(context, net_result)
self._extend_network_port_security_dict(context, net_result)
self._extend_network_dict_l3(context, net_result)
self._extend_network_qos_queue(context, net_result)
return self._fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None):
nvp_lswitches = {}
filters = filters or {}
with context.session.begin(subtransactions=True):
quantum_lswitches = (
super(NvpPluginV2, self).get_networks(context, filters))
for net in quantum_lswitches:
self._extend_network_dict_provider(context, net)
self._extend_network_port_security_dict(context, net)
self._extend_network_dict_l3(context, net)
self._extend_network_qos_queue(context, net)
tenant_ids = filters and filters.get('tenant_id') or None
filter_fmt = "&tag=%s&tag_scope=os_tid"
if context.is_admin and not tenant_ids:
tenant_filter = ""
else:
tenant_ids = tenant_ids or [context.tenant_id]
tenant_filter = ''.join(filter_fmt % tid for tid in tenant_ids)
lswitch_filters = "uuid,display_name,fabric_status,tags"
lswitch_url_path_1 = (
"/ws.v1/lswitch?fields=%s&relations=LogicalSwitchStatus%s"
% (lswitch_filters, tenant_filter))
lswitch_url_path_2 = nvplib._build_uri_path(
nvplib.LSWITCH_RESOURCE,
fields=lswitch_filters,
relations='LogicalSwitchStatus',
filters={'tag': 'true', 'tag_scope': 'shared'})
try:
res = nvplib.get_all_query_pages(lswitch_url_path_1, self.cluster)
nvp_lswitches.update(dict((ls['uuid'], ls) for ls in res))
            # Issue a second query for fetching shared networks.
            # Unfortunately we cannot use a single query because tags
            # cannot be OR-ed
res_shared = nvplib.get_all_query_pages(lswitch_url_path_2,
self.cluster)
nvp_lswitches.update(dict((ls['uuid'], ls) for ls in res_shared))
except Exception:
err_msg = _("Unable to get logical switches")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
if filters.get('id'):
nvp_lswitches = dict(
(uuid, ls) for (uuid, ls) in nvp_lswitches.iteritems()
if uuid in set(filters['id']))
for quantum_lswitch in quantum_lswitches:
# Skip external networks as they do not exist in NVP
if quantum_lswitch[l3.EXTERNAL]:
continue
elif quantum_lswitch['id'] not in nvp_lswitches:
LOG.warning(_("Logical Switch %s found in quantum database "
"but not in NVP."), quantum_lswitch["id"])
quantum_lswitch["status"] = constants.NET_STATUS_ERROR
else:
# TODO(salvatore-orlando): be careful about "extended"
# logical switches
ls = nvp_lswitches.pop(quantum_lswitch['id'])
if (ls["_relations"]["LogicalSwitchStatus"]["fabric_status"]):
quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
else:
quantum_lswitch["status"] = constants.NET_STATUS_DOWN
# do not make the case in which switches are found in NVP
# but not in Quantum catastrophic.
if nvp_lswitches:
LOG.warning(_("Found %s logical switches not bound "
"to Quantum networks. Quantum and NVP are "
"potentially out of sync"), len(nvp_lswitches))
LOG.debug(_("get_networks() completed for tenant %s"),
context.tenant_id)
if fields:
ret_fields = []
for quantum_lswitch in quantum_lswitches:
row = {}
for field in fields:
row[field] = quantum_lswitch[field]
ret_fields.append(row)
return ret_fields
return quantum_lswitches
def update_network(self, context, id, network):
if network["network"].get("admin_state_up"):
if network['network']["admin_state_up"] is False:
raise q_exc.NotImplementedError(_("admin_state_up=False "
"networks are not "
"supported."))
with context.session.begin(subtransactions=True):
net = super(NvpPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._update_network_security_binding(
context, id, network['network'][psec.PORTSECURITY])
if network['network'].get(ext_qos.QUEUE):
net[ext_qos.QUEUE] = network['network'][ext_qos.QUEUE]
self._delete_network_queue_mapping(context, id)
self._process_network_queue_mapping(context, net)
self._extend_network_port_security_dict(context, net)
self._process_l3_update(context, network['network'], id)
self._extend_network_dict_provider(context, net)
self._extend_network_dict_l3(context, net)
self._extend_network_qos_queue(context, net)
return net
def get_ports(self, context, filters=None, fields=None):
        filters = filters or {}
        with context.session.begin(subtransactions=True):
quantum_lports = super(NvpPluginV2, self).get_ports(
context, filters)
for quantum_lport in quantum_lports:
self._extend_port_port_security_dict(context, quantum_lport)
if (filters.get('network_id') and len(filters.get('network_id')) and
self._network_is_external(context, filters['network_id'][0])):
# Do not perform check on NVP platform
return quantum_lports
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Quantum checks to see if
# the network has any ports.
if filters.get("network_id"):
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = filters["network_id"][0]
else:
lswitch = "*"
if filters.get("device_id"):
for vm_id in filters.get("device_id"):
vm_filter = ("%stag_scope=vm_id&tag=%s&" % (vm_filter,
hashlib.sha1(vm_id).hexdigest()))
else:
vm_id = ""
if filters.get("tenant_id"):
for tenant in filters.get("tenant_id"):
tenant_filter = ("%stag_scope=os_tid&tag=%s&" %
(tenant_filter, tenant))
nvp_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
ports = nvplib.get_all_query_pages(lport_query_path,
self.cluster)
except q_exc.NotFound:
LOG.warn(_("Lswitch %s not found in NVP"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nvp_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
lports = []
for quantum_lport in quantum_lports:
            # if a quantum port is not found in NVP, this might be because
# such port is not mapped to a logical switch - ie: floating ip
if quantum_lport['device_owner'] in (l3_db.DEVICE_OWNER_FLOATINGIP,
l3_db.DEVICE_OWNER_ROUTER_GW):
lports.append(quantum_lport)
continue
try:
quantum_lport["admin_state_up"] = (
nvp_lports[quantum_lport["id"]]["admin_status_enabled"])
if (nvp_lports[quantum_lport["id"]]
["_relations"]
["LogicalPortStatus"]
["fabric_status_up"]):
quantum_lport["status"] = constants.PORT_STATUS_ACTIVE
else:
quantum_lport["status"] = constants.PORT_STATUS_DOWN
del nvp_lports[quantum_lport["id"]]
except KeyError:
quantum_lport["status"] = constants.PORT_STATUS_ERROR
LOG.debug(_("Quantum logical port %s was not found on NVP"),
quantum_lport['id'])
lports.append(quantum_lport)
# do not make the case in which ports are found in NVP
# but not in Quantum catastrophic.
if nvp_lports:
LOG.warning(_("Found %s logical ports not bound "
"to Quantum ports. Quantum and NVP are "
"potentially out of sync"), len(nvp_lports))
if fields:
ret_fields = []
for lport in lports:
row = {}
for field in fields:
row[field] = lport[field]
ret_fields.append(row)
return ret_fields
return lports
def create_port(self, context, port):
# If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
# then we pass the port to the policy engine. The reason why we don't
# pass the value to the policy engine when the port is
# ATTR_NOT_SPECIFIED is for the case where a port is created on a
# shared network that is not owned by the tenant.
port_data = port['port']
notify_dhcp_agent = False
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
# metadata_dhcp_host_route
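            # In this mode a metadata host route is added to the subnet as
            # soon as the DHCP port obtains its fixed IP.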
if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
quantum_db.get('device_owner') == constants.DEVICE_OWNER_DHCP):
if (quantum_db.get('fixed_ips') and
len(quantum_db['fixed_ips'])):
notify_dhcp_agent = self._ensure_metadata_host_route(
context, quantum_db['fixed_ips'][0])
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
port_data[psec.PORTSECURITY] = port_security
self._process_port_security_create(context, port_data)
# security group extension checks
if port_security and has_ip:
self._ensure_default_security_group_on_port(context, port)
elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
port_data[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._process_port_create_security_group(
context, port_data, port_data[ext_sg.SECURITYGROUPS])
# QoS extension checks
port_data[ext_qos.QUEUE] = self._check_for_queue_and_create(
context, port_data)
self._process_port_queue_mapping(context, port_data)
# provider networking extension checks
# Fetch the network and network binding from Quantum db
try:
port_data = port['port'].copy()
port_create_func = self._port_drivers['create'].get(
port_data['device_owner'],
self._port_drivers['create']['default'])
port_create_func(context, port_data)
except q_exc.NotFound:
LOG.warning(_("Network %s was not found in NVP."),
port_data['network_id'])
port_data['status'] = constants.PORT_STATUS_ERROR
except Exception as e:
# FIXME (arosen) or the plugin_interface call failed in which
# case we need to garbage collect the left over port in nvp.
err_msg = _("Unable to create port or set port attachment "
"in NVP.")
LOG.exception(err_msg)
raise e
LOG.debug(_("create_port completed on NVP for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
# remove since it will be added in extend based on policy
del port_data[ext_qos.QUEUE]
self._extend_port_port_security_dict(context, port_data)
self._extend_port_qos_queue(context, port_data)
net = self.get_network(context, port_data['network_id'])
self.schedule_network(context, net)
if notify_dhcp_agent:
self._send_subnet_update_end(
context, quantum_db['fixed_ips'][0]['subnet_id'])
return port_data
def update_port(self, context, id, port):
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
with context.session.begin(subtransactions=True):
ret_port = super(NvpPluginV2, self).update_port(
context, id, port)
# copy values over - except fixed_ips as
            # they've already been processed
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
tenant_id = self._get_tenant_id_for_create(context, ret_port)
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
# checks if security groups were updated adding/modifying
# security groups, port security is set and port has ip
if not (has_ip and ret_port[psec.PORTSECURITY]):
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(NvpPluginV2, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
                # delete the port binding and re-add it with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, ret_port,
sgids)
if psec.PORTSECURITY in port['port']:
self._update_port_security_binding(
context, id, ret_port[psec.PORTSECURITY])
ret_port[ext_qos.QUEUE] = self._check_for_queue_and_create(
context, ret_port)
self._delete_port_queue_mapping(context, ret_port['id'])
self._process_port_queue_mapping(context, ret_port)
self._extend_port_port_security_dict(context, ret_port)
LOG.warn(_("Update port request: %s"), port)
nvp_port_id = self._nvp_get_port_id(
context, self.cluster, ret_port)
if nvp_port_id:
try:
nvplib.update_port(self.cluster,
ret_port['network_id'],
nvp_port_id, id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
ret_port[ext_sg.SECURITYGROUPS],
ret_port[ext_qos.QUEUE])
# Update the port status from nvp. If we fail here hide it
# since the port was successfully updated but we were not
# able to retrieve the status.
ret_port['status'] = nvplib.get_port_status(
self.cluster, ret_port['network_id'],
nvp_port_id)
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_("Unable to update port id: %s."),
nvp_port_id)
# If nvp_port_id is not in database or in nvp put in error state.
else:
ret_port['status'] = constants.PORT_STATUS_ERROR
# remove since it will be added in extend based on policy
del ret_port[ext_qos.QUEUE]
self._extend_port_qos_queue(context, ret_port)
return ret_port
def delete_port(self, context, id, l3_port_check=True,
nw_gw_port_check=True):
"""Deletes a port on a specified Virtual Network.
If the port contains a remote interface attachment, the remote
interface is first un-plugged and then the port is deleted.
:returns: None
:raises: exception.PortInUse
:raises: exception.PortNotFound
:raises: exception.NetworkNotFound
"""
# if needed, check to see if this is a port owned by
# a l3 router. If so, we should prevent deletion here
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
quantum_db_port = self.get_port(context, id)
# Perform the same check for ports owned by layer-2 gateways
if nw_gw_port_check:
self.prevent_network_gateway_port_deletion(context,
quantum_db_port)
port_delete_func = self._port_drivers['delete'].get(
quantum_db_port['device_owner'],
self._port_drivers['delete']['default'])
port_delete_func(context, quantum_db_port)
self.disassociate_floatingips(context, id)
notify_dhcp_agent = False
with context.session.begin(subtransactions=True):
queue = self._get_port_queue_bindings(context, {'port_id': [id]})
# metadata_dhcp_host_route
port_device_owner = quantum_db_port['device_owner']
if (cfg.CONF.NVP.metadata_mode == "dhcp_host_route" and
port_device_owner == constants.DEVICE_OWNER_DHCP):
notify_dhcp_agent = self._ensure_metadata_host_route(
context, quantum_db_port['fixed_ips'][0],
is_delete=True)
super(NvpPluginV2, self).delete_port(context, id)
# Delete qos queue if possible
if queue:
self.delete_qos_queue(context, queue[0]['queue_id'], False)
if notify_dhcp_agent:
self._send_subnet_update_end(
context, quantum_db_port['fixed_ips'][0]['subnet_id'])
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
quantum_db_port = super(NvpPluginV2, self).get_port(context,
id, fields)
self._extend_port_port_security_dict(context, quantum_db_port)
self._extend_port_qos_queue(context, quantum_db_port)
if self._network_is_external(context,
quantum_db_port['network_id']):
return quantum_db_port
nvp_id = self._nvp_get_port_id(context, self.cluster,
quantum_db_port)
            # If there's no NVP id, do not bother going to NVP and put
# the port in error state
if nvp_id:
try:
port = nvplib.get_logical_port_status(
self.cluster, quantum_db_port['network_id'],
nvp_id)
quantum_db_port["admin_state_up"] = (
port["admin_status_enabled"])
if port["fabric_status_up"]:
quantum_db_port["status"] = (
constants.PORT_STATUS_ACTIVE)
else:
quantum_db_port["status"] = constants.PORT_STATUS_DOWN
except q_exc.NotFound:
quantum_db_port["status"] = constants.PORT_STATUS_ERROR
else:
quantum_db_port["status"] = constants.PORT_STATUS_ERROR
return quantum_db_port
def create_router(self, context, router):
# NOTE(salvatore-orlando): We completely override this method in
# order to be able to use the NVP ID as Quantum ID
# TODO(salvatore-orlando): Propose upstream patch for allowing
# 3rd parties to specify IDs as we do with l2 plugin
r = router['router']
has_gw_info = False
tenant_id = self._get_tenant_id_for_create(context, r)
# default value to set - nvp wants it (even if we don't have it)
nexthop = '1.1.1.1'
try:
# if external gateway info are set, then configure nexthop to
# default external gateway
if 'external_gateway_info' in r and r.get('external_gateway_info'):
has_gw_info = True
gw_info = r['external_gateway_info']
del r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NVP router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not self._network_is_external(context, network_id):
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise q_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
lrouter = nvplib.create_lrouter(self.cluster, tenant_id,
router['router']['name'],
nexthop)
            # Use NVP identifier for Quantum resource
router['router']['id'] = lrouter['uuid']
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to create logical router on NVP Platform"))
# Create the port here - and update it later if we have gw_info
self._create_and_attach_router_port(
self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
"L3GatewayAttachment", self.cluster.default_l3_gw_service_uuid)
with context.session.begin(subtransactions=True):
router_db = l3_db.Router(id=lrouter['uuid'],
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status="ACTIVE")
context.session.add(router_db)
if has_gw_info:
self._update_router_gw_info(context, router_db['id'], gw_info)
return self._make_router_dict(router_db)
def update_router(self, context, id, router):
try:
# Either nexthop is updated or should be kept as it was before
r = router['router']
nexthop = None
if 'external_gateway_info' in r and r.get('external_gateway_info'):
gw_info = r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NVP router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not self._network_is_external(context, network_id):
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise q_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
nvplib.update_lrouter(self.cluster, id,
router['router'].get('name'), nexthop)
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=_("Logical router %s not found on NVP Platform") % id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to update logical router on NVP Platform"))
return super(NvpPluginV2, self).update_router(context, id, router)
def delete_router(self, context, id):
with context.session.begin(subtransactions=True):
# Ensure metadata access network is detached and destroyed
# This will also destroy relevant objects on NVP platform.
# NOTE(salvatore-orlando): A failure in this operation will
# cause the router delete operation to fail too.
self._handle_metadata_access_network(context, id, do_create=False)
super(NvpPluginV2, self).delete_router(context, id)
# If removal is successful in Quantum it should be so on
# the NVP platform too - otherwise the transaction should
# be automatically aborted
# TODO(salvatore-orlando): Extend the object models in order to
# allow an extra field for storing the cluster information
# together with the resource
try:
nvplib.delete_lrouter(self.cluster, id)
except q_exc.NotFound:
LOG.warning(_("Logical router '%s' not found "
"on NVP Platform") % id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to delete logical router"
"on NVP Platform")))
def get_router(self, context, id, fields=None):
router = self._get_router(context, id)
try:
try:
lrouter = nvplib.get_lrouter(self.cluster, id)
except q_exc.NotFound:
lrouter = {}
router_op_status = constants.NET_STATUS_ERROR
relations = lrouter.get('_relations')
if relations:
lrouter_status = relations.get('LogicalRouterStatus')
# FIXME(salvatore-orlando): Being unable to fetch the
# logical router status should be an exception.
if lrouter_status:
router_op_status = (lrouter_status.get('fabric_status')
and constants.NET_STATUS_ACTIVE or
constants.NET_STATUS_DOWN)
if router_op_status != router.status:
LOG.debug(_("Current router status:%(router_status)s;"
"Status in Quantum DB:%(db_router_status)s"),
{'router_status': router_op_status,
'db_router_status': router.status})
# update the router status
with context.session.begin(subtransactions=True):
router.status = router_op_status
except NvpApiClient.NvpApiException:
err_msg = _("Unable to get logical router")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
return self._make_router_dict(router, fields)
def get_routers(self, context, filters=None, fields=None):
        filters = filters or {}
        router_query = self._apply_filters_to_query(
self._model_query(context, l3_db.Router),
l3_db.Router, filters)
routers = router_query.all()
# Query routers on NVP for updating operational status
if context.is_admin and not filters.get("tenant_id"):
tenant_id = None
elif 'tenant_id' in filters:
tenant_id = filters.get('tenant_id')[0]
del filters['tenant_id']
else:
tenant_id = context.tenant_id
try:
nvp_lrouters = nvplib.get_lrouters(self.cluster,
tenant_id,
fields)
except NvpApiClient.NvpApiException:
err_msg = _("Unable to get logical routers from NVP controller")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
nvp_lrouters_dict = {}
for nvp_lrouter in nvp_lrouters:
nvp_lrouters_dict[nvp_lrouter['uuid']] = nvp_lrouter
for router in routers:
nvp_lrouter = nvp_lrouters_dict.get(router['id'])
if nvp_lrouter:
if (nvp_lrouter["_relations"]["LogicalRouterStatus"]
["fabric_status"]):
router.status = constants.NET_STATUS_ACTIVE
else:
router.status = constants.NET_STATUS_DOWN
nvp_lrouters.remove(nvp_lrouter)
else:
router.status = constants.NET_STATUS_ERROR
# do not make the case in which routers are found in NVP
# but not in Quantum catastrophic.
if nvp_lrouters:
LOG.warning(_("Found %s logical routers not bound "
"to Quantum routers. Quantum and NVP are "
"potentially out of sync"), len(nvp_lrouters))
return [self._make_router_dict(router, fields)
for router in routers]
def add_router_interface(self, context, router_id, interface_info):
router_iface_info = super(NvpPluginV2, self).add_router_interface(
context, router_id, interface_info)
        # If the above operation succeeded interface_info contains a reference
# to a logical switch port
port_id = router_iface_info['port_id']
subnet_id = router_iface_info['subnet_id']
# Add port to the logical router as well
# The owner of the router port is always the same as the owner of the
# router. Use tenant_id from the port instead of fetching more records
# from the Quantum database
port = self._get_port(context, port_id)
# Find the NVP port corresponding to quantum port_id
results = nvplib.query_lswitch_lports(
self.cluster, '*',
filters={'tag': port_id, 'tag_scope': 'q_port_id'})
if results:
ls_port = results[0]
else:
raise nvp_exc.NvpPluginException(
err_msg=(_("The port %(port_id)s, connected to the router "
"%(router_id)s was not found on the NVP "
"backend.") % {'port_id': port_id,
'router_id': router_id}))
# Create logical router port and patch attachment
self._create_and_attach_router_port(
self.cluster, context, router_id, port,
"PatchAttachment", ls_port['uuid'],
subnet_ids=[subnet_id])
subnet = self._get_subnet(context, subnet_id)
# If there is an external gateway we need to configure the SNAT rule.
# Fetch router from DB
router = self._get_router(context, router_id)
gw_port = router.gw_port
if gw_port:
            # There is a chance gw_port might have multiple IPs
# In that case we will consider only the first one
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
subnet = self._get_subnet(context, subnet_id)
cidr_prefix = int(subnet['cidr'].split('/')[1])
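                # As in _nvp_create_ext_gw_port, more specific prefixes get
                # lower order values so their SNAT rules take precedence.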
nvplib.create_lrouter_snat_rule(
self.cluster, router_id, snat_ip, snat_ip,
order=NVP_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': subnet['cidr']})
nvplib.create_lrouter_nosnat_rule(
self.cluster, router_id,
order=NVP_NOSNAT_RULES_ORDER,
match_criteria={'destination_ip_addresses': subnet['cidr']})
# Ensure the NVP logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self._handle_metadata_access_network(context, router_id)
LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s"),
{'subnet_id': subnet_id, 'router_id': router_id})
return router_iface_info
def remove_router_interface(self, context, router_id, interface_info):
        # The code below is duplicated from the base class, but comes in handy
# as we need to retrieve the router port id before removing the port
subnet = None
subnet_id = None
if 'port_id' in interface_info:
port_id = interface_info['port_id']
            # find subnet_id - it is needed for removing the SNAT rule
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
port['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
break
else:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
results = nvplib.query_lswitch_lports(
self.cluster, '*', relations="LogicalPortAttachment",
filters={'tag': port_id, 'tag_scope': 'q_port_id'})
lrouter_port_id = None
if results:
lport = results[0]
attachment_data = lport['_relations'].get('LogicalPortAttachment')
lrouter_port_id = (attachment_data and
attachment_data.get('peer_port_uuid'))
else:
LOG.warning(_("The port %(port_id)s, connected to the router "
"%(router_id)s was not found on the NVP backend"),
{'port_id': port_id, 'router_id': router_id})
# Finally remove the data from the Quantum DB
# This will also destroy the port on the logical switch
info = super(NvpPluginV2, self).remove_router_interface(
context, router_id, interface_info)
# Destroy router port (no need to unplug the attachment)
# FIXME(salvatore-orlando): In case of failures in the Quantum plugin
        # this might leave a dangling port. We perform the operation here
# to leverage validation performed in the base class
if not lrouter_port_id:
LOG.warning(_("Unable to find NVP logical router port for "
"Quantum port id:%s. Was this port ever paired "
"with a logical router?"), port_id)
return info
# Ensure the connection to the 'metadata access network'
        # is removed (with the network) if this is the last subnet
# on the router
self._handle_metadata_access_network(context, router_id)
try:
if not subnet:
subnet = self._get_subnet(context, subnet_id)
router = self._get_router(context, router_id)
# Remove SNAT rule if external gateway is configured
if router.gw_port:
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=subnet['cidr'])
# Relax the minimum expected number as the nosnat rules
# do not exist in 2.x deployments
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "NoSourceNatRule",
max_num_expected=1, min_num_expected=0,
destination_ip_addresses=subnet['cidr'])
nvplib.delete_router_lport(self.cluster,
router_id, lrouter_port_id)
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=(_("Logical router port resource %s not found "
"on NVP platform"), lrouter_port_id))
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to update logical router"
"on NVP Platform")))
return info
def _retrieve_and_delete_nat_rules(self, floating_ip_address,
internal_ip, router_id,
min_num_rules_expected=0):
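        """Remove the DNAT and SNAT rules NVP holds for a floating IP."""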
try:
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "DestinationNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=floating_ip_address)
# Remove SNAT rule associated with the single fixed_ip
# to floating ip
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
source_ip_addresses=internal_ip)
except NvpApiClient.NvpApiException:
LOG.exception(_("An error occurred while removing NAT rules "
"on the NVP platform for floating ip:%s"),
floating_ip_address)
raise
except nvp_exc.NvpNatRuleMismatch:
# Do not surface to the user
LOG.warning(_("An incorrect number of matching NAT rules "
"was found on the NVP platform"))
def _remove_floatingip_address(self, context, fip_db):
# Remove floating IP address from logical router port
# Fetch logical port of router's external gateway
router_id = fip_db.router_id
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.cluster, router_id)['uuid']
ext_quantum_port_db = self._get_port(context.elevated(),
fip_db.floating_port_id)
nvp_floating_ips = self._build_ip_address_list(
context.elevated(), ext_quantum_port_db['fixed_ips'])
nvplib.update_lrouter_port_ips(self.cluster,
router_id,
nvp_gw_port_id,
ips_to_add=[],
ips_to_remove=nvp_floating_ips)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Update floating IP association data.
Overrides method from base class.
The method is augmented for creating NAT rules in the process.
"""
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise q_exc.BadRequest(resource='floatingip', msg=msg)
port_id = internal_ip = router_id = None
if 'port_id' in fip and fip['port_id']:
port_qry = context.session.query(l3_db.FloatingIP)
try:
port_qry.filter_by(fixed_port_id=fip['port_id']).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=floatingip_db['fixed_ip_address'],
net_id=floatingip_db['floating_network_id'])
except sa_exc.NoResultFound:
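                # No existing association for this port: this is the
                # expected case, so simply proceed.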
pass
port_id, internal_ip, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
floating_ip = floatingip_db['floating_ip_address']
# Retrieve and delete existing NAT rules, if any
if not router_id and floatingip_db.get('fixed_port_id'):
# This happens if we're disassociating. Need to explicitly
# find the router serving this floating IP
tmp_fip = fip.copy()
tmp_fip['port_id'] = floatingip_db['fixed_port_id']
_pid, internal_ip, router_id = self.get_assoc_data(
context, tmp_fip, floatingip_db['floating_network_id'])
# If there's no association router_id will be None
if router_id:
self._retrieve_and_delete_nat_rules(floating_ip,
internal_ip,
router_id)
# Fetch logical port of router's external gateway
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.cluster, router_id)['uuid']
nvp_floating_ips = self._build_ip_address_list(
context.elevated(), external_port['fixed_ips'])
LOG.debug(_("Address list for NVP logical router "
"port:%s"), nvp_floating_ips)
# Re-create NAT rules only if a port id is specified
if 'port_id' in fip and fip['port_id']:
try:
# Create new NAT rules
nvplib.create_lrouter_dnat_rule(
self.cluster, router_id, internal_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'destination_ip_addresses':
floating_ip})
                # Set up the SNAT rule so that the source IP of outgoing
                # packets from the fixed IP is rewritten to the floating
                # IP itself.
nvplib.create_lrouter_snat_rule(
self.cluster, router_id, floating_ip, floating_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': internal_ip})
# Add Floating IP address to router_port
nvplib.update_lrouter_port_ips(self.cluster,
router_id,
nvp_gw_port_id,
ips_to_add=nvp_floating_ips,
ips_to_remove=[])
except NvpApiClient.NvpApiException:
LOG.exception(_("An error occurred while creating NAT "
"rules on the NVP platform for floating "
"ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
raise nvp_exc.NvpPluginException(err_msg=msg)
elif floatingip_db['fixed_port_id']:
# This is a disassociation.
# Remove floating IP address from logical router port
nvplib.update_lrouter_port_ips(self.cluster,
router_id,
nvp_gw_port_id,
ips_to_add=[],
ips_to_remove=nvp_floating_ips)
floatingip_db.update({'fixed_ip_address': internal_ip,
'fixed_port_id': port_id,
'router_id': router_id})
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
# Check whether the floating ip is associated or not
if fip_db.fixed_port_id:
self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
# Remove floating IP address from logical router port
self._remove_floatingip_address(context, fip_db)
return super(NvpPluginV2, self).delete_floatingip(context, id)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
self._remove_floatingip_address(context, fip_db)
except sa_exc.NoResultFound:
LOG.debug(_("The port '%s' is not associated with floating IPs"),
port_id)
except q_exc.NotFound:
LOG.warning(_("Nat rules not found in nvp for port: %s"), id)
super(NvpPluginV2, self).disassociate_floatingips(context, port_id)
def create_network_gateway(self, context, network_gateway):
"""Create a layer-2 network gateway.
Create the gateway service on NVP platform and corresponding data
        structures in the Quantum database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Need to re-do authZ checks here in order to avoid creation on NVP
gw_data = network_gateway[networkgw.RESOURCE_NAME.replace('-', '_')]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
devices = gw_data['devices']
# Populate default physical network where not specified
for device in devices:
if not device.get('interface_name'):
device['interface_name'] = self.cluster.default_interface_name
        nvp_res = None
        try:
nvp_res = nvplib.create_l2_gw_service(self.cluster, tenant_id,
gw_data['name'], devices)
nvp_uuid = nvp_res.get('uuid')
except Exception:
raise nvp_exc.NvpPluginException(
err_msg=_("Create_l2_gw_service did not "
"return an uuid for the newly "
"created resource:%s") % nvp_res)
gw_data['id'] = nvp_uuid
return super(NvpPluginV2, self).create_network_gateway(context,
network_gateway)
def delete_network_gateway(self, context, id):
"""Remove a layer-2 network gateway.
Remove the gateway service from NVP platform and corresponding data
        structures in the Quantum database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
with context.session.begin(subtransactions=True):
try:
super(NvpPluginV2, self).delete_network_gateway(context, id)
nvplib.delete_l2_gw_service(self.cluster, id)
except NvpApiClient.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NVP resource does not exist
LOG.exception(_("Unable to remove gateway service from "
"NVP plaform - the resource was not found"))
def _ensure_tenant_on_net_gateway(self, context, net_gateway):
if not net_gateway['tenant_id']:
net_gateway['tenant_id'] = context.tenant_id
return net_gateway
def get_network_gateway(self, context, id, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Ensure the tenant_id attribute is populated on the returned gateway
#return self._ensure_tenant_on_net_gateway(
# context, super(NvpPluginV2, self).get_network_gateway(
# context, id, fields))
return super(NvpPluginV2, self).get_network_gateway(context,
id, fields)
def get_network_gateways(self, context, filters=None, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Ensure the tenant_id attribute is populated on returned gateways
net_gateways = super(NvpPluginV2,
self).get_network_gateways(context,
filters,
fields)
return net_gateways
def update_network_gateway(self, context, id, network_gateway):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).update_network_gateway(
context, id, network_gateway)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).connect_network(
context, network_gateway_id, network_mapping_info)
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).disconnect_network(
context, network_gateway_id, network_mapping_info)
def get_plugin_version(self):
return PLUGIN_VERSION
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
        If default_sg is true that means we are creating a default security
group and we don't need to check if one exists.
"""
s = security_group.get('security_group')
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
nvp_secgroup = nvplib.create_security_profile(self.cluster,
tenant_id, s)
security_group['security_group']['id'] = nvp_secgroup['uuid']
return super(NvpPluginV2, self).create_security_group(
context, security_group, default_sg)
def delete_security_group(self, context, security_group_id):
"""Delete a security group.
        :param security_group_id: security group to remove.
"""
with context.session.begin(subtransactions=True):
security_group = super(NvpPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
if security_group['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
filters = {'security_group_id': [security_group['id']]}
if super(NvpPluginV2, self)._get_port_security_group_bindings(
context, filters):
raise ext_sg.SecurityGroupInUse(id=security_group['id'])
nvplib.delete_security_profile(self.cluster,
security_group['id'])
return super(NvpPluginV2, self).delete_security_group(
context, security_group_id)
def create_security_group_rule(self, context, security_group_rule):
"""Create a single security group rule."""
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk(context, bulk_rule)[0]
def create_security_group_rule_bulk(self, context, security_group_rule):
"""Create security group rules.
:param security_group_rule: list of rules to create
"""
s = security_group_rule.get('security_group_rules')
tenant_id = self._get_tenant_id_for_create(context, s)
        # TODO(arosen) is there any way we could avoid having the update of
# the security group rules in nvp outside of this transaction?
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
# Check to make sure security group exists
security_group = super(NvpPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
# Check for duplicate rules
self._check_for_duplicate_rules(context, s)
# gather all the existing security group rules since we need all
# of them to PUT to NVP.
combined_rules = self._merge_security_group_rules_with_current(
context, s, security_group['id'])
nvplib.update_security_group_rules(self.cluster,
security_group['id'],
combined_rules)
return super(
NvpPluginV2, self).create_security_group_rule_bulk_native(
context, security_group_rule)
def delete_security_group_rule(self, context, sgrid):
"""Delete a security group rule
        :param sgrid: security group rule id to remove.
"""
with context.session.begin(subtransactions=True):
# determine security profile id
security_group_rule = (
super(NvpPluginV2, self).get_security_group_rule(
context, sgrid))
if not security_group_rule:
raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
sgid = security_group_rule['security_group_id']
current_rules = self._get_security_group_rules_nvp_format(
context, sgid, True)
self._remove_security_group_with_id_and_id_field(
current_rules, sgrid)
nvplib.update_security_group_rules(
self.cluster, sgid, current_rules)
return super(NvpPluginV2, self).delete_security_group_rule(context,
sgrid)
def create_qos_queue(self, context, qos_queue, check_policy=True):
q = qos_queue.get('qos_queue')
self._validate_qos_queue(context, q)
q['id'] = nvplib.create_lqueue(self.cluster,
self._nvp_lqueue(q))
return super(NvpPluginV2, self).create_qos_queue(context, qos_queue)
def delete_qos_queue(self, context, id, raise_in_use=True):
filters = {'queue_id': [id]}
queues = self._get_port_queue_bindings(context, filters)
if queues:
if raise_in_use:
raise ext_qos.QueueInUseByPort()
else:
return
nvplib.delete_lqueue(self.cluster, id)
return super(NvpPluginV2, self).delete_qos_queue(context, id)
| linvictor88/vse-lbaas-driver | quantum/plugins/nicira/QuantumPlugin.py | Python | apache-2.0 | 104,734 | 0.000134 |
"""
vue2svg : spike/prototype for scenetool.
generates an svg scene from VUE files specified on command line.
usage:
python3.2 vue2svg.py ../test/vue/*.vue
https://github.com/tangentstorm/scenetool
copyright (c) 2013 michal j wallace.
available to the public under the MIT/x11 license. (see ../LICENSE)
"""
import os, sys, io, itertools as it
from collections import namedtuple
import sqlite3
from lxml import etree
DB_PATH = "vuedata.sdb" # note: will be wiped out on each run!
nsmap = {
'xsi':"http://www.w3.org/2001/XMLSchema-instance"
}
def xp(tree, path):
match = tree.xpath(path, namespaces=nsmap)
return match[0] if match else ''
VueData = namedtuple('VueData',
('parent ntype shape id ts x y w h text layer autosized'
' fill strokewidth strokecolor strokestyle textcolor'
' font id1 id2 p0x p0y p1x p1y ctrlcount arrowstate'
' c0x c0y c1x c1y').split( ))
def walk(tree, parent=0):
"""
walk the tree recursively, extracting node data
"""
children = tree.xpath('child')
for child in children:
row = VueData(*([parent] +
[xp(child, path) for path in [
'@xsi:type',
'shape/@xsi:type',
'@ID',
'@created',
'@x',
'@y',
'@width',
'@height',
'@label',
'@layerID',
'@autoSized',
'fillColor/text()',
'strokeWidth/text()',
'strokeColor/text()',
'strokeStyle/text()',
'textColor/text()',
'font/text()',
'ID1/text()',
'ID2/text()',
'point1/@x',
'point1/@y',
'point2/@x',
'point2/@y',
'@controlCount',
'@arrowState',
'ctrlPoint0/@x',
'ctrlPoint0/@y',
'ctrlPoint1/@x',
'ctrlPoint1/@y' ]]))
yield row
for item in walk(child, row.id): yield item
def load(dbc, filename):
"""
load data from the vue file into the database
"""
# vue files are not valid xml because the doctype is misplaced.
# so to fix, just strip off the opening comments:
data = open(filename, 'r').read()
data = data[data.find('<?xml'):]
vue = etree.parse(io.BytesIO(bytes(data, 'ascii')))
cur = dbc.cursor()
cur.execute('insert into file values (?)', [filename])
fid = cur.lastrowid
for row in walk(vue, 0):
sql = 'insert into vuedata values (? %s)' \
% (', ? ' * len(VueData._fields))
cur.execute(sql, [fid] + list(row))
def connect():
return sqlite3.connect(DB_PATH, isolation_level=None) # autocommit
def main(filenames):
if os.path.exists(DB_PATH): os.unlink(DB_PATH)
dbc = connect()
cur = dbc.cursor()
cur.execute('create table if not exists file (filename string)')
sql = 'create table if not exists vuedata (fid integer, %s)' \
% ', '.join('%s data' % col for col in VueData._fields)
cur.execute(sql)
for filename in filenames:
load(dbc,filename)
dbc.close()
# run the scripts and check for error (non-zero exit code)
if ( os.system("sqlite3 %s < schema.sql" % DB_PATH)
+ os.system("sqlite3 %s < vue2elem.sql" % DB_PATH)
+ os.system("sqlite3 %s < views.sql" % DB_PATH)
) > 0: sys.exit()
dbc = connect()
cur = dbc.cursor()
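    # helpers that wrap cursor rows as namedtuples or dicts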
def fetch_ntups(cur):
cols = [tup[0] for tup in cur.description]
ntup = namedtuple('row', cols)
for row in cur.fetchall():
yield ntup(*row)
def fetch_dicts(cur):
cols = [tup[0] for tup in cur.description]
for row in cur.fetchall():
yield dict(zip(cols, row))
print('<!doctype html>')
print('<html><head><title>vue2svg</title>')
print('<style type="text/css">')
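    # one CSS rule pair per style row; shapes below reference them via class="style-<id>"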
cur.execute(
"""
SELECT s.rowid AS id, fg, bg, sc, sw, f.font
FROM style s LEFT JOIN font f ON s.font = f.rowid
""")
for row in fetch_dicts(cur):
print(' '.join(
"""
svg .style-{id} text {{
fill: {fg};
}}
svg .style-{id} {{
stroke: {sc};
stroke-width: {sw};
fill: {bg};
}}
""".format(**row).split()))
print('</style>')
print('</head>')
cur.execute("select * from scenes")
cols = [tup[0] for tup in cur.description]
ntup = namedtuple('rec', cols)
templates = {
'node':
'<rect x="{x}" y="{y}" class="style-{style}" '
' width="{w}" height="{h}" />',
'edge':
'<line x1="{x0}" y1="{y0}" class="style-{style}"'
' x2="{x1}" y2="{y1}" />',
}
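    # rows whose tag has no template fall through and are printed as-is (or skipped when empty)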
print('<body>')
for filename, rows in it.groupby(cur.fetchall(), lambda r: r[0]):
print(' <h1>%s</h1>' % filename)
print(' <svg>')
for row in rows:
rec = ntup(*row)
print(' ',templates.get(rec.tag, rec.tag or '')
                  .format(**rec._asdict()))  # _asdict() works across namedtuple versions; __dict__ does not
print(' </svg>')
print('</body>')
print('</html>')
if __name__=="__main__":
if len(sys.argv) > 1: main(sys.argv[1:])
else: print(__doc__)
| tangentstorm/scenetool | spike/vue2svg.py | Python | mit | 5,403 | 0.004442 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import pytest
import spack.cmd.find
from spack.util.pattern import Bunch
@pytest.fixture(scope='module')
def parser():
"""Returns the parser for the module command"""
prs = argparse.ArgumentParser()
spack.cmd.find.setup_parser(prs)
return prs
@pytest.fixture()
def specs():
s = []
return s
@pytest.fixture()
def mock_display(monkeypatch, specs):
"""Monkeypatches the display function to return its first argument"""
def display(x, *args, **kwargs):
specs.extend(x)
monkeypatch.setattr(spack.cmd.find, 'display_specs', display)
def test_query_arguments():
query_arguments = spack.cmd.find.query_arguments
# Default arguments
args = Bunch(
only_missing=False,
missing=False,
unknown=False,
explicit=False,
implicit=False,
start_date="2018-02-23",
end_date=None
)
q_args = query_arguments(args)
assert 'installed' in q_args
assert 'known' in q_args
assert 'explicit' in q_args
assert q_args['installed'] is True
assert q_args['known'] is any
assert q_args['explicit'] is any
assert 'start_date' in q_args
assert 'end_date' not in q_args
# Check that explicit works correctly
args.explicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is True
args.explicit = False
args.implicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is False
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag1(parser, specs):
args = parser.parse_args(['--tags', 'tag1'])
spack.cmd.find.find(parser, args)
assert len(specs) == 2
assert 'mpich' in [x.name for x in specs]
assert 'mpich2' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2(parser, specs):
args = parser.parse_args(['--tags', 'tag2'])
spack.cmd.find.find(parser, args)
assert len(specs) == 1
assert 'mpich' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2_tag3(parser, specs):
args = parser.parse_args(['--tags', 'tag2', '--tags', 'tag3'])
spack.cmd.find.find(parser, args)
assert len(specs) == 0
| EmreAtes/spack | lib/spack/spack/test/cmd/find.py | Python | lgpl-2.1 | 3,528 | 0 |
__author__="vvladych"
__date__ ="$09.10.2014 23:01:15$"
from forecastmgmt.dao.db_connection import get_db_connection
import psycopg2.extras
from MDO import MDO
from person_name import PersonName
class Person(MDO):
sql_dict={"get_all":"SELECT sid, common_name, birth_date, birth_place, person_uuid FROM fc_person",
"insert":"INSERT INTO fc_person(common_name, birth_date, birth_place) VALUES(%s,%s,%s) RETURNING sid",
"delete":"DELETE FROM fc_person WHERE sid=%s",
"load":"SELECT sid, common_name, birth_date, birth_place, person_uuid FROM fc_person WHERE sid=%s",
"update_person":"UPDATE fc_person SET common_name=%s, birth_date=%s, birth_place=%s WHERE sid=%s"
}
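    # SQL statements used by the MDO base class, keyed by operation name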
def __init__(self, sid=None, common_name=None, birth_date=None, birth_place=None, person_uuid=None):
super(Person, self).__init__(Person.sql_dict,sid,person_uuid)
self.common_name=common_name
self.birth_date=birth_date
self.birth_place=birth_place
if sid!=None:
self.names=PersonName().get_all_for_foreign_key(self.sid)
else:
self.names=[]
def load_object_from_db(self,rec):
self.common_name=rec.common_name
self.birth_date=rec.birth_date
self.birth_place=rec.birth_place
self.uuid=rec.person_uuid
self.names=PersonName().get_all_for_foreign_key(self.sid)
def get_insert_data(self):
return (self.common_name,self.birth_date,self.birth_place)
def insert(self):
super(Person, self).insert()
for name in self.names:
name.person_sid=self.sid
name.insert()
get_db_connection().commit()
def add_name(self, person_name_sid, person_name_role, person_sid, namepart_list):
self.names.append(PersonName(person_name_sid, person_name_role, person_sid, namepart_list))
def fabric_method(self,rec):
return Person(rec.sid, rec.common_name, rec.birth_date, rec.birth_place, rec.person_uuid)
def update(self, other):
cur=get_db_connection().cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
data=(other.common_name, other.birth_date, other.birth_place, self.sid)
cur.execute(Person.sql_dict["update_person"],data)
cur.close()
# update person_names
# delete outdated person_names
for person_name in self.names:
if person_name not in other.names:
person_name.delete()
for person_name in other.names:
if person_name not in self.names:
person_name.insert()
get_db_connection().commit()
| vvladych/forecastmgmt | src/forecastmgmt/model/person.py | Python | unlicense | 2,819 | 0.025186 |
import unittest
import opm.io
import numpy as np
from opm.io.parser import Parser
from opm.io.deck import DeckKeyword
from opm.io.ecl_state import EclipseState
try:
from tests.utils import test_path
except ImportError:
from utils import test_path
class TestFieldProps(unittest.TestCase):
def assertClose(self, expected, observed, epsilon=1e-08):
diff = abs(expected - observed)
err_msg = '|%g - %g| = %g > %g' % (expected, observed, diff, epsilon)
self.assertTrue(diff <= epsilon, msg=err_msg)
def setUp(self):
parser = Parser()
deck = parser.parse(test_path('spe3/SPE3CASE1.DATA'))
int_array = np.ones(324)
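        # add an ACTNUM keyword marking all 324 cells active before building the state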
actnum_kw = DeckKeyword( parser["ACTNUM"], int_array)
deck.add(actnum_kw)
self.spe3 = EclipseState(deck)
self.props = self.spe3.field_props()
def test_contains(self):
p = self.props
self.assertTrue('PORO' in p)
self.assertFalse('NONO' in p)
self.assertTrue('PORV' in p)
self.assertTrue('ACTNUM' in p)
def test_getitem(self):
p = self.props
poro = p.get_double_array('PORO')
self.assertEqual(324, len(poro))
self.assertEqual(0.13, poro[0])
self.assertTrue( 'PERMX' in p )
px = p.get_double_array('PERMX')
print(len(px))
self.assertEqual(324, len(px))
self.assertEqual(324, len(p.get_int_array('ACTNUM')))
def test_permx_values(self):
def md2si(md):
#millidarcy->SI
return md * 1e-3 * 9.869233e-13
field_props = self.props
grid = self.spe3.grid()
permx = field_props.get_double_array('PERMX')
print('set(PERMX) = %s' % set(permx))
# 130mD, 40mD, 20mD, and 150mD, respectively, top to bottom
darcys = {0:md2si(130), 1:md2si(40), 2:md2si(20), 3:md2si(150)}
for i in range(grid.nx):
for j in range(grid.ny):
for k in range(grid.nz):
g_idx = grid.globalIndex(i,j,k)
perm = permx[g_idx]
darcy = darcys[k]
self.assertClose(darcy, perm)
def test_volume(self):
grid = self.spe3.grid()
for i in range(grid.nx):
for j in range(grid.ny):
for k in range(grid.nz):
g_idx = grid.globalIndex(i,j,k)
                    exp = 293.3 * 293.3 * 30 # cubic feet = 73 078.6084 cubic meters
exp *= (12*0.0254)**3 # cubic feet to cubic meter
if k == 0:
self.assertClose(exp, grid.getCellVolume(g_idx))
self.assertEqual(grid.getCellVolume(g_idx), grid.getCellVolume(i, j, k))
| OPM/opm-common | python/tests/test_field_props.py | Python | gpl-3.0 | 2,742 | 0.006929 |
from ..workdays import *
from datetime import datetime, timedelta
from time import strptime
import math
import traceback
tests=[]
def test( fn ):
tests.append(fn)
return fn
def runTests():
for t in tests:
print t
try: t()
except Exception as e:
print e
traceback.print_exc()
print
def _parse_date( datestr ):
return datetime(*strptime(datestr, "%Y-%m-%d")[0:5]).date()
def _parse_datetime( datestr ):
if type(datestr) == type(""):
return datetime(*strptime(datestr, "%Y-%m-%d %H:%M")[0:5])
elif type(datestr) == type((1,2)):
return datetime(*datestr)
elif type(datestr) == type(dt.datetime(1900,1,1)):
return datestr
return None
def _is_same_dt( d1, d2, numParts=5 ):
return d1.timetuple()[:numParts] == d2.timetuple()[:numParts]
@test
def shouldEstimateEnd():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_date(dexp)
dres = estimate_end( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
# Monday 2017-03-06
test( "2017-03-06 00:00", "2017-03-07 00:00", 2, 1, "2017-03-08" )
test( "2017-03-06 00:00", "2017-03-08 00:00", 2, 1, "2017-03-10" )
test( "2017-03-06 00:00", "2017-03-09 00:00", 2, 1, "2017-03-12" )
test( "2017-03-06 00:00", "2017-03-10 00:00", 2, 1, "2017-03-14" )
test( "2017-03-06 00:00", "2017-03-13 00:00", 2, 1, "2017-03-20" )
@test
def shouldAdjustStart():
def test( d1, dexp ):
dexp = _parse_datetime(dexp)
dres = adjusted_start( _parse_datetime( d1 ) )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 5 ) )
# Monday 2017-03-06
test( "2017-03-06 08:00", "2017-03-06 08:00" )
test( "2017-03-07 08:00", "2017-03-07 08:00" )
test( "2017-03-08 08:00", "2017-03-08 08:00" )
test( "2017-03-09 08:00", "2017-03-09 08:00" )
test( "2017-03-10 08:00", "2017-03-10 08:00" )
test( "2017-03-11 08:00", "2017-03-13 00:00" )
test( "2017-03-12 08:00", "2017-03-13 00:00" )
@test
def shouldAdjustEnd():
def test( d1, dexp ):
dexp = _parse_datetime(dexp)
dres = adjusted_end( _parse_datetime( d1 ) )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 5 ) )
# Monday 2017-03-06
test( "2017-03-06 08:00", "2017-03-06 08:00" )
test( "2017-03-07 08:00", "2017-03-07 08:00" )
test( "2017-03-08 08:00", "2017-03-08 08:00" )
test( "2017-03-09 08:00", "2017-03-09 08:00" )
test( "2017-03-10 08:00", "2017-03-10 08:00" )
test( "2017-03-11 08:00", "2017-03-10 23:59" )
test( "2017-03-12 08:00", "2017-03-10 23:59" )
@test
def shouldEstimateEndWorkdays():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_datetime(dexp)
dres = estimate_end_workdays( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
# Monday 2017-03-06
# same week
test( "2017-03-06 08:00", "2017-03-07 08:00", 2, 1, "2017-03-08 08:00" )
test( "2017-03-06 08:00", "2017-03-08 08:00", 2, 1, "2017-03-10 08:00" )
# projection spans weekends
test( "2017-03-06 08:00", "2017-03-09 08:00", 2, 1, "2017-03-14 08:00" )
test( "2017-03-06 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
# a weekend is in the completed time, estimate falls on weekend
# 06 07 08 09 10 w11 w12 13 14 15 16 17 w18 w19 20
test( "2017-03-06 08:00", "2017-03-13 08:00", 2, 1, "2017-03-20 08:00" )
# Start on weekend
test( "2017-03-05 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
test( "2017-03-04 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
# Start and now on weekend
test( "2017-03-05 08:00", "2017-03-11 08:00", 2, 1, "2017-03-17 23:59" )
test( "2017-03-04 08:00", "2017-03-12 08:00", 2, 1, "2017-03-17 23:59" )
@test
def shouldEstimateEndWorkdays2():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_datetime(dexp)
dres = estimate_end_workdays( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
if not _is_same_dt( dres, dexp ):
print " diff:", dres - dexp
# Monday 2017-03-06
d1 = dt.datetime(2017, 03, 06, 8 )
d2 = dt.datetime(2017, 03, 13, 8 )
for done in xrange(1, 22, 5):
dexp = d2 + dt.timedelta( weeks=done )
print done, dt.timedelta( weeks=done ),
test( d1, d2, done+1, done, dexp )
runTests()
| mmahnic/trac-tickethistory | tickethistory/test/workdays_t.py | Python | mit | 4,805 | 0.028512 |
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~
Tests for fiptables. Much of this module is tested in test_felix, but this covers
some parts that are not.
"""
from copy import copy
import logging
import mock
import unittest
import calico.felix.frules as frules
from calico.felix.futils import IPV4, IPV6, FailedSystemCall
import calico.felix.ipsets
import calico.felix.test.stub_ipsets as stub_ipsets
# Expected state
expected_ipsets = stub_ipsets.IpsetState()
# Logger
log = logging.getLogger(__name__)
class TestUpdateIpsets(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Completely replace the ipsets modules.
cls.real_ipsets = calico.felix.ipsets
frules.ipsets = stub_ipsets
@classmethod
def tearDownClass(cls):
# Reinstate the modules we overwrote
frules.ipsets = cls.real_ipsets
def setUp(self):
stub_ipsets.reset()
# Set the expected IP tables state to be clean.
expected_ipsets.reset()
def create_ipsets(self, family):
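        # create each ipset (and its tmp_ twin) in both the stub module and the expected-state tracker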
stub_ipsets.create("ipset_port", "hash:net,port", family)
stub_ipsets.create("ipset_addr", "hash:net", family)
stub_ipsets.create("ipset_icmp", "hash:net", family)
expected_ipsets.create("ipset_port", "hash:net,port", family)
expected_ipsets.create("ipset_addr", "hash:net", family)
expected_ipsets.create("ipset_icmp", "hash:net", family)
stub_ipsets.create("tmp_ipset_port", "hash:net,port", family)
stub_ipsets.create("tmp_ipset_addr", "hash:net", family)
stub_ipsets.create("tmp_ipset_icmp", "hash:net", family)
expected_ipsets.create("tmp_ipset_port", "hash:net,port", family)
expected_ipsets.create("tmp_ipset_addr", "hash:net", family)
expected_ipsets.create("tmp_ipset_icmp", "hash:net", family)
if family == "inet":
addr = "9.8.7.6/24"
else:
addr = "9:8:7::6/64"
# Shove some junk into ipsets that will be tidied away.
stub_ipsets.add("ipset_addr", addr)
stub_ipsets.add("ipset_port", addr + ",tcp:123")
stub_ipsets.add("ipset_icmp", addr)
def tearDown(self):
pass
def test_empty_ipsets(self):
"""
Empty ipsets.
"""
description = "Description : blah"
suffix = "whatever"
rule_list = []
self.create_ipsets("inet")
frules.update_ipsets(IPV4,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
stub_ipsets.check_state(expected_ipsets)
def test_ipv4_ipsets(self):
"""
IPv4 ipsets
"""
description = "description"
suffix = "suffix"
rule_list = []
default_cidr = "1.2.3.4/24"
self.create_ipsets("inet")
# Ignored rules
rule_list.append({ 'blah': "junk" }) # no CIDR
rule_list.append({ 'cidr': "junk" }) # junk CIDR
rule_list.append({ 'cidr': "::/64" }) # IPv6, not v4
rule_list.append({ 'cidr': default_cidr,
'port': 123 }) # port, no protocol
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': "blah" }) # bad port
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': ["blah", "bloop"] }) # bad port range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [0, 123] }) # bad port in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1, 2, 3] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "ipv6-icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'icmp_code': "1" }) # code without type
rule_list.append({ 'cidr': default_cidr,
'protocol': "blah",
'port': "1" }) # port not allowed for protocol
# Better rules
rule_list.append({ 'cidr': "1.2.3.4/24" })
expected_ipsets.add("ipset_addr", "1.2.3.4/24")
rule_list.append({ 'cidr': "10.0.10.0/0",
'protocol': "tcp"})
expected_ipsets.add("ipset_port", "0.0.0.0/1,tcp:1-65535")
expected_ipsets.add("ipset_port", "128.0.0.0/1,tcp:1-65535")
rule_list.append({ 'cidr': "1.0.0.1/8",
'protocol': "udp",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1.0.0.1/8,udp:2-10")
rule_list.append({ 'cidr': "1.0.0.2/8",
'protocol': "sctp",
'port': "2"})
expected_ipsets.add("ipset_port", "1.0.0.2/8,sctp:2")
rule_list.append({ 'cidr': "1.0.0.3/8",
'protocol': "udplite",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1.0.0.3/8,udplite:2-10")
rule_list.append({ 'cidr': "1.0.0.4/8",
'protocol': "icmp" })
expected_ipsets.add("ipset_icmp", "1.0.0.4/8")
rule_list.append({ 'cidr': "1.0.0.5/8",
'protocol': "icmp",
'icmp_type': 123})
expected_ipsets.add("ipset_port", "1.0.0.5/8,icmp:123/0")
rule_list.append({ 'cidr': "1.0.0.6/8",
'protocol': "icmp",
'icmp_type': "type"})
expected_ipsets.add("ipset_port", "1.0.0.6/8,icmp:type")
rule_list.append({ 'cidr': "1.0.0.7/8",
'protocol': "icmp",
'icmp_type': 123,
'icmp_code': "code"})
expected_ipsets.add("ipset_port", "1.0.0.7/8,icmp:123/code")
rule_list.append({ 'cidr': "1.0.0.8/8",
'protocol': "icmp",
'icmp_type': "type",
'icmp_code': "code"}) # code ignored
expected_ipsets.add("ipset_port", "1.0.0.8/8,icmp:type")
rule_list.append({ 'cidr': "1.0.0.9/8",
'protocol': "blah" })
expected_ipsets.add("ipset_port", "1.0.0.9/8,blah:0")
frules.update_ipsets(IPV4,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
stub_ipsets.check_state(expected_ipsets)
def test_ipv6_ipsets(self):
"""
IPv6 ipsets
"""
description = "description"
suffix = "suffix"
rule_list = []
default_cidr = "2001::1:2:3:4/24"
self.create_ipsets("inet6")
# Ignored rules
rule_list.append({ 'blah': "junk" }) # no CIDR
rule_list.append({ 'cidr': "junk" }) # junk CIDR
rule_list.append({ 'cidr': "1.2.3.4/32" }) # IPv4, not v6
rule_list.append({ 'cidr': default_cidr,
'port': 123 }) # port, no protocol
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': "blah" }) # bad port
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': ["blah", "bloop"] }) # bad port range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [0, 123] }) # bad port in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1, 2, 3] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "ipv6-icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'icmp_code': "1" }) # code without type
rule_list.append({ 'cidr': default_cidr,
'protocol': "blah",
'port': "1" }) # port not allowed for protocol
# Better rules
rule_list.append({ 'cidr': "1:2:3::4/24" })
expected_ipsets.add("ipset_addr", "1:2:3::4/24")
rule_list.append({ 'cidr': "1:2:3::/0",
'protocol': "tcp"})
expected_ipsets.add("ipset_port", "::/1,tcp:1-65535")
expected_ipsets.add("ipset_port", "8000::/1,tcp:1-65535")
rule_list.append({ 'cidr': "1::1/8",
'protocol': "udp",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1::1/8,udp:2-10")
rule_list.append({ 'cidr': "1::2/8",
'protocol': "sctp",
'port': "2"})
expected_ipsets.add("ipset_port", "1::2/8,sctp:2")
rule_list.append({ 'cidr': "1::3/8",
'protocol': "udplite",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1::3/8,udplite:2-10")
rule_list.append({ 'cidr': "1::4/8",
'protocol': "ipv6-icmp" })
expected_ipsets.add("ipset_icmp", "1::4/8")
rule_list.append({ 'cidr': "1::5/8",
'protocol': "ipv6-icmp",
'icmp_type': 123})
expected_ipsets.add("ipset_port", "1::5/8,ipv6-icmp:123/0")
rule_list.append({ 'cidr': "1::6/8",
'protocol': "ipv6-icmp",
'icmp_type': "type"})
expected_ipsets.add("ipset_port", "1::6/8,ipv6-icmp:type")
rule_list.append({ 'cidr': "1::7/8",
'protocol': "ipv6-icmp",
'icmp_type': 123,
'icmp_code': "code"})
expected_ipsets.add("ipset_port", "1::7/8,ipv6-icmp:123/code")
rule_list.append({ 'cidr': "1::8/8",
'protocol': "ipv6-icmp",
'icmp_type': "type",
'icmp_code': "code"}) # code ignored
expected_ipsets.add("ipset_port", "1::8/8,ipv6-icmp:type")
rule_list.append({ 'cidr': "1::9/8",
'protocol': "blah" })
expected_ipsets.add("ipset_port", "1::9/8,blah:0")
frules.update_ipsets(IPV6,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
stub_ipsets.check_state(expected_ipsets)
def test_exception(self):
"""
Test exception when adding ipset value.
"""
description = "description"
suffix = "suffix"
rule_list = [{'cidr': "1.2.3.4/24"}]
self.create_ipsets("inet")
with mock.patch('calico.felix.test.stub_ipsets.add',
side_effect=FailedSystemCall("oops", [], 1, "", "")):
frules.update_ipsets(IPV4,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
stub_ipsets.check_state(expected_ipsets)
| fasaxc/felix | calico/felix/test/test_frules.py | Python | apache-2.0 | 13,792 | 0.008338 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the update tool."""
from __future__ import unicode_literals
import os
import sys
import unittest
from tools import update
from tests import test_lib
@unittest.skipIf(
os.environ.get('TRAVIS_OS_NAME') == 'osx',
'TLS 1.2 not supported by macOS on Travis')
class GithubRepoDownloadHelperTest(test_lib.BaseTestCase):
"""Tests for the GitHub repo download helper class."""
_DOWNLOAD_URL = 'https://github.com/ForensicArtifacts/artifacts/releases'
_PROJECT_NAME = 'artifacts'
_PROJECT_VERSION = '20180628'
def testGetPackageDownloadURLs(self):
"""Tests the GetPackageDownloadURLs function."""
download_helper = update.GithubRepoDownloadHelper(self._DOWNLOAD_URL)
package_download_urls = download_helper.GetPackageDownloadURLs(
preferred_machine_type='x86', preferred_operating_system='Windows')
if (sys.version_info[0] not in (2, 3) or
(sys.version_info[0] == 2 and sys.version_info[1] != 7) or
(sys.version_info[0] == 3 and sys.version_info[1] != 6)):
# Python versions other than 2.7 and 3.6 are not supported.
self.assertIsNone(package_download_urls)
else:
self.assertIsNotNone(package_download_urls)
expected_url = (
'https://github.com/log2timeline/l2tbinaries/raw/master/win32/'
'{0:s}-{1:s}.1.win32.msi').format(
self._PROJECT_NAME, self._PROJECT_VERSION)
self.assertIn(expected_url, package_download_urls)
@unittest.skipIf(
os.environ.get('TRAVIS_OS_NAME') == 'osx',
'TLS 1.2 not supported by macOS on Travis')
class DependencyUpdaterTest(test_lib.BaseTestCase):
"""Tests for the dependency updater class."""
# pylint: disable=protected-access
_PROJECT_NAME = 'dfvfs'
_PROJECT_VERSION = '20180510'
def testGetPackageFilenamesAndVersions(self):
"""Tests the GetPackageFilenamesAndVersions function."""
dependency_updater = update.DependencyUpdater(
preferred_machine_type='x86', preferred_operating_system='Windows')
package_filenames, package_versions = (
dependency_updater._GetPackageFilenamesAndVersions([]))
if (sys.version_info[0] not in (2, 3) or
(sys.version_info[0] == 2 and sys.version_info[1] != 7) or
(sys.version_info[0] == 3 and sys.version_info[1] != 6)):
# Python versions other than 2.7 and 3.6 are not supported.
self.assertIsNone(package_filenames)
self.assertIsNone(package_versions)
else:
self.assertIsNotNone(package_filenames)
self.assertIsNotNone(package_versions)
self.assertEqual(
package_filenames.get(self._PROJECT_NAME, None),
'{0:s}-{1:s}.1.win32.msi'.format(
self._PROJECT_NAME, self._PROJECT_VERSION))
self.assertEqual(
package_versions.get(self._PROJECT_NAME, None),
[self._PROJECT_VERSION, '1'])
if __name__ == '__main__':
unittest.main()
| rgayon/l2tdevtools | tests/update.py | Python | apache-2.0 | 2,954 | 0.008463 |
config = {
"name": "Tombstone counter", # plugin name
"type": "receiver", #plugin type
"description": ["counts tombstones in a world"] #description
}
import database as db # import terraria database
class Receiver(): # required class to be called by plugin manager
def __init__(self): #do any initialization stuff
self.tile_id = db.tiles.index("Tombstone") #we grab the ID of tombstone from database
def rec_header(self, header): #this is called by plugin manager when the header is read
print("Counting Tombstones for %s" % header["name"]) #so we print the name from header
def rec_tiles(self, tiles): #called when tiles are ready
x = 0 #our counter variable
for column in tiles: # tiles come as 2D list
for tile in column: #so we need to get tiles like this
if tile[0] == self.tile_id: #tile[0] is the tile_id
x += 1 #increment counter for each found tombstone tile
print("Found %d Tombstones" % (x // 4)) #divide counter by 4, because each tombstone consists of 4 "sub tiles"
return False #signal plugin manager we are done and dont need any further data
| flying-sheep/omnitool | plugins/tombstone.py | Python | mit | 1,209 | 0.01737 |
import cvlib
angle = 0
angles = []
center = []
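# one frame every 15 degrees -> 24 frames covering a full rotation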
for i in range(24): #24
img = cvlib.load("findloop_%d.jpg" % angle)
angles.append(angle)
rng = cvlib.inRangeThresh(img, (20,30,20), (200,130,120))
rng = cvlib.bitNot(rng)
cnt = cvlib.findContours(rng, thresh=250)
if cvlib.area(cnt[0]) > cvlib.area(cnt[1]):
crystal = cnt[0]
else:
crystal = cnt[1]
centroid = cvlib.centroid(crystal)
center.append(centroid[1])
cvlib.drawContour(img, crystal, thickness=10)
cvlib.plotCentroid(img, crystal, radius=7)
cvlib.display(img)
cvlib.save(img, "found%d.jpg" % angle)
angle += 15
cvlib.saveGraph(angles, center, "Y Coord Per Angle", "Angles in Degrees", "Original Data Coord", [0,360,0,400], filename="graph.png")
d = cvlib.approxSinCurve(center)
print d["amplitude"], d["phase shift"], d["vertical shift"]
cvlib.saveGraph(angles, d["data"], "Y Coord Per Angle", "Angles in Degrees", "Y Coord Centroid Best Fit", [0,360,0,400], style="--", filename="fit.png")
cvlib.makeGraph(angles, d["data"], "Y Coord Per Angle", "Angles in Degrees", "Y Coord Centroid", [0,360,0,400], style="r--")
# X = - (MC/PEL) * A * sin(phase)
# Y = - (MC/PEL) * A * cos(phase)
| nextBillyonair/compVision | AMX/Crystal/loop.py | Python | mit | 1,215 | 0.015638 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Positions'
db.delete_table(u'positions_positions')
# Adding model 'Position'
db.create_table(u'positions_position', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'positions', ['Position'])
def backwards(self, orm):
# Adding model 'Positions'
db.create_table(u'positions_positions', (
('content', self.gf('django.db.models.fields.TextField')()),
('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'positions', ['Positions'])
# Deleting model 'Position'
db.delete_table(u'positions_position')
models = {
u'positions.position': {
'Meta': {'object_name': 'Position'},
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 21, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['positions'] | Hackfmi/Diaphanum | positions/migrations/0002_auto__del_positions__add_position.py | Python | mit | 1,927 | 0.006227 |