#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## Contact: Qt Software Information (qt-info@nokia.com)
##
## This file is part of the Graphics Dojo project on Qt Labs.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 or 3.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.fsf.org/licensing/licenses/info/GPLv2.html and
## http://www.gnu.org/copyleft/gpl.html.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at qt-sales@nokia.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
if QT_VERSION < 0x0040500:
sys.stderr.write("You need Qt 4.5 or newer to run this example.\n")
sys.exit(1)
SNAP_THRESHOLD = 10
class SnapView(QWebView):
def __init__(self):
QWebView.__init__(self)
self.snapEnabled = True
self.setWindowTitle(self.tr("Snap-scrolling is ON"))
# rects hit by the line, in main frame's view coordinate
def hitBoundingRects(self, line):
hitRects = []
points = 8
delta = QPoint(line.dx() / points, line.dy() / points)
point = line.p1()
i = 0
while i < points - 1:
point += delta
hit = self.page().mainFrame().hitTestContent(point)
if not hit.boundingRect().isEmpty():
hitRects.append(hit.boundingRect())
i += 1
return hitRects
def keyPressEvent(self, event):
# toggle snapping
if event.key() == Qt.Key_F3:
self.snapEnabled = not self.snapEnabled
if self.snapEnabled:
self.setWindowTitle(self.tr("Snap-scrolling is ON"))
else:
self.setWindowTitle(self.tr("Snap-scrolling is OFF"))
event.accept()
return
# no snapping? do not bother...
if not self.snapEnabled:
QWebView.keyReleaseEvent(self, event)
return
previousOffset = self.page().mainFrame().scrollPosition()
QWebView.keyReleaseEvent(self, event)
if not event.isAccepted():
return
if event.key() == Qt.Key_Down:
ofs = self.page().mainFrame().scrollPosition()
jump = ofs.y() - previousOffset.y()
if jump == 0:
return
jump += SNAP_THRESHOLD
rects = self.hitBoundingRects(QLine(1, 1, self.width() - 1, 1))
i = 0
while i < len(rects):
j = rects[i].top() - previousOffset.y()
if j > SNAP_THRESHOLD and j < jump:
jump = j
i += 1
self.page().mainFrame().setScrollPosition(previousOffset + QPoint(0, jump))
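# A worked example of the snap arithmetic above (illustrative numbers only,
# and assuming the hit-test rects are reported in frame content coordinates,
# which is what the arithmetic implies): suppose the previous scroll offset
# was y=100 and the Down key scrolled the frame by 80 pixels, so jump starts
# at 80 + SNAP_THRESHOLD = 90. If hit-testing along the top edge finds an
# element whose bounding rect begins at content y=130, then j = 130 - 100 = 30;
# since SNAP_THRESHOLD < 30 < 90, jump is reduced to 30 and the frame is
# scrolled to y=130, aligning that element's top edge with the viewport top.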
if __name__ == "__main__":
app = QApplication(sys.argv)
view = SnapView()
view.load(QUrl("http://news.bbc.co.uk/text_only.stm"))
view.resize(320, 500)
view.show()
QMessageBox.information(view, "Hint", "Use F3 to toggle snapping on and off")
sys.exit(app.exec_())
# (The example above is snapscroll/snapscroll.py from the anak10thn/graphics-dojo-qt5 repository; Python, GPL-2.0.)
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
import codecs
import pickle
import re
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}).
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copyreg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import io
>>> read_uint1(io.BytesIO(b'\xff'))
255
"""
data = f.read(1)
if data:
return data[0]
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import io
>>> read_uint2(io.BytesIO(b'\xff\x00'))
255
>>> read_uint2(io.BytesIO(b'\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import io
>>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import io
>>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(io.BytesIO(b"\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around b''
>>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
''
>>> read_stringnl(io.BytesIO(b"''\n"))
''
>>> read_stringnl(io.BytesIO(b'"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in (b'"', b"'"):
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
if decode:
data = codecs.escape_decode(data)[0].decode("ascii")
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import io
>>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string4(f):
r"""
>>> import io
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
''
>>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_string1(f):
r"""
>>> import io
>>> read_string1(io.BytesIO(b"\x00"))
''
>>> read_string1(io.BytesIO(b"\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import io
>>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
True
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return str(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length
>>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("unicodestring4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import io
>>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
1234
>>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' not allowed in b'1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s.endswith(b"L"):
raise ValueError("trailing 'L' not allowed in %r" % s)
# It's not necessarily true that the result fits in a Python short int:
# the pickle may have been written on a 64-bit box. There's also a hack
# for True and False here.
if s == b"00":
return False
elif s == b"01":
return True
try:
return int(s)
except OverflowError:
return int(s)
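# Hedged doctest-style illustration (kept in comments, so not collected by
# doctest) of the True/False hack noted inside read_decimalnl_short above:
#
#     >>> import io
#     >>> read_decimalnl_short(io.BytesIO(b"00\n"))
#     False
#     >>> read_decimalnl_short(io.BytesIO(b"01\n"))
#     True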
def read_decimalnl_long(f):
r"""
>>> import io
>>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
1234
>>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
123456789012345678901234
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s[-1:] == b'L':
s = s[:-1]
return int(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import io
>>> read_floatnl(io.BytesIO(b"-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or sometimes even on a
single box (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import io, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(io.BytesIO(raw + b"\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import io
>>> read_long1(io.BytesIO(b"\x00"))
0
>>> read_long1(io.BytesIO(b"\x02\xff\x00"))
255
>>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
32767
>>> read_long1(io.BytesIO(b"\x02\x00\xff"))
-256
>>> read_long1(io.BytesIO(b"\x02\x00\x80"))
-32768
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import io
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
255
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
32767
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
-256
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
-32768
>>> read_long4(io.BytesIO(b"\x00\x00\x00\x00"))
0
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=int,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='string',
obtype=bytes,
doc="A Python (8-bit) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= 3
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference arises because, when bool was introduced as a
distinct type in 2.3, the builtin names True and False were also added
to 2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pylong],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pystring],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. (Actually, they are decoded into a str instance
using the encoding given to the Unpickler constructor, or the default,
'ASCII'.)
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. (Actually,
they are decoded into a str instance using the encoding given to the
Unpickler constructor, or the default, 'ASCII'.)
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content. (Actually, they
are decoded into a str instance using the encoding given to the
Unpickler constructor, or the default, 'ASCII'.)
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=string4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=string1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the bytes content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte signed
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=int4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
signed little-endian integer following.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
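# As a quick, hedged illustration of the mapping just built (not part of any
# public API): code2op['.'] is the OpcodeInfo record for STOP, so
# code2op['.'].name == 'STOP' and code2op['.'].proto == 0.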
def assure_pickle_consistency(verbose=False):
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print("skipping %r: it doesn't look like an opcode name" % name)
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, bytes) or len(picklecode) != 1:
if verbose:
print(("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode)))
continue
picklecode = picklecode.decode("latin-1")
if picklecode in copy:
if verbose:
print("checking name %r w/ code %r for consistency" % (
name, picklecode))
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a bytes object,
it's wrapped in a BytesIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
if isinstance(pickle, bytes_types):
import io
pickle = io.BytesIO(pickle)
if hasattr(pickle, "tell"):
getpos = pickle.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = pickle.read(1)
opcode = code2op.get(code.decode("latin-1"))
if opcode is None:
if code == b"":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
pos is None and "<unknown>" or pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(pickle)
yield opcode, arg, pos
if code == b'.':
assert opcode.name == 'STOP'
break
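# Hedged usage sketch for genops(): iterate over the opcodes of a small
# pickle (protocol pinned to 2 so every opcode is known to this module) and
# print each opcode's name, decoded embedded argument, and byte offset.
#
#     import pickle
#     for opcode, arg, pos in genops(pickle.dumps({'spam': 1}, protocol=2)):
#         print(pos, opcode.name, arg)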
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
gets = set() # set of args used by a GET opcode
puts = [] # (arg, startpos, stoppos) for the PUT opcodes
prevpos = None # set to pos if previous opcode was a PUT
for opcode, arg, pos in genops(p):
if prevpos is not None:
puts.append((prevarg, prevpos, pos))
prevpos = None
if 'PUT' in opcode.name:
prevarg, prevpos = arg, pos
elif 'GET' in opcode.name:
gets.add(arg)
# Copy the pickle string except for PUTS without a corresponding GET
s = []
i = 0
for arg, start, stop in puts:
j = stop if (arg in gets) else start
s.append(p[i:j])
i = stop
s.append(p[i:])
return b''.join(s)
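# Hedged usage sketch for optimize(): the optimized pickle should load to an
# equal object while dropping PUT opcodes whose memo slots no GET ever reads
# (protocol pinned to 2 so every opcode is known to this module).
#
#     import pickle
#     p = pickle.dumps([('spam', 'eggs')] * 3, protocol=2)
#     q = optimize(p)
#     assert pickle.loads(q) == pickle.loads(p)
#     assert len(q) <= len(p)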
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing a (at least one)
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg 'indentlevel' is the number of blanks by which to indent
a new MARK level. It defaults to 4.
Optional arg 'annotate' if nonzero instructs dis() to add short
description of the opcode on each line of disassembled output.
The value given to 'annotate' must be an integer and is used as a
hint for the column where annotation should start. The default
value is 0, meaning no annotations.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
        memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
    annocol = annotate # column hint for annotations
for opcode, arg, pos in genops(pickle):
if pos is not None:
print("%5d:" % pos, end=' ', file=out)
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
assert arg is not None
if arg in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[arg] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
if annotate:
line += ' ' * (annocol - len(line))
# make a mild effort to align annotations
annocol = len(line)
if annocol > 50:
annocol = annotate
line += ' ' + opcode.doc.split('\n', 1)[0]
print(line, file=out)
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print("highest protocol among opcodes =", maxproto, file=out)
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import sys, argparse
parser = argparse.ArgumentParser(
description='disassemble one or more pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the output should be written')
parser.add_argument(
'-m', '--memo', action='store_true',
help='preserve memo between disassemblies')
parser.add_argument(
'-l', '--indentlevel', default=4, type=int,
help='the number of blanks by which to indent a new MARK level')
parser.add_argument(
'-a', '--annotate', action='store_true',
help='annotate each line with a short opcode description')
parser.add_argument(
'-p', '--preamble', default="==> {name} <==",
help='if more than one pickle file is specified, print this before'
' each disassembly')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
annotate = 30 if args.annotate else 0
if not args.pickle_file:
parser.print_help()
elif len(args.pickle_file) == 1:
dis(args.pickle_file[0], args.output, None,
args.indentlevel, annotate)
else:
memo = {} if args.memo else None
for f in args.pickle_file:
preamble = args.preamble.format(name=f.name)
args.output.write(preamble + '\n')
dis(f, args.output, memo, args.indentlevel, annotate)
| wdv4758h/ZipPy | lib-python/3/pickletools.py | Python | bsd-3-clause | 79,093 | 0.000582 |
from tests import BaseTestCase
import mock
import time
from redash.models import User
from redash.authentication.account import invite_token
from tests.handlers import get_request, post_request
class TestInvite(BaseTestCase):
def test_expired_invite_token(self):
with mock.patch('time.time') as patched_time:
patched_time.return_value = time.time() - (7 * 24 * 3600) - 10
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_invite_token(self):
response = get_request('/invite/badtoken', org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_valid_token(self):
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 200)
def test_already_active_user(self):
pass
class TestInvitePost(BaseTestCase):
def test_empty_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_bad_token(self):
response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_already_active_user(self):
pass
def test_valid_password(self):
token = invite_token(self.factory.user)
password = 'test1234'
response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org)
self.assertEqual(response.status_code, 302)
user = User.get_by_id(self.factory.user.id)
self.assertTrue(user.verify_password(password))
| easytaxibr/redash | tests/handlers/test_authentication.py | Python | bsd-2-clause | 2,187 | 0.003201 |
#!/usr/bin/env python
# Notes:
# 1) ini file items:
# NGCGUI_PREAMBLE
# NGCGUI_SUBFILE
# NGCGUI_POSTAMBLE
# NGCGUI_OPTIONS
# nonew disallow new tabs
# noremove disallow removal of tabs
# noauto don't automatically send result file
# noexpand (ngcgui only; not supported by pyngcgui)
# nom2 (no m2 terminator (use %))
# 2) To make an embedded pyngcgui fit on a small screen:
# Try:
# max_parms=10|20|30 (will reject otherwise valid subfiles)
# image_width=240
# reduce subroutine parm name lengths and/or comment string length
#------------------------------------------------------------------------------
# Copyright: 2013-4
# Author: Dewey Garrett <dgarrett@panix.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#------------------------------------------------------------------------------
""" python classes to implement an ngcgui-like application
These ini file items are compatible with ngcgui.tcl:
[DISPLAY]NGCGUI_PREAMBLE single specifier
[DISPLAY]NGCGUI_POSTAMBLE single specifier
[DISPLAY]NGCGUI_SUBFILE multiples allowed, use "" for Custom tab
[DISPLAY]NGCGUI_OPTIONS
noremove disallow tabpage removal
nonew disallow tabpage creation
noiframe don't show image in tabpage
noauto don't automatically send result file
[DISPLAY]PROGRAM_PREFIX subroutine path: start
[RS274NGC]SUBROUTINE_PATH subroutine path: middle
[WIZARD]WIZARD_ROOT subroutine path: end
[DISPLAY]NGCGUI_FONT not used
[DISPLAY]TKPKG not applicable
"""
from types import * # IntType etc
import os
import sys
import re
import gtk
import getopt
import datetime
import subprocess
import linuxcnc
import hashlib
import gobject
import glob
import shutil
import popupkeyboard
import exceptions # for debug printing
import traceback # for debug printing
import hal # notused except for debug
from gladevcp import hal_actions
g_ui_dir = linuxcnc.SHARE + "/linuxcnc"
# determine if glade interface designer is running
# in order to prevent connection of most signals
g_is_glade = False
if ( ('glade' in sys.argv[0])
and ('gladevcp' not in sys.argv[0])):
for d in os.environ['PATH'].split(':'):
f = os.path.join(d,sys.argv[0])
if ( os.path.isfile(f)
and os.access(f, os.X_OK)):
g_is_glade = True
break
g_alive = not g_is_glade
import gettext
LOCALEDIR = linuxcnc.SHARE + "/locale"
gettext.install("linuxcnc", localedir=LOCALEDIR, unicode=True)
try:
import pygtk
pygtk.require('2.0')
except ImportError,msg:
print('import pygtk failed: %s',msg)
pass
#------------------------------------------------------------------------------
g_debug = False
g_verbose = False
g_nom2 = False # True for no m2 terminator (use %)
g_strict = False # enforce additional subfile formatting requirements
g_tmode = 0 # for development
g_entry_height = 20 # default parm entry height
# (override for popupkeyboard)
g_big_height = 35 # increased parm entry height value
g_image_width = 320 # image size
g_image_height = 240 # image size
g_check_interval = 2 # periodic check (seconds)
g_label_id = 0 # subroutine labels modifier when expanding in place
g_progname = os.path.splitext(os.path.basename(__file__))[0]
g_dtfmt = "%y%m%d:%H.%M.%S"
g_stat = None # linuxcnc.stat object
g_popkbd = None # PopupKeyboard object
g_candidate_files = None # CandidateFiles object
g_send_function = None # function object f(fname) return True for success
g_tab_controls_loc ='top' # 'top' | 'bottom'
g_keyboardfile = os.path.join(g_ui_dir,'popupkeyboard.ui')
g_control_font = None
g_font_users = []
g_auto_file_ct = 1
INTERP_SUB_PARAMS = 30 # (1-based) conform to:
# src/emc/rs274ngc/interp_internal.hh:#define INTERP_SUB_PARAMS 30
g_max_parm = INTERP_SUB_PARAMS
g_max_msg_len = 500 # limit popup msg len for errant gcmc input
g_gcmc_exe = None
g_gcmc_funcname = 'tmpgcmc'
g_gcmc_id = 0
black_color = gtk.gdk.color_parse('black')
white_color = gtk.gdk.color_parse('white')
error_color = gtk.gdk.color_parse('red')
green_color = gtk.gdk.color_parse('green')
blue_color = gtk.gdk.color_parse('blue')
yellow_color = gtk.gdk.color_parse('yellow')
purple_color = gtk.gdk.color_parse('purple')
feature_color = gtk.gdk.color_parse('lightslategray')
label_normal_color = gtk.gdk.color_parse('lightsteelblue2')
label_active_color = gtk.gdk.color_parse('ivory2')
base_entry_color = gtk.gdk.color_parse('azure1')
fg_created_color = gtk.gdk.color_parse('palegreen')
fg_multiple_color = gtk.gdk.color_parse('cyan')
fg_normal_color = black_color
bg_dvalue_color = gtk.gdk.color_parse('darkseagreen2')
#------------------------------------------------------------------------------
def exception_show(ename,detail,src=''):
print('\n%s:' % src )
print('Exception: %s' % ename )
print(' detail: %s' % detail )
if type(detail) == exceptions.ValueError:
for x in detail:
if type(x) in (StringType, UnicodeType):
print('detail(s):',x)
else:
for y in x:
print('detail(d):',y,)
elif type(detail) == StringType:
print('detail(s):',detail)
elif type(detail) == ListType:
for x in detail:
print('detail(l):',x)
else:
print(ename,detail)
if g_debug:
#print(sys.exc_info())
print( traceback.format_exc())
def save_a_copy(fname,archive_dir='/tmp/old_ngc'):
if fname is None:
return
try:
if not os.path.exists(archive_dir):
os.mkdir(archive_dir)
shutil.copyfile(fname
,os.path.join(archive_dir,dt() + '_' + os.path.basename(fname)))
except IOError,msg:
print(_('save_a_copy: IOError copying file to %s') % archive_dir)
print(msg)
except Exception, detail:
exception_show(Exception,detail,src='save_a_copy')
print(traceback.format_exc())
sys.exit(1)
def get_linuxcnc_ini_file():
ps = subprocess.Popen('ps -C linuxcncsvr --no-header -o args'.split(),
stdout=subprocess.PIPE
)
p,e = ps.communicate()
if ps.returncode:
print(_('get_linuxcnc_ini_file: stdout= %s') % p)
print(_('get_linuxcnc_ini_file: stderr= %s') % e)
return None
ans = p.split()[p.split().index('-ini')+1]
return ans
def dummy_send(filename):
return False # always fail
def default_send(filename):
import gladevcp.hal_filechooser
try:
s = linuxcnc.stat().poll()
except:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('linuxcnc not running')
,msg = _('cannot send, linuxcnc not running'))
return False
try:
fchooser = gladevcp.hal_filechooser.EMC_Action_Open()
fchooser._hal_init()
fchooser._load_file(filename)
return True
except:
return False
def send_to_axis(filename): # return True for success
# NB: file with errors may hang in axis gui
s = subprocess.Popen(['axis-remote',filename]
,stdout=subprocess.PIPE
,stderr=subprocess.PIPE
)
p,e = s.communicate()
if s.returncode:
print(_('%s:send_to_axis: stdout= %s') % (g_progname,p))
print(_('%s:send_to_axis: stderr= %s') % (g_progname,e))
return False
if p: print(_('%s:send_to_axis: stdout= %s') % (g_progname,p))
if e: print(_('%s:send_to_axis: stderr= %s') % (g_progname,e))
return True
def file_save(fname,title_message='Save File'):
start_dir = os.path.dirname(fname)
if start_dir == '': start_dir = os.path.curdir
fc = gtk.FileChooserDialog(title=title_message
,parent=None
,action=gtk.FILE_CHOOSER_ACTION_SAVE
,buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL
,gtk.STOCK_OK, gtk.RESPONSE_OK
)
,backend=None
)
fc.set_current_folder(start_dir)
fc.set_do_overwrite_confirmation(True)
filter = gtk.FileFilter()
filter.set_name('NGC files')
filter.add_pattern('*.ngc')
filter.add_pattern('*.NGC')
filter.add_pattern('*.nc')
filter.add_pattern('*.NC')
filter.add_pattern('*.gcmc')
filter.add_pattern('*.GCMC')
fc.add_filter(filter)
fc.set_current_name(os.path.basename(fname)) # suggest name (for save)
fname = None
ans = fc.run()
if ans == gtk.RESPONSE_OK:
fname = fc.get_filename()
elif ans == gtk.RESPONSE_CANCEL:
print(_('file_save:canceled'))
elif ans == gtk.RESPONSE_DELETE_EVENT: # window close
print(_('file_save:window closed'))
else:
raise IOError,_('file_save:unexpected')
fc.destroy()
return(fname)
def is_comment(s):
if s[0] == ';': return bool(1) # ;xxx
elif s[0] == '(':
if s[-2] == ')': return bool(1) # (yyy)
else: return bool(1) # (yyy)zzz maybe bogus
return bool(0)
def get_info_item(line):
# expect line as unaltered line with whitespace
l = line.translate(None,' \t').lower()
r = re.search(r'^\(info:(.*)\)',l)
if r:
r = re.search(r'.*info:(.*)\)',line)
if r: return r.group(1)
return None
def check_sub_start(s):
r = re.search(r'^o<(.*)>sub.*',s)
if r:
#print('check_sub_start:g0:',r.group(0))
#print('check_sub_start:g1:',r.group(1))
return r.group(1)
return None
def check_sub_end(s):
r = re.search(r'^o<(.*)>endsub.*',s)
if r:
#print('check_sub_end:g0:',r.group(0))
#print('check_sub_end:g1:',r.group(1))
return r.group(1)
return None
def check_for_label(s):
r = re.search(r'^o<(.*?)> *(sub|endsub).*',s)
if r:
return 'ignoreme' # do not include on expand
r = re.search(r'^o<(.*?)> *(call).*',s)
if r:
return None # do not mod label on expand
r = re.search(r'^o<(.*?)>.*',s)
if r:
return r.group(1) # make label unique on expand
return None
def check_positional_parm_range(s,min,max):
r = re.search(r'#([0-9]+)',s)
if r: pnum = int(r.group(1))
# here check is against system limit; g_max_parm applied elsewhere
if r and (pnum <= INTERP_SUB_PARAMS):
if pnum < min: min = pnum
if pnum > max: max = pnum
return pnum,min,max
return None,None,None
def find_positional_parms(s):
# requires original line (mixed case with whitespace)
# find special association lines for positional parameters
# The '*', '+', and '?' qualifiers are all greedy.
# Greedy <.*> matches all of <H1>title</H1>
    # Non-greedy <.*?> matches only the first <H1>
# case1 #<parmname>=#n (=defaultvalue comment_text)
# case2 #<parmname>=#n (=defaultvalue)
# case3 #<parmname>=#n (comment_text)
# case4 #<parmname>=#n
name = None
pnum = None
dvalue = None
comment = None
s = s.expandtabs() # tabs to spaces
r = re.search(
r' *# *<([a-z0-9_-]+)> *= *#([0-9]+) *\(= *([0-9.+-]+[0-9.]*?) *(.*)\)'
,s,re.I)
#case1 1name 2pnum 3dvalue 4comment
if r is None: r=re.search(
r' *# *<([a-z0-9_-]+)> *= *#([0-9]+) *\( *([0-9.+-]+)\)',s,re.I)
#case2 1name 2pnum 3dvalue
if r is None: r=re.search(
r' *# *<([a-z0-9_-]+)> *= *#([0-9]+) *\((.*)\)',s,re.I)
#case3 1name 2pnum 3comment
if r is None: r=re.search(
r' *# *<([a-z0-9_-]+)> *= *#([0-9]+) *$',s,re.I)
#case4 1name 2pnum
# if r:
# for i in range(0,1+len(r.groups())):
# print('PARSE groups',len(r.groups()),i,r.group(i))
if r:
n = len(r.groups())
if r and n >= 2:
name = comment = r.group(1) # use name as comment if not specified
pnum = int(r.group(2))
# here check is against system limit; g_max_parm applied elsewhere
if pnum > INTERP_SUB_PARAMS:
return None,None,None,None
if n == 3:
if r.group(3)[0] == '=': dvalue = r.group(3)[1:]
else: comment = r.group(3)[:]
if n == 4:
dvalue = r.group(3)
if dvalue.find('.') >= 0:
dvalue = float(dvalue)
else:
dvalue = int(dvalue)
if r.group(4): comment = r.group(4)
if n > 4:
            print('find_positional_parms unexpected n>4',s)
comment = r.group(4)
if comment is None:
print('find_positional_parameters:NOCOMMENT') # can't happen
comment = ''
return name,pnum,dvalue,comment
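# Example (editor's sketch): concrete subfile lines for the four cases handled
# by find_positional_parms(), using a hypothetical parameter name 'xlen':
#
#   case1:  #<xlen> = #1 (=2.500 length of block)
#   case2:  #<xlen> = #1 (=2.500)
#   case3:  #<xlen> = #1 (length of block)
#   case4:  #<xlen> = #1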
def user_message(title=""
,mtype=gtk.MESSAGE_INFO
,flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
,msg=None):
if msg is None: return(None)
if type(msg) == ListType:
txt = "".join(msg)
else:
txt = msg
vprint('USER_MESSAGE:\n%s' % txt)
popup = gtk.MessageDialog(parent = None
,flags=flags
,type=mtype
,buttons = gtk.BUTTONS_OK
,message_format = txt
)
popup.set_title(title)
result = popup.run()
popup.destroy()
return(result)
def dt():
return(datetime.datetime.now().strftime(g_dtfmt))
def md5sum(fname):
if not fname: return None
return(hashlib.md5(open(fname, 'r').read()).hexdigest())
def find_image(fname):
found = False
for suffix in ('png','gif','jpg','pgm'):
name = os.path.splitext(os.path.basename(fname))[0]
dir = os.path.dirname(fname)
ifile = os.path.join(dir,name + '.' + suffix)
if os.path.isfile(ifile):
found = True
break
if not found: return None
return ifile
def sized_image(ifile):
twidth = g_image_width
theight = g_image_height
img = gtk.Image()
img.set_from_file(ifile)
pixbuf = img.get_pixbuf()
iwidth = pixbuf.get_width() # image size
iheight = pixbuf.get_height()
scale = min(float(twidth)/iwidth, float(theight)/iheight)
#print('iw,ih %d,%d tw,th=%d,%d, scale=%f' % (
# iwidth,iheight,twidth,theight,scale))
new_width = int(scale*iwidth)
new_height = int(scale*iheight)
pixbuf = pixbuf.scale_simple(new_width,new_height
,gtk.gdk.INTERP_BILINEAR)
img.set_from_pixbuf(pixbuf)
return(img)
def show_dir(x,tag=''):
l = []
for name in sorted(dir(x)):
if name[0:2] == '__': continue
item = getattr(x,name)
ty = type(item)
if ty == MethodType:
l.append('%-8s %s()' % ('0 Meth',name))
elif ty == ListType:
i = 0
for v in item:
try:
vnonewline = v[:-1] if v.endswith('\n') else v
l.append('%-8s %s[%2s] = %s' % ('2 List',name,i,vnonewline))
i += 1
except:
l.append('xxx %s %s' % (name,str(item)))
elif ty == DictionaryType:
for k in sorted(item):
l.append('%-8s %s[%2s] = %s' % ('3 Dict',name,k,item[k]))
elif ty == BooleanType:
l.append('%-8s %s = %s' % ('4 Bool',name,str(item)))
elif ty == IntType:
l.append('%-8s %s = %s' % ('5 Int',name,str(item)))
elif ty == FloatType:
l.append('%-8s %s = %s' % ('6 Float',name,str(item)))
elif ty == StringType:
l.append('%-8s %s = %s' % ('7 Str',name,item))
else:
s = str(item).split(' ')[0] + '>'
s=item
l.append('%-8s %s = %s' % ('1 Obj',name,s))
print('\n')
print('%s----------------------------------------------------------' % tag)
for i in sorted(l):
print(i)
print('%s==========================================================' % tag)
def dprint(txt):
if g_debug:
print(':' + txt)
def vprint(txt):
if g_verbose:
print('::' + txt)
def spath_from_inifile(fname):
if not fname:
return []
ini = linuxcnc.ini(fname)
homedir = os.path.dirname(os.path.realpath(fname))
# http://www.linuxcnc.org/docs/devel/html/config/ini_config.html
l = []
p = ini.find('DISPLAY','PROGRAM_PREFIX')
if p:
l = [p]
p = ini.find('RS274NGC','SUBROUTINE_PATH')
if p:
newdirs = p.split(':')
for dir in newdirs:
            # don't add duplicates
if dir not in l:
l.append(dir)
p = ini.find('WIZARD','WIZARD_ROOT')
if p:
l.extend(p.split(':'))
lfull = []
for d in l:
d = os.path.expanduser(d)
if os.path.isabs(d):
lfull.append(d)
else:
# relative path implies cwd is correct
d2 = os.path.join(homedir,d)
lfull.append(os.path.abspath(d2))
if lfull:
return lfull
return []
def mpath_from_inifile(fname):
if not fname:
return None
    ini = linuxcnc.ini(fname)
homedir = os.path.dirname(os.path.abspath(fname))
l = []
p = ini.find('DISPLAY','PROGRAM_PREFIX')
if p:
l = [p]
else:
        l = ['nc_files']
p = ini.find('RS274NGC','USER_M_PATH')
if p:
l.extend(p.split(':'))
lfull = []
for d in l:
if os.path.isabs(d):
lfull.append(d)
else:
d2 = os.path.join(homedir,d)
lfull.append(os.path.abspath(d2))
if lfull:
return lfull
return None
def spath_from_files(pre_file,sub_files,pst_file):
# when there is no ini file for path because
# linuxcnc not running
# and
    # no ini specified on cmd line
l = []
slist = []
if type(sub_files) == StringType and sub_files:
slist.append(sub_files)
else:
slist = sub_files
for sub_file in slist:
dir = os.path.dirname(os.path.abspath(sub_file))
if dir not in l:
l.append(dir)
if pre_file:
dir = os.path.dirname(os.path.abspath(pre_file))
if dir not in l:
l.append(dir)
if pst_file:
dir = os.path.dirname(os.path.abspath(pst_file))
if dir not in l:
l.append(dir)
if l:
return l
return []
def long_name(name):
if name == 'pre':
return 'Preamble'
elif name == 'sub':
return 'Subroutine'
elif name == 'pst':
return 'Postamble'
else:
return 'Unknown'
def show_parent(w,ct=0):
if w is None:
print('show_parent: None')
return
print('show_parent:',ct,w)
if w.is_toplevel():
print('TOP\n')
return
else:
show_parent(w.get_parent(),ct+1)
def all_coords(iterable):
ans = ''
for t in iterable:
ans = ans + '%7.3f' % t
return ans
def show_position():
g_stat.poll()
print('POSITION-----------------------------------------------------')
print(' ap',all_coords(g_stat.actual_position))
print(' p',all_coords(g_stat.position))
l = []
p = g_stat.actual_position
for i in range(9): l.append(p[i]
- g_stat.g5x_offset[i]
- g_stat.tool_offset[i]
)
print('offset ap',all_coords(l))
l = []
p = g_stat.position
for i in range(9): l.append(p[i]
- g_stat.g5x_offset[i]
- g_stat.tool_offset[i]
)
print('offset p',all_coords(l))
print('POSITION=====================================================')
def coord_value(char):
# offset calc from emc_interface.py (touchy et al)
# char = 'x' | 'y' | ...
# 'd' is for diameter
c = char.lower()
g_stat.poll()
p = g_stat.position # tuple: (xvalue, yvalue, ...
if (c == 'd'):
if (1 & g_stat.axis_mask):
# diam = 2 * x
return (p[0] - g_stat.g5x_offset[0] - g_stat.tool_offset[0])* 2
else:
return 'xxx' # return a string that will convert with float()
axno = 'xyzabcuvw'.find(c)
if not ( (1 << axno) & g_stat.axis_mask ):
return 'xxx' # return a string that will convert with float()
return p[axno] - g_stat.g5x_offset[axno] - g_stat.tool_offset[axno]
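# Example (editor's sketch): typing a single axis letter into a parameter
# entry substitutes the current relative coordinate for that axis.  With the
# machine at (hypothetically) X=1.2345 machine units and a G5x X offset of
# 1.0000, entering 'x' yields 0.2345; see OneParmEntry.entry_changed(), which
# calls coord_value() for this purpose.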
def make_g_styles():
dummylabel = gtk.Label()
global g_lbl_style_default
g_lbl_style_default = dummylabel.get_style().copy()
g_lbl_style_default.bg[gtk.STATE_NORMAL] = label_normal_color
g_lbl_style_default.bg[gtk.STATE_ACTIVE] = label_active_color
global g_lbl_style_created
g_lbl_style_created = dummylabel.get_style().copy()
global g_lbl_style_multiple
g_lbl_style_multiple = dummylabel.get_style().copy()
g_lbl_style_multiple.bg[gtk.STATE_NORMAL] = feature_color
g_lbl_style_multiple.bg[gtk.STATE_ACTIVE] = feature_color
g_lbl_style_created.bg[gtk.STATE_NORMAL] = feature_color
g_lbl_style_created.bg[gtk.STATE_ACTIVE] = feature_color
del dummylabel
dummyentry = gtk.Entry()
global g_ent_style_normal
g_ent_style_normal = dummyentry.get_style().copy()
global g_ent_style_default
g_ent_style_default = dummyentry.get_style().copy()
global g_ent_style_error
g_ent_style_error = dummyentry.get_style().copy()
g_ent_style_normal.base[gtk.STATE_NORMAL] = base_entry_color
g_ent_style_default.base[gtk.STATE_NORMAL] = bg_dvalue_color
g_ent_style_error.text[gtk.STATE_NORMAL] = error_color
g_ent_style_error.base[gtk.STATE_NORMAL] = base_entry_color
del dummyentry
def mod_font_by_category(obj,mode='control'):
# currently mode = control (only)
# touchy has 4 font categories: control,dro,error,listing
if mode == 'control':
font = g_control_font
else:
print('mod_font_by_category:unknown mode %s' % mode)
return
targetobj = None
if type(obj) == type(gtk.Label()):
targetobj = obj
elif type(obj) == type(gtk.Entry()):
targetobj = obj
elif type(obj) == type(gtk.Button()):
#gtk.Alignment object
if isinstance(obj.child, gtk.Label):
targetobj = obj.child
elif isinstance(obj.child, gtk.Alignment):
pass
elif hasattr(obj,'modify_font'):
targetobj = obj
else:
raise ValueError,'mod_font_by_category: no child'
return
else:
        raise ValueError,'mod_font_by_category: unsupported: %s' % type(obj)
return
if targetobj is None:
return
if font is None:
#print('mod_font_by_category:nofont available for %s' % mode)
return # silently
targetobj.modify_font(g_control_font)
global g_font_users
if targetobj not in g_font_users:
g_font_users.append(targetobj)
def update_fonts(fontname):
global g_control_font
g_control_font = fontname
for obj in g_font_users:
mod_font_by_category(obj)
def clean_tmpgcmc(odir):
if odir == "":
odir = g_searchpath[0]
savedir = os.path.join("/tmp", g_gcmc_funcname) # typ /tmp/tmpgcmc
if not os.path.isdir(savedir):
os.mkdir(savedir,0755)
for f in glob.glob(os.path.join(odir,g_gcmc_funcname + "*.ngc")):
        # os.rename fails across file systems, so use shutil.move
shutil.move(f,os.path.join(savedir,os.path.basename(f)))
def find_gcmc():
global g_gcmc_exe # find on first request
if g_gcmc_exe == "NOTFOUND": return False # earlier search failed
if g_gcmc_exe is not None: return True # already found
for dir in os.environ["PATH"].split(os.pathsep):
exe = os.path.join(dir,'gcmc')
if os.path.isfile(exe):
if os.access(exe,os.X_OK):
clean_tmpgcmc("") # clean on first find_gcmc
g_gcmc_exe = exe
return True # success
g_gcmc_exe = "NOTFOUND"
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('Error for:')
,msg = _('gcmc executable not available:'
+ '\nCheck path and permissions'))
return False # fail
#-----------------------------------------------------------------------------
make_g_styles()
class CandidateDialog():
"""CandidateDialog: dialog with a treeview in a scrollwindow"""
def __init__(self,ftype=''):
self.ftype = ftype
lname = long_name(self.ftype)
title = "Choose %s file" % lname
btns=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT
,gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
if ( (self.ftype == 'pre') or (self.ftype == 'pst') ):
# RESPONSE_NO used to allow 'nofile' for 'pre','pst'
btns = btns + ('No %s File' % lname, gtk.RESPONSE_NO)
self.fdialog = gtk.Dialog(title=title
,parent=None
,flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
,buttons=btns
)
self.fdialog.set_size_request(600,600)
scrollw = gtk.ScrolledWindow()
scrollw.set_border_width(5)
scrollw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
scrollw.show()
box = self.fdialog.get_content_area()
box.pack_start(scrollw, True, True, 0)
global g_candidate_files
self.canfiles = g_candidate_files
self.canfiles.refresh()
self.treestore = g_candidate_files.treestore
self.treeview = gtk.TreeView(self.treestore)
if g_alive: self.treeview.connect('row-activated',self.row_activated)
column0 = gtk.TreeViewColumn('Subroutine Directories')
self.treeview.append_column(column0)
cell0 = gtk.CellRendererText()
column0.pack_start(cell0, True)
column0.add_attribute(cell0, 'text', 0)
column1 = gtk.TreeViewColumn('Hint')
self.treeview.append_column(column1)
cell1 = gtk.CellRendererText()
column1.pack_start(cell1, True)
column1.add_attribute(cell1, 'text', 1)
column2 = gtk.TreeViewColumn('mtime')
self.treeview.append_column(column2)
cell2 = gtk.CellRendererText()
column2.pack_start(cell2, True)
column2.add_attribute(cell2, 'text', 2)
scrollw.add_with_viewport(self.treeview)
scrollw.show_all()
def get_file_result(self):
# return: (name,errmsg)
try:
(model,iter) = self.treeview.get_selection().get_selected()
except AttributeError:
return(None,'') # nothing selected
if not iter:
return(None,'')
fname,status,mtime = self.canfiles.get_tree_data(iter)
if os.path.isdir(fname):
return(None,'') # cannot use a selected dir
        ok = True # assume usable; the checks below may contradict this
if (self.ftype == 'pre') or (self.ftype == 'pst'):
if status.find('not_a_subfile') >= 0: ok = True
if status.find('Preempted') >= 0: ok = False
else:
if status.find('not_a_subfile') >= 0: ok = False
if status.find('not_allowed') >= 0: ok = False
if status.find('Preempted') >= 0: ok = False
if ok:
return (fname,'')
emsg = (_('The selected file is not usable\n'
'as a %s file\n'
'(%s)') % (long_name(self.ftype),status)
)
return('TRYAGAIN',emsg)
def row_activated(self,tview,iter,column):
self.fdialog.response(gtk.RESPONSE_ACCEPT)
pass
def run(self):
return(self.fdialog.run())
def destroy(self):
self.fdialog.destroy()
class CandidateFiles():
"""CandidateFiles treestore for candidate files"""
def __init__(self,dirlist):
self.dirlist=dirlist
self.treestore = gtk.TreeStore(str,str,str)
self.tdict = {}
self.make_tree()
def refresh(self):
# currently, just do over
# potential to reread only files with modified mtimes
self.__init__(self.dirlist)
def make_tree(self):
didx = 0
flist = []
for dir in self.dirlist:
self.tdict[didx,] = dir
# row must be a tuple or list containing as many items
# as the number of columns
try:
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(dir))
except OSError,detail:
print(_('%s:make_tree:%s' % (g_progname,detail) ))
continue # try to skip this dir with message
mtime = mtime.strftime(g_dtfmt) # truncate fractional seconds
iter = self.treestore.append(None, [dir,"Directory",mtime])
fidx = 0
for f in ( sorted(glob.glob(os.path.join(dir,"*.ngc")))
+ sorted(glob.glob(os.path.join(dir,"*.NGC")))
+ sorted(glob.glob(os.path.join(dir,"*.gcmc")))
+ sorted(glob.glob(os.path.join(dir,"*.GCMC")))
):
fname = os.path.basename(f)
self.tdict[didx,fidx] = fname
stat = ""
fd = open(f)
ftxt = fd.read()
fd.close()
if os.path.splitext(fname)[-1] in ['.gcmc','.GCMC']:
stat = '%sgcmc:ok' % stat
if ftxt.find('not_a_subfile') >= 0:
stat = '%snot_a_subfile ' % stat
if ftxt.find('(info:') >= 0:
stat = '%sngcgui-ok ' % stat
if fname in flist:
stat = '%sPreempted ' % stat
if ftxt.find('FEATURE') >= 0:
stat = '%snot_allowed ' % stat
if stat == "":
stat = "?"
if stat.find("Preempted") >= 0:
stat = "Preempted" # suppress ok
flist.append(fname)
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(f))
mtime = mtime.strftime(g_dtfmt) # truncate fractional seconds
self.treestore.append(iter, [fname,stat,mtime])
fidx += 1
didx += 1
def get_tree_data(self,iter):
path = self.treestore.get_path(iter)
if len(path) > 1:
row,col = path
dir = self.tdict[row,]
fname = self.treestore.get_value(iter,0)
status = self.treestore.get_value(iter,1)
mtime = self.treestore.get_value(iter,2)
else:
dir = self.tdict[path]
fname = ''
status = ''
mtime = ''
return os.path.join(dir,fname),status,mtime
class LinuxcncInterface():
"""LinuxcncInterface: ini file and running linuxcnc data"""
def __init__(self,cmdline_ini_file=''):
self.lrunning = False
self.ini_data = None
self.subroutine_path = []
self.user_m_path = None
self.ini_file = None
self.ngcgui_options = []
self.editor = os.environ.get("VISUAL")
use_ini_file = None
l_ini_file = ''
stat = linuxcnc.stat()
try:
global g_stat
g_stat = linuxcnc.stat()
g_stat.poll() # poll faults if linuxcnc not running
self.lrunning = True
l_ini_file = get_linuxcnc_ini_file()
except linuxcnc.error,msg:
g_stat = None
print('INTFC:err:',msg)
print('INTFC:' + _('Warning: linuxcnc not running'))
print('%s:INTFC:linuxcnc running=%d' % (g_progname,self.lrunning))
print('%s:INTFC:ini_file=<%s>' % (g_progname,l_ini_file))
# cmdline_ini_file can be specified on cmdline and from intfc:
# if neither ok: if no cmdline subfile, make custom page
# if cmdonly ok
# if runonly ok
# if both ok: warn message and continue
if cmdline_ini_file:
cmdline_spath = spath_from_inifile(cmdline_ini_file)
if l_ini_file:
l_spath = spath_from_inifile(l_ini_file)
if not cmdline_ini_file and not l_ini_file:
ini_file = None
spath = []
#print('NEITHER')
if not cmdline_ini_file and l_ini_file:
ini_file = l_ini_file
spath = l_spath
#print("OK running only <,",cmdline_ini_file,l_ini_file,">")
if cmdline_ini_file and not l_ini_file:
ini_file = cmdline_ini_file
spath = cmdline_spath
#print('OK cmdline only')
if cmdline_ini_file and l_ini_file:
#print("BOTH ini file on both cmdline and running linuxcnc")
msg = ""
if os.path.abspath(cmdline_ini_file) != l_ini_file:
ini_file = l_ini_file
msg = (_('The ini file specified on cmdline') + ':\n'
+ os.path.abspath(cmdline_ini_file) + '\n\n'
+ _('is different from the one used by the running linuxcnc')
+ ':\n'
+ l_ini_file + '\n\n'
)
if cmdline_spath == l_spath:
ini_file = cmdline_ini_file
spath = cmdline_spath
msg = msg + _('Using cmd line ini file (same paths)')
else:
ini_file = l_ini_file
spath = l_spath
msg = msg + _('Ignoring cmd line ini file (different paths)')
user_message(mtype=gtk.MESSAGE_WARNING
,title=_('Warning')
,msg=msg
)
if ini_file:
self.ini_file = ini_file
self.ini_data = linuxcnc.ini(self.ini_file)
# get it again to avoid (unlikely) race
self.subroutine_path = spath_from_inifile(ini_file)
self.ngcgui_options = self.ini_data.find('DISPLAY','NGCGUI_OPTIONS')
self.editor = ( self.editor
or self.ini_data.find('DISPLAY','EDITOR'))
# create at startup, refresh as required
global g_candidate_files
g_candidate_files = CandidateFiles(self.get_subroutine_path())
def addto_spath(self,pathtoadd):
if type(pathtoadd) != ListType:
raise ValueError,(
'addto_spath: List required not: %s %s'
% (pathtoadd,type(pathtoadd))
)
        # don't add duplicates
if pathtoadd not in self.subroutine_path:
self.subroutine_path.extend(pathtoadd)
def get_editor(self):
return self.editor or 'gedit'
def get_ini_file(self):
return(self.ini_file)
def get_subroutine_path(self):
return(self.subroutine_path)
def get_user_m_path(self):
return(self.user_m_path)
def find_file_in_path(self,fname):
# return tuple:
# '', 'NULLFILE' if fname None or ''
# fname, 'NOPATH' no path defined (eg no inifile)
# foundfilename, 'FOUND' found in path
# fname, 'NOTFOUND' not in path (may exist)
if not fname:
return('','NULLFILE')
if not self.subroutine_path:
return(fname,'NOPATH')
bname = os.path.basename(fname) # only basename used
foundlist = []
foundfilename = None
for p in self.subroutine_path:
f = os.path.join(p,bname)
if os.path.isfile(f):
if not foundfilename:
foundfilename = f #first one wins
foundlist.append(f)
if len(foundlist) > 1:
print(_('find_file_in_path:Multiple Results: %s') % foundlist)
print(_(' Search path: %s') % self.subroutine_path)
if foundfilename:
vprint('find_file_in_path:%s' % foundfilename)
return(foundfilename,'FOUND')
print('find_file_in_path<%s> NOTFOUND' % fname)
return(fname,'NOTFOUND')
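    # Example (editor's sketch): hypothetical use of the status tuple returned
    # by find_file_in_path():
    #
    #     fname, stat = intfc.find_file_in_path('my_subfile.ngc')
    #     if stat == 'FOUND':
    #         open(fname).read()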
def get_subfiles(self):
if self.ini_data:
#returns list
return(self.ini_data.findall('DISPLAY','NGCGUI_SUBFILE'))
else:
return(None)
def get_preamble(self):
if self.ini_data:
return(self.ini_data.find('DISPLAY','NGCGUI_PREAMBLE'))
else:
return(None)
def get_postamble(self):
if self.ini_data:
return(self.ini_data.find('DISPLAY','NGCGUI_POSTAMBLE'))
else:
return(None)
def get_font(self):
if self.ini_data:
return(self.ini_data.find('DISPLAY','NGCGUI_FONT'))
else:
return(None)
def get_ngcgui_options(self):
return(self.ngcgui_options or [])
def get_gcmc_include_path(self):
dirs = (self.ini_data.find('DISPLAY','GCMC_INCLUDE_PATH'))
return(dirs)
def get_program_prefix(self):
if self.ini_data:
dir = self.ini_data.find('DISPLAY','PROGRAM_PREFIX')
dir = os.path.expanduser(dir)
if not os.path.isabs(dir):
# relative, base on inidir
dir = os.path.join(os.path.dirname(self.ini_file),dir)
return(dir)
else:
return(None)
class PreFile():
"""PreFile: preamble file data"""
def __init__(self,thefile):
self.pre_file = thefile
self.read()
def clear(self):
self.pre_file = ''
self.inputlines=[]
def read(self):
#print('PreFile read')
self.md5 = None
self.mtime = None
self.inputlines = []
if self.pre_file == "": return
self.mtime = os.path.getmtime(self.pre_file)
f = open(self.pre_file)
for l in f.readlines():
            # don't include not_a_subfile lines
if (l.find('not_a_subfile') < 0) and (l.strip() != ''):
self.inputlines.append(l)
f.close()
self.md5 = md5sum(self.pre_file)
class PstFile():
"""PstFile: postamble file data"""
def __init__(self,thefile):
self.pst_file = thefile
self.read()
def clear(self):
self.pst_file = ''
self.inputlines = []
def read(self):
#print('PstFile read')
self.md5 = None
self.mtime = None
self.inputlines = []
if self.pst_file == "": return
self.mtime = os.path.getmtime(self.pst_file)
f = open(self.pst_file)
for l in f.readlines():
            # don't include not_a_subfile lines
if (l.find('not_a_subfile') < 0) and (l.strip() != ''):
self.inputlines.append(l)
f.close()
self.md5 = md5sum(self.pst_file)
class SubFile():
"""SubFile: subfile data"""
def __init__(self,thefile):
self.sub_file = thefile
self.min_num = sys.maxint
self.max_num = 0
self.pdict = {} # named items: pdict[keyword] = value
self.ndict = {} # ordinal items: ndict[idx] = (name,dvalue,comment)
self.ldict = {} # label items: ldict[lno] = thelabel
self.pdict['info'] = ''
self.pdict['lastparm'] = 0
self.pdict['subname'] = ''
self.inputlines = []
self.errlist=[]
self.md5 = None
self.mtime = None
if self.sub_file == '': return
self.mtime = os.path.getmtime(self.sub_file)
self.md5 = md5sum(self.sub_file)
if os.path.splitext(self.sub_file)[-1] in ['.ngc','.NGC','.nc','.NC']:
self.read_ngc()
elif os.path.splitext(self.sub_file)[-1] in ['.gcmc','.GCMC']:
self.read_gcmc()
else:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('Unknown file suffix')
,msg = _('Unknown suffix for: %s:')
% os.path.basename(self.sub_file)
)
return
def clear(self):
self.sub_file = ''
self.pdict = {}
self.ndict = {}
self.ldict = {}
self.inputlines = []
def flagerror(self,e):
        # accumulate errors from read() so the entire file can be processed
self.errlist.append(e)
def specialcomments_ngc(self,s):
if s.find(' FEATURE ') >= 0 :
self.flagerror(
"Disallowed use of ngcgui generated file as Subfile")
if s.find('not_a_subfile') >= 0 :
self.flagerror(
"marked (not_a_subfile)\nNot intended for use as a subfile")
def re_read(self):
if self.pdict.has_key('isgcmc'):
self.read_gcmc()
else:
self.read_ngc()
def read_ngc(self):
thesubname = os.path.splitext(os.path.basename(self.sub_file))[0]
f = open(self.sub_file)
self.inputlines = [] # in case rereading
for l in f.readlines():
self.specialcomments_ngc(l) # for compat, check on unaltered line
self.inputlines.append(l)
idx = 1 # 1 based for labels ldict
nextparm = 0
subname = None
endsubname = None
for line in self.inputlines:
# rs274: no whitespace, simplify with lowercase
info = get_info_item(line) # check on unaltered line
l = line.translate(None,' \t').lower()
lineiscomment = is_comment(l)
if info is not None: self.pdict['info'] = info
sname = check_sub_start(l)
if subname is not None and sname is not None:
self.flagerror("Multiple subroutines in file not allowed")
if subname is None and sname is not None:
subname = sname
if subname is not None and subname != thesubname:
self.flagerror("sub label "
"%s does not match subroutine file name" % thesubname)
if endsubname is not None:
if lineiscomment or (l.strip() == ''):
pass
elif l.find('m2') >= 0:
# linuxcnc ignores m2 after endsub in
# single-file subroutines
# mark as ignored here for use with expandsub option
self.inputlines[-1] = (';' + g_progname +
' ignoring: ' + self.inputlines[-1])
pass
else:
                    self.flagerror('file contains lines after endsub:\n'
'%s' % l)
ename = check_sub_end(l)
if subname is None and ename is not None:
self.flagerror("endsub before sub %s" % ename)
if subname is not None and ename is not None:
endsubname = ename
if endsubname != subname:
self.flagerror("endsubname different from subname")
label = check_for_label(l)
if label: self.ldict[idx] = label
if ( subname is not None
and endsubname is None
and (not lineiscomment)):
pparm,min,max= check_positional_parm_range(l
,self.min_num,self.max_num)
if pparm > g_max_parm:
self.flagerror(
_('parm #%s exceeds config limit on no. of parms= %d\n')
% (pparm,g_max_parm))
if pparm:
self.min_num = min
self.max_num = max
# blanks required for this, use line not l
name,pnum,dvalue,comment = find_positional_parms(line)
if name:
self.ndict[pnum] = (name,dvalue,comment)
# require parms in sequence to minimize user errors
nextparm = nextparm + 1
if g_strict:
if pnum != nextparm:
self.flagerror(
                            _('out of sequence positional parameter '
'%d expected: %d')
% (pnum, nextparm))
while pnum > nextparm:
makename = "#"+str(nextparm)
self.ndict[nextparm] = makename,"",makename
nextparm = nextparm + 1
self.pdict['lastparm'] = pnum
idx = idx + 1
f.close()
if subname is None: self.flagerror(_('no sub found in file\n'))
if endsubname is None: self.flagerror(_('no endsub found in file\n'))
if g_strict:
if nextparm == 0: self.flagerror(_('no subroutine parms found\n'))
self.pdict['subname'] = subname
if self.pdict['info'] == '':
self.pdict['info'] = 'sub: '+str(subname)
if self.errlist:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('Error for: %s ')
% os.path.basename(self.sub_file)
,msg = self.errlist)
self.errlist.append('SUBERROR')
raise ValueError,self.errlist
def read_gcmc(self):
self.gcmc_opts = [] # list of options for gcmc
pnum = 0
f = open(self.sub_file)
for l in f.readlines():
rinfo = re.search(r'^ *\/\/ *ngcgui *: *info: *(.*)' ,l)
if rinfo:
#print 'info read_gcmc:g1:',rinfo.group(1)
self.pdict['info'] = rinfo.group(1) # last one wins
continue
ropt = re.search(r'^ *\/\/ *ngcgui *: *(-.*)$' ,l)
if ropt:
gopt = ropt.group(1)
gopt = gopt.split("//")[0] ;# trailing comment
gopt = gopt.split(";")[0] ;# convenience
gopt = gopt.strip() ;# leading/trailing spaces
self.gcmc_opts.append(gopt)
continue
name = None
dvalue = None
comment = ''
r3 = re.search(r'^ *\/\/ *ngcgui *: *(.*?) *= *(.*?) *\, *(.*?) *$', l)
r2 = re.search(r'^ *\/\/ *ngcgui *: *(.*?) *= *(.*?) *$', l)
            r1 = re.search(r'^ *\/\/ *ngcgui *: *\((.*?)\) *$', l)
if r3:
name = r3.group(1)
dvalue = r3.group(2)
comment = r3.group(3)
elif r2:
name = r2.group(1)
dvalue = r2.group(2)
elif r1:
print 'r1-1 opt read_gcmc:g1:',r1.group(1)
name = r1.group(1)
if dvalue:
                # convenience: a variable line can be turned into an
                # //ngcgui: line without removing its trailing semicolon, e.g.
# xstart = 10;
# //ngcgui: xstart = 10;
dvalue = dvalue.split(";")[0] # ignore all past a ;
else:
dvalue = ''
if name:
                if comment == '':
comment = name
pnum += 1
self.ndict[pnum] = (name,dvalue,comment)
self.pdict['isgcmc'] = True
self.pdict['lastparm'] = pnum
self.pdict['subname'] = os.path.splitext(os.path.basename(self.sub_file))[0]
if self.pdict['info'] == '':
self.pdict['info'] = 'gcmc: '+ self.pdict['subname']
f.close()
return True # ok
class FileSet():
"""FileSet: set of preamble,subfile,postamble files"""
def __init__(self,pre_file
,sub_file
,pst_file
):
# sub_file=='' is not an error, opens Custom
self.pre_data = PreFile(pre_file)
self.sub_data = SubFile(sub_file)
self.pst_data = PstFile(pst_file)
class OneParmEntry():
"""OneParmEntry: one parameter labels and entry box"""
def __init__(self,ltxt='ltxt' ,etxt='etxt' ,rtxt='rtxt'):
self.box = gtk.HBox()
self.ll = gtk.Label()
self.en = gtk.Entry()
self.lr = gtk.Label()
self.dv = None
ww = -1
hh = g_entry_height
self.ll.set_label(ltxt)
self.ll.set_width_chars(2)
self.ll.set_justify(gtk.JUSTIFY_RIGHT)
self.ll.set_alignment(xalign=.90,yalign=0.5) # right aligned
self.ll.set_size_request(ww,hh)
self.en.set_text(etxt)
self.en.set_width_chars(6)
self.en.set_alignment(xalign=.90) # right aligned
self.en.set_size_request(ww,hh)
self.en.hide()
#self.en.connect("button-press-event",self.grabit)
if g_popkbd is not None:
if g_alive: self.en.connect("button-press-event",self.popkeyboard)
if g_alive: self.en.connect('changed', self.entry_changed) #-->w + txt
self.lr.set_label(rtxt)
self.lr.set_width_chars(0) # allow any width for compat with ngcgui
self.lr.set_justify(gtk.JUSTIFY_LEFT)
self.lr.set_alignment(xalign=0.2,yalign=0.5) # left aligned
self.lr.set_size_request(ww,hh)
self.lr.hide()
mod_font_by_category(self.lr,'control')
self.tbtns = gtk.HBox(homogeneous=0,spacing=2)
self.tbtns.set_border_width(0)
self.box.pack_start(self.tbtns, expand=0, fill=0, padding=0)
self.tbtns.pack_start(self.ll, expand=0, fill=0, padding=0)
self.tbtns.pack_start(self.en, expand=0, fill=0, padding=0)
self.tbtns.pack_start(self.lr, expand=0, fill=0, padding=0)
def grabit(self,*args,**kwargs):
#print 'grabit',self,args,kwargs
print '\ngrabit:can_get_focus:',self.en.get_can_focus()
self.en.grab_focus()
print 'grabit:has_focus',self.en.has_focus()
print 'grabit: is_focus',self.en.is_focus()
def popkeyboard(self,widget,v):
origtxt = self.en.get_text()
title = '#%s, <%s> %s' % (self.ll.get_text()
,self.en.get_text()
,self.lr.get_text()
)
self.en.set_text('')
if g_popkbd.run(initial_value='',title=title):
self.en.set_text(g_popkbd.get_result())
else:
# user canceled
self.en.set_text(origtxt)
def entry_changed(self,w):
v = w.get_text().lower()
if g_stat:
r = re.search('[xyzabcuvwd]',v)
if r:
char = r.group(0)
try:
w.set_text("%.4f" % coord_value(char))
except TypeError:
pass
except Exception, detail:
exception_show(Exception,detail,'entry_changed')
pass
if v == '':
w.set_style(g_ent_style_normal)
return
else:
try:
float(v)
w.set_style(g_ent_style_normal)
except ValueError:
w.set_style(g_ent_style_error)
return
try:
if ( (self.dv is not None)
and (float(v) == float(self.dv)) ):
w.set_style(g_ent_style_default)
return
except ValueError:
pass
w.set_style(g_ent_style_normal)
return
def getentry(self):
return(self.en.get_text())
def setentry(self,v):
self.en.set_text(v)
def clear_pentry(self):
self.ll.set_text('')
self.en.set_text('')
self.lr.set_text('')
self.ll.hide()
self.en.hide()
self.lr.hide()
def make_pentry(self,ll,dvalue,lr,emode='initial'):
# modes 'initial'
# 'keep'
self.dv = dvalue
if dvalue is None:
en = ''
else:
en = dvalue
if ll is None: ll=''
if lr is None: lr=''
self.ll.set_text(str(ll))
if emode == 'initial':
self.en.set_text(str(en))
# on reread, may be new parms with no text so use default
# if (emode == 'keep') and (not self.en.get_text()):
if (emode == 'keep') and (self.en.get_text() is None):
self.en.set_text(str(en))
self.lr.set_text(str(lr))
if dvalue is None or dvalue == '':
self.en.set_style(g_ent_style_normal) # normal (not a dvalue)
else:
self.en.set_style(g_ent_style_default) # a dvalue
self.ll.show()
self.en.show()
self.lr.show()
self.entry_changed(self.en)
class EntryFields():
"""EntryFields: Positional Parameters entry fields in a frame """
def __init__(self,nparms=INTERP_SUB_PARAMS):
if nparms > g_max_parm:
raise ValueError,(_(
'EntryFields:nparms=%d g_max_parm=%d')
% (nparms,g_max_parm))
self.ebox = gtk.Frame()
self.ebox.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.ebox.set_border_width(2)
efbox = gtk.VBox()
evb = gtk.VBox(homogeneous=0,spacing=2)
xpositionalp = gtk.Label('Positional Parameters')
xpositionalp.set_alignment(xalign=0.0,yalign=0.5) # left aligned
epositionalp = gtk.EventBox()
epositionalp.add(xpositionalp)
epositionalp.modify_bg(gtk.STATE_NORMAL,label_normal_color)
lpositionalp = gtk.Frame()
lpositionalp.set_shadow_type(gtk.SHADOW_IN)
lpositionalp.set_border_width(0)
lpositionalp.add(epositionalp)
self.boxofcolumns = gtk.HBox(homogeneous=0,spacing=2)
evb.pack_start(lpositionalp,expand=0,fill=1,padding=0)
evb.pack_start(self.boxofcolumns, expand=1,fill=1,padding=4)
efbox.pack_start(evb, expand=1,fill=1,padding=0)
self.ebox.add(efbox)
self.make_entryfields(nparms) # initialize for EntryFields
def make_entryfields(self,nparms):
self.no_of_entries = nparms
        # make VBoxes as required to accommodate entries
# destroy them when starting over -- this occurs
# when a OnePg is reused for a different subfile
try:
type(self.columnbox) # test for existence
# destroy prior VBoxes packed in self.boxofcolumns
for c in self.boxofcolumns.children():
self.boxofcolumns.remove(c)
c.destroy()
del(c)
except AttributeError:
# first-time: create initial VBox for entries
self.columnbox = gtk.VBox(homogeneous=0,spacing=2)
self.boxofcolumns.pack_start(self.columnbox)
# try to use minimum height if less than 3 columns
if nparms > 20:
rowmax = 10
else:
rowmax = int(nparms/2 + 0.5)
self.pentries = {}
row = 0
idx = 1 # 1-based to agree with parm no.s
for i in range(0,nparms):
if row >= rowmax:
row = 0
# make a new VBox for next column of entries
self.columnbox = gtk.VBox(homogeneous=0,spacing=2)
self.boxofcolumns.pack_start(self.columnbox)
self.pentries[idx] = OneParmEntry('','','')
self.columnbox.pack_start(self.pentries[idx].box
,expand=0,fill=0,padding=0)
row += 1
idx += 1
self.boxofcolumns.show_all()
def getentry_byidx(self,idx):
return(self.pentries[idx].getentry())
def clear_pentry_byidx(self,idx):
self.pentries[idx].clear_pentry()
def make_pentry_byidx(self,idx,ll,en,lr,emode='initial'):
self.pentries[idx].make_pentry(ll,en,lr,emode)
def getstuff_byidx(self,idx):
print("1getstuff idx=",idx)
self.pentries[idx].getstuff()
def get_box(self):
return self.ebox
def clear_parm_entries(self):
for pidx in range(1,self.no_of_entries+1):
self.clear_pentry_byidx(pidx)
def set_parm_entries(self,parms,emode='initial'):
lastpidx = 0
for pidx in sorted(parms.sub_data.ndict):
name,dvalue,comment = parms.sub_data.ndict[pidx]
self.make_pentry_byidx(pidx
,str(pidx)
,dvalue
,comment
,emode
)
lastpidx = pidx
class TestButtons():
"""TestButtons: debugging buttons"""
def __init__(self,mypg):
self.box = gtk.HBox()
self.mypg = mypg
lbl = gtk.Label('Debug:')
lbl.set_alignment(xalign=0.9,yalign=0.5) # rt aligned
self.box.pack_start(lbl,expand=0,fill=0,padding=2)
for item in ('info'
,'intfc'
,'nset'
,'nb'
,'page'
,'fset'
,'pre'
,'sub'
,'pst'
,'ent'
,'cp'
,'lcnc'
,'hal'
,'pos'
,'glo'
,'loc'
,'tst'
):
button = gtk.Button(item)
if g_alive: button.connect("clicked", self.btest, item)
button.show_all()
self.box.pack_start(button,expand=0,fill=0,padding=2)
bclose = gtk.Button('Close')
if g_alive: bclose.connect("clicked", lambda x: self.delete())
self.box.pack_start(bclose,expand=0,fill=0,padding=2)
def btest(self,widget,v):
m = self.mypg
if v == 'info':
p = m.nset
print('INFO--------------------------------------------------')
print(' sys.argv = %s' % sys.argv)
print(' cwd = %s' % os.getcwd())
print(' sys.path = %s' % sys.path)
print(' ini_file = %s' % p.intfc.get_ini_file())
print(' auto_file = %s' % p.auto_file)
print('subroutine_path = %s' % p.intfc.get_subroutine_path())
print(' user_m_path = %s' % p.intfc.get_user_m_path())
print(' pre_file = %s' % p.intfc.get_preamble())
print(' sublist = %s' % p.intfc.get_subfiles())
print(' pst_file = %s' % p.intfc.get_postamble())
print(' startpage_idx = %s' % p.startpage_idx)
print('')
print(' __file__ = %s' % __file__)
print('g_send_function = %s' % g_send_function)
print(' g_popkbd = %s' % g_popkbd)
print(' g_stat = %s' % g_stat)
print(' g_progname = %s' % g_progname)
print(' g_verbose = %s' % g_verbose)
print(' g_debug = %s' % g_debug)
print(' g_tmode = %s' % g_tmode)
print(' g_label_id = %s' % g_label_id)
elif v == 'ent':
print('ENTRIES--------------------------------------------------')
x = m.efields.pentries
pmax = m.fset.sub_data.pdict['lastparm']
print('efields.pentries[]')
for pidx in range(1,pmax+1):
print("%2d: %4s %-8s %-20s" % (pidx
,x[pidx].ll.get_text()
,x[pidx].en.get_text()
,x[pidx].lr.get_text()
))
print('ENTRIES==================================================')
elif v == 'intfc': d = m.nset.intfc; show_dir(d,tag='intfc')
elif v == 'page':
d = m; show_dir(d,tag='mypg')
x=self.mypg.efields.pentries[1].en
print 'x=',x
print ' has_focus:',x.has_focus()
print ' is_focus:',x.is_focus()
print ' get_can_focus:',x.get_can_focus()
elif v == 'pre': d = m.fset.pre_data; show_dir(d,tag='pre_data')
elif v == 'sub': d = m.fset.sub_data; show_dir(d,tag='sub_data')
elif v == 'pst': d = m.fset.pst_data; show_dir(d,tag='pst_data')
elif v == 'fset': d = m.fset; show_dir(d,tag='fset')
elif v == 'nset': d = m.nset; show_dir(d,tag='nset')
elif v == 'cp': d = m.cpanel; show_dir(d,tag='cpanel')
elif v == 'loc': show_dir(locals(),tag='locals')
elif v == 'glo': show_dir(globals(),tag='globals')
elif v == 'lcnc': show_dir(linuxcnc,tag='lcnc')
elif v == 'hal': show_dir(hal,tag='hal')
elif v == 'pos': show_position()
elif v == 'tst':
print('cpanel size:',m.cpanel.box.size_request())
print('mtable size:',m.mtable.size_request())
elif v == 'nb':
print('NB--------------------------------------------------')
for pno in range(m.nset.startpage_idx
,m.mynb.get_n_pages()):
npage = m.mynb.get_nth_page(pno)
pg = m.nset.pg_for_npage[npage]
ltxt = pg.the_lbl.get_text()
print('%10s %s' % (ltxt,pg))
print('NB==================================================')
else: print('btest unknown:',v)
def delete(self):
gtk.main_quit()
return False
class ControlPanel():
"""ControlPanel: Controls and image display"""
def __init__(self
,mypg
,pre_file=''
,sub_file=''
,pst_file=''
):
self.mypg = mypg
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
frame.set_border_width(2)
self.box = frame
cpbox = gtk.VBox()
# fixed width so it doesn't change when switching tabs
# fixed height to allow room for buttons below image
#cpbox.set_size_request(g_image_width,g_image_height)
bw = 1
bpre = gtk.Button(_('Preamble'))
bpre.set_border_width(bw)
mod_font_by_category(bpre)
bsub = gtk.Button(_('Subfile'))
bsub.set_border_width(bw)
mod_font_by_category(bsub)
bpst = gtk.Button(_('Postamble'))
bpst.set_border_width(bw)
mod_font_by_category(bpst)
self.pre_entry = gtk.Entry()
self.pre_entry.set_state(gtk.STATE_INSENSITIVE)
self.sub_entry = gtk.Entry()
self.sub_entry.set_state(gtk.STATE_INSENSITIVE)
self.pst_entry = gtk.Entry()
self.pst_entry.set_state(gtk.STATE_INSENSITIVE)
chars=10
self.pre_entry.set_width_chars(chars)
self.pre_entry.set_alignment(xalign=0.1)
self.pre_entry.set_text(os.path.basename(pre_file))
if g_alive: self.pre_entry.connect("activate", self.file_choose, 'pre')
self.sub_entry.set_width_chars(chars)
self.sub_entry.set_alignment(xalign=0.1)
self.sub_entry.set_text(os.path.basename(sub_file))
if g_alive: self.sub_entry.connect("activate", self.file_choose, 'sub')
self.pst_entry.set_width_chars(chars)
self.pst_entry.set_alignment(xalign=0.1)
self.pst_entry.set_text(os.path.basename(pst_file))
if g_alive: self.pst_entry.connect("activate", self.file_choose, 'pst')
xcontrol = gtk.Label('Controls')
xcontrol.set_alignment(xalign=0.0,yalign=0.5) # left aligned
econtrol = gtk.EventBox()
econtrol.add(xcontrol)
econtrol.modify_bg(gtk.STATE_NORMAL,label_normal_color)
lcontrol= gtk.Frame()
lcontrol.set_shadow_type(gtk.SHADOW_IN)
lcontrol.set_border_width(0)
lcontrol.add(econtrol)
tfiles = gtk.Table(rows=3, columns=2, homogeneous=0)
bx = gtk.FILL|gtk.EXPAND; by = 0
tfiles.attach(bpre,0,1,0,1,xoptions=bx,yoptions=by)
tfiles.attach(bsub,0,1,1,2,xoptions=bx,yoptions=by)
tfiles.attach(bpst,0,1,2,3,xoptions=bx,yoptions=by)
tfiles.attach(self.pre_entry,1,2,0,1,xoptions=bx,yoptions=by)
tfiles.attach(self.sub_entry,1,2,1,2,xoptions=bx,yoptions=by)
tfiles.attach(self.pst_entry,1,2,2,3,xoptions=bx,yoptions=by)
if g_alive: bpre.connect("clicked", self.file_choose, 'pre')
if g_alive: bsub.connect("clicked", self.file_choose, 'sub')
if g_alive: bpst.connect("clicked", self.file_choose, 'pst')
#bretain = gtk.CheckButton('Retain values on Subfile read')
self.bexpand = gtk.CheckButton('Expand Subroutine')
self.bexpand.set_active(self.mypg.expandsub)
if g_alive: self.bexpand.connect("toggled", self.toggle_expandsub)
self.bautosend = gtk.CheckButton('Autosend')
self.bautosend.set_active(self.mypg.autosend)
if g_alive: self.bautosend.connect("toggled", self.toggle_autosend)
tchkbs = gtk.Table(rows=3, columns=1, homogeneous=0)
bx = gtk.FILL|gtk.EXPAND; by = gtk.FILL|gtk.EXPAND
#tchkbs.attach(bretain, 0,1,0,1,xoptions=bx,yoptions=by)
tchkbs.attach(self.bexpand, 0,1,1,2,xoptions=bx,yoptions=by)
nopts = self.mypg.nset.intfc.get_ngcgui_options()
if (nopts is None) or ('noauto' not in nopts):
tchkbs.attach(self.bautosend,0,1,2,3,xoptions=bx,yoptions=by)
bw = 1
bcreate = gtk.Button(_('Create Feature'))
bcreate.set_border_width(bw)
if g_alive: bcreate.connect("clicked", lambda x: self.create_feature())
mod_font_by_category(bcreate)
bfinalize = gtk.Button(_('Finalize'))
bfinalize.set_border_width(bw)
if g_alive: bfinalize.connect("clicked"
,lambda x: self.finalize_features())
mod_font_by_category(bfinalize)
self.lfct = gtk.Label(str(mypg.feature_ct))
self.lfct.set_alignment(xalign=0.9,yalign=0.5) # right aligned
mod_font_by_category(self.lfct)
lfctf = gtk.Frame()
lfctf.set_shadow_type(gtk.SHADOW_IN)
lfctf.set_border_width(2)
lfctf.add(self.lfct)
self.breread = gtk.Button(_('Reread'))
self.breread.set_border_width(bw)
if g_alive: self.breread.connect("clicked"
,lambda x: self.reread_files())
mod_font_by_category(self.breread)
brestart = gtk.Button(_('Restart'))
brestart.set_border_width(bw)
if g_alive: brestart.connect("clicked"
,lambda x: self.restart_features())
mod_font_by_category(brestart)
self.lmsg = gtk.Label(_('Ctrl-k for key shortcuts'))
self.lmsg.set_alignment(xalign=0.05,yalign=0.5) # left aligned
lmsgf = gtk.Frame()
lmsgf.set_shadow_type(gtk.SHADOW_IN)
lmsgf.set_border_width(2)
lmsgf.add(self.lmsg)
tactions = gtk.Table(rows=3, columns=3, homogeneous=1)
bx = gtk.FILL|gtk.EXPAND; by = gtk.FILL|gtk.EXPAND
tactions.attach(bcreate, 0,2,0,1,xoptions=bx,yoptions=by)
tactions.attach(bfinalize,2,3,0,1,xoptions=bx,yoptions=by)
# only if image (see below)
# tactions.attach(self.breread ,0,1,1,2,xoptions=bx,yoptions=by)
tactions.attach(brestart, 2,3,1,2,xoptions=bx,yoptions=by)
bx = gtk.FILL|gtk.EXPAND; by = 0
#tactions.attach(self.lmsg,0,3,2,3,xoptions=bx,yoptions=by)
tactions.attach(lmsgf,0,3,2,3,xoptions=bx,yoptions=by)
nopts = self.mypg.nset.intfc.get_ngcgui_options()
image_file = find_image(sub_file)
if image_file:
img = sized_image(image_file)
if ( (not image_file)
or (nopts is not None and 'noiframe' in nopts)
or mypg.imageoffpage
):
# show all controls
bx = gtk.FILL|gtk.EXPAND; by = gtk.FILL|gtk.EXPAND
tactions.attach(self.breread, 0,1,1,2,xoptions=bx,yoptions=by)
tactions.attach(lfctf, 1,2,1,2,xoptions=bx,yoptions=by)
cpbox.pack_start(lcontrol,expand=0,fill=0,padding=0)
cpbox.pack_start(tfiles, expand=0,fill=0,padding=0)
cpbox.pack_start(tchkbs, expand=0,fill=0,padding=0)
if image_file:
self.separate_image(img,sub_file,show=False)
mypg.imageoffpage = True
else:
bx = gtk.FILL|gtk.EXPAND; by = gtk.FILL|gtk.EXPAND
tactions.attach(lfctf, 0,2,1,2,xoptions=bx,yoptions=by)
# show image instead of controls
if image_file:
cpbox.pack_start(img,expand=0,fill=0,padding=0)
mypg.imageoffpage = False
cpbox.pack_start(tactions,expand=1,fill=1,padding=0)
cpbox.show()
frame.add(cpbox)
def separate_image(self,img,fname='',show=True):
self.mypg.imgw = gtk.Window(gtk.WINDOW_TOPLEVEL)
w = self.mypg.imgw
w.hide()
w.iconify()
w.set_title(os.path.basename(fname))
w.add(img)
if g_alive: w.connect("destroy",self.wdestroy)
if show:
w.show_all()
w.deiconify()
def wdestroy(self,widget):
del self.mypg.imgw
def set_message(self,msg):
self.lmsg.set_label(msg)
def reread_files(self):
vprint('REREAD')
# user can edit file and use button to reread it
if self.mypg.sub_file == '':
vprint('reread_files NULL subfile')
return False
self.mypg.fset.pre_data.read()
self.mypg.fset.sub_data.re_read() # handle ngc or gcmc
self.mypg.fset.pst_data.read()
self.mypg.update_onepage('pre',self.mypg.pre_file)
self.mypg.update_onepage('sub',self.mypg.sub_file)
self.mypg.update_onepage('pst',self.mypg.pst_file)
self.set_message(_('Reread files'))
return True # success
def restart_features(self):
try:
type(self.mypg.savesec) # test for existence
self.mypg.savesec = []
except AttributeError:
pass
self.mypg.feature_ct = 0
self.lfct.set_label(str(self.mypg.feature_ct))
self.mypg.savesec = []
self.mypg.update_tab_label('default')
self.set_message(_('Restart'))
def toggle_autosend(self, widget):
self.mypg.autosend = (0,1)[widget.get_active()]
self.set_message(_('Toggle autosend %s ') % str(self.mypg.autosend))
def toggle_expandsub(self, widget):
self.mypg.expandsub = (0,1)[widget.get_active()]
self.set_message(_('Toggle expandsub %s') % str(self.mypg.expandsub))
def checkb_toggle(self, widget, var):
print('1T',var,type(var))
var = (0,1)[widget.get_active()]
print('2T',var,type(var))
def create_feature(self):
m=self.mypg
p=self.mypg.fset
fpre,fprestat = m.nset.intfc.find_file_in_path(m.pre_file)
fsub,fsubstat = m.nset.intfc.find_file_in_path(m.sub_file)
fpst,fpststat = m.nset.intfc.find_file_in_path(m.pst_file)
if fsubstat == 'NULLFILE':
vprint('create_feature: NULLFILE')
return
# the test for NOPATH is for special cases
if ( (fpre != p.pre_data.pre_file) and fprestat != 'NOPATH'
or (fsub != p.sub_data.sub_file) and fsubstat != 'NOPATH'
or (fpst != p.pst_data.pst_file) and fpststat != 'NOPATH'
):
print('\nUSER changed filename entry without loading\n')
try:
type(self.mypg.savesec) # test for existence
except AttributeError:
self.mypg.savesec = []
self.set_message(_('Create feature'))
# update for current entry filenames
p.pre_data = PreFile(m.pre_file) # may be ''
p.sub_data = SubFile(m.sub_file) # error for ''
p.pst_data = PstFile(m.pst_file) # may be ''
if p.sub_data.pdict.has_key('isgcmc'):
stat = self.savesection_gcmc()
else:
stat = self.savesection_ngc()
if stat:
if m.feature_ct > 0:
self.mypg.update_tab_label('multiple')
else:
self.mypg.update_tab_label('created')
m.feature_ct = m.feature_ct + 1
self.lfct.set_label(str(m.feature_ct))
self.set_message(_('Created Feature #%d') % m.feature_ct)
else:
#print "savesection fail"
pass
def savesection_ngc(self):
m=self.mypg
p=self.mypg.fset
force_expand = False
# if file not in path and got this far, force expand
fname,stat = m.nset.intfc.find_file_in_path(m.sub_file)
if stat == 'NOTFOUND':
force_expand = True
user_message(mtype=gtk.MESSAGE_INFO
,title=_('Expand Subroutine')
,msg=_('The selected file') + ':\n\n'
+ '%s\n\n'
+ _('is not in the linuxcnc path\n'
'Expanding in place.\n\n'
'Note: linuxcnc will fail if it calls\n'
'subfiles that are not in path\n')
% fname)
try:
self.mypg.savesec.append(
SaveSection(mypg = self.mypg
,pre_info = p.pre_data
,sub_info = p.sub_data
,pst_info = p.pst_data
,force_expand = force_expand
)
)
except ValueError:
dprint('SAVESECTION_ngc: failed')
return True # success
def savesection_gcmc(self):
m=self.mypg
p=self.mypg.fset
intfc = self.mypg.nset.intfc
global g_gcmc_exe
xcmd = []
xcmd.append(g_gcmc_exe)
global g_gcmc_funcname
global g_gcmc_id
g_gcmc_id += 1
# gcmc chars in funcname: (allowed: [a-z0-9_-])
funcname = "%s_%02d"%(g_gcmc_funcname,g_gcmc_id)
p.sub_data.pdict['subname'] = funcname
include_path = intfc.get_gcmc_include_path()
if include_path is not None:
for dir in include_path.split(":"):
xcmd.append("--include")
xcmd.append(os.path.expanduser(dir))
# maybe: xcmd.append("--include")
# maybe: xcmd.append(os.path.dirname(m.sub_file))
# note: gcmc also adds the current directory
# to the search path as last entry.
outdir = g_searchpath[0] # first in path
ofile = os.path.join(outdir,funcname) + ".ngc"
xcmd.append("--output")
xcmd.append(ofile)
xcmd.append('--gcode-function')
xcmd.append(funcname)
for opt in p.sub_data.gcmc_opts:
splitopts = opt.split(' ')
xcmd.append(str(splitopts[0]))
if len(splitopts) > 1:
xcmd.append(str(splitopts[1])) # presumes only one token
for k in p.sub_data.ndict.keys():
#print 'k=',k,p.sub_data.ndict[k]
name,dvalue,comment = p.sub_data.ndict[k]
# make all entry box values explicitly floating point
try:
fvalue = str(float(m.efields.pentries[k].getentry()))
except ValueError:
user_message(mtype=gtk.MESSAGE_ERROR
,title='gcmc input ERROR'
                    ,msg=_('<%s> must be a number') % m.efields.pentries[k].getentry()
)
return False ;# fail
xcmd.append('--define=' + name + '=' + fvalue)
xcmd.append(m.sub_file)
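        # The assembled command typically resembles the following (all names
        # and paths here are illustrative, not literal values):
        #   gcmc --include ~/gcmc/lib --output <searchdir>/<funcname>.ngc \
        #        --gcode-function <funcname> --define=radius=5.0 part.gcmc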
print "xcmd=",xcmd
e_message = ".*Runtime message\(\): *(.*)"
e_warning = ".*Runtime warning\(\): *(.*)"
e_error = ".*Runtime error\(\): *(.*)"
s = subprocess.Popen(xcmd
,stdout=subprocess.PIPE
,stderr=subprocess.PIPE
)
sout,eout = s.communicate()
m_txt = ""
w_txt = ""
e_txt = ""
compile_txt = ""
if eout:
if (len(eout) > g_max_msg_len):
# limit overlong, errant msgs
eout = eout[0:g_max_msg_len] + "..."
for line in eout.split("\n"):
r_message = re.search(e_message,line)
r_warning = re.search(e_warning,line)
r_error = re.search(e_error,line)
if r_message:
m_txt += r_message.group(1) + "\n"
elif r_warning:
w_txt += r_warning.group(1) + "\n"
elif r_error:
e_txt += r_error.group(1) + "\n"
else:
compile_txt += line
if m_txt != "":
user_message(mtype=gtk.MESSAGE_INFO
,title='gcmc INFO'
,msg="gcmc File:\n%s\n\n%s"%(m.sub_file,m_txt)
)
if w_txt != "":
user_message(mtype=gtk.MESSAGE_WARNING
,title='gcmc WARNING'
,msg="gcmc File:\n%s\n\n%s"%(m.sub_file,w_txt)
)
if e_txt != "":
user_message(mtype=gtk.MESSAGE_ERROR
,title='gcmc ERROR'
,msg="gcmc File:\n%s\n\n%s"%(m.sub_file,e_txt)
)
if compile_txt != "":
user_message(mtype=gtk.MESSAGE_ERROR
,title='gcmc Compile ERROR'
,msg="gcmc File:%s"%(compile_txt)
)
if s.returncode:
return False ;# fail
self.mypg.savesec.append(
SaveSection(mypg = self.mypg
,pre_info = p.pre_data
,sub_info = p.sub_data
,pst_info = p.pst_data
,force_expand = False # never for gcmc
)
)
return True # success
def finalize_features(self):
mypg = self.mypg
nb = self.mypg.mynb
nset = self.mypg.nset
if mypg.feature_ct <= 0:
msg = _('No features specified on this page')
self.set_message(msg)
user_message(mtype=gtk.MESSAGE_WARNING
,title='No Features'
,msg=msg)
return
if len(mypg.savesec) == 0:
msg = 'finalize_features: Unexpected: No features'
self.set_message(_('No features'))
raise ValueError,msg
return
txt = ''
plist = []
sequence = ""
# these are in left-to-right order
for pno in range(nset.startpage_idx,nb.get_n_pages()):
npage = nb.get_nth_page(pno)
            #Using EventBox for tabpage labels: don't use get_tab_label_text()
pg = nset.pg_for_npage[npage]
ltxt = pg.the_lbl.get_text()
howmany = len(pg.savesec)
if howmany > 0:
plist.append(pg)
sequence = sequence + " " + ltxt
txt = txt + "%s has %d features\n" % (ltxt,howmany)
vprint(txt)
if len(plist) > 1:
msg = (_('Finalize all Tabs?\n\n'
'No: Current page only\n'
'Yes: All pages\n'
'Cancel: Nevermind\n\n'
'Order:'
)
+ '\n<' + sequence + '>\n\n'
'You can Cancel and change the order with the\n'
'Forward and Back buttons\n'
)
popup = gtk.Dialog(title='Page Selection'
,parent=None
,flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
,buttons=(gtk.STOCK_NO, gtk.RESPONSE_NO
,gtk.STOCK_YES, gtk.RESPONSE_YES
,gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL
)
)
finbox = popup.get_content_area()
l = gtk.Label(msg)
finbox.pack_start(l)
popup.show_all()
ans = popup.run()
popup.destroy()
if ans == gtk.RESPONSE_YES:
pass # use plist for all pages
elif ans == gtk.RESPONSE_NO:
pno = self.mypg.mynb.get_current_page()
npage = nb.get_nth_page(pno)
plist = [nset.pg_for_npage[npage]]
elif ( ans == gtk.RESPONSE_CANCEL
                 or ans == gtk.RESPONSE_DELETE_EVENT): # window close
return # do nothing
else:
raise ValueError, 'finalize_features:unknown ans<%d>'%ans
# make a unique filename
# (avoids problems with gremlin ignoring new file with same name)
global g_auto_file_ct
autoname = nset.auto_file
dirname = os.path.realpath(os.path.dirname(autoname))
basename = str(g_auto_file_ct) + "." + os.path.basename(autoname)
tmpname = os.path.join(dirname,basename)
if os.path.exists(tmpname):
os.remove(tmpname)
# hack: alternate names (0,1) to force gremlin file loading
# and touchy filechooser updates
g_auto_file_ct = (g_auto_file_ct + 1)%2
basename = str(g_auto_file_ct) + "." + os.path.basename(autoname)
tmpname = os.path.join(dirname,basename)
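        # With the default auto file this alternates between names like
        # "0.auto.ngc" and "1.auto.ngc" on successive finalizes, so gremlin
        # and the touchy filechooser always see a changed filename.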
self.mypg.nset.last_file = tmpname
savename = None
f = open(tmpname,'w')
nopts = self.mypg.nset.intfc.get_ngcgui_options()
        if ((nopts is not None and 'nom2' in nopts) or g_nom2):
f.write("%\n")
f.write("(%s: nom2 option)\n" % g_progname)
featurect = 0
for pg in plist:
ct = self.write_to_file(f,pg,featurect)
featurect += ct
pg.feature_ct = 0
self.lfct.set_label(str(pg.feature_ct))
pg.savesec = []
        if ((nopts is not None and 'nom2' in nopts) or g_nom2):
f.write("%\n")
else:
f.write("(%s: m2 line added) m2 (g54 activated)\n" % g_progname)
f.close()
user_must_save = True # disprove with send_function
title_message = ''
if self.mypg.autosend:
if g_send_function(tmpname):
user_must_save = False
self.set_message(_('Finalize: Sent file'))
save_a_copy(tmpname)
print('%s:SENT: %s' % (g_progname,tmpname))
print('%s:SENT:using: %s' % (g_progname,g_send_function.__name__))
else:
title_message = (
_('Sending file failed using function: <%s>, user must save')
% g_send_function.__name__)
self.set_message(_('Finalize: Sent file failed'))
print('%s:SAVEDFILE: after send failed: %s'
% (g_progname,tmpname))
if user_must_save:
fname = os.path.abspath(nset.auto_file)
if self.mypg.nset.last_file is not None:
fname = self.mypg.nset.last_file # last user choice
savename = file_save(fname,title_message) # user may change name
if savename is not None:
shutil.move(tmpname,savename)
save_a_copy(savename)
self.mypg.nset.last_file = savename
for pg in plist:
pg.cpanel.restart_features()
pg.update_tab_label('default')
global g_label_id
g_label_id = 0 # reinitialize
return
def write_to_file(self,file,pg,featurect):
ct = 0
for i in range(0,len(pg.savesec) ):
ct += 1
for l in pg.savesec[i].sdata:
if l.find("#<_feature:>") == 0:
l = "(%s: feature line added) #<_feature:> = %d\n" \
% (g_progname,featurect)
featurect += 1
file.write(l)
return(ct)
def file_choose(self,widget,ftype):
mydiag = CandidateDialog(ftype=ftype)
while True:
response = mydiag.run()
fname,errmsg = mydiag.get_file_result()
if response == gtk.RESPONSE_ACCEPT:
vprint('file_choose: ACCEPT')
self.mypg.cpanel.set_message(_('file_choose ACCEPT'))
pass
elif response == gtk.RESPONSE_REJECT:
self.mypg.cpanel.set_message(_('file_choose REJECT'))
vprint('file_choose: REJECT')
mydiag.destroy()
return None
elif response == gtk.RESPONSE_NO:
self.mypg.cpanel.set_message(_('No File'))
fname = 'nofile' # allow pre,pst nofile
vprint('file_choose: No File')
else:
self.mypg.cpanel.set_message(_('file_choose OTHER'))
mydiag.destroy()
raise ValueError,_('file_choose OTHER %s') % str(response)
return None
if fname == 'TRYAGAIN':
user_message(mtype=gtk.MESSAGE_INFO
,title=_('Try Again')
,msg=errmsg
)
continue
break
mydiag.destroy()
if ftype == 'pre':
self.mypg.fset.pre_file = fname
elif ftype == 'sub':
self.mypg.fset.sub_file = fname
elif ftype == 'pst':
self.mypg.fset.pst_file = fname
else:
raise ValueError,"file_choose ftype?",ftype
# None for no file selected, null out field could be useful
if not fname:
self.mypg.cpanel.set_message(_('file_choose no file?'))
return None
if ftype == 'pre':
if fname == 'nofile':
fname = ''
self.pre_entry.set_text(os.path.basename(fname))
self.mypg.update_onepage('pre',fname)
elif ftype == 'sub':
image_file = find_image(fname)
if image_file:
img = sized_image(image_file)
self.separate_image(img,fname,show=True)
self.mypg.imageoffpage = True
if self.mypg.update_onepage('sub',fname):
self.sub_entry.set_text(os.path.basename(fname))
elif ftype == 'pst':
if fname == 'nofile':
fname = ''
self.pst_entry.set_text(os.path.basename(fname))
self.mypg.update_onepage('pst',fname)
else:
raise ValueError,'file_choose:Unexpected ftype <%s>' %ftype
self.mypg.cpanel.set_message(_('Read %s') % os.path.basename(fname))
return
class OnePg():
"""OnePg: ngcgui info for one tab page"""
def __init__(self
,pre_file
,sub_file
,pst_file
,mynb
,nset
,imageoffpage=False
):
self.imageoffpage = imageoffpage # for clone of Custom pages
self.garbagecollect = False
self.key_enable = False
self.pre_file,stat = nset.intfc.find_file_in_path(pre_file)
self.sub_file,stat = nset.intfc.find_file_in_path(sub_file)
self.pst_file,stat = nset.intfc.find_file_in_path(pst_file)
self.nset = nset
self.mynb = mynb
self.autosend = nset.autosend
self.expandsub = nset.expandsub
self.feature_ct = 0
self.savesec = []
self.cpanel = ControlPanel(mypg=self
,pre_file=self.pre_file
,sub_file=self.sub_file
,pst_file=self.pst_file
)
bw = 1
#bremove = gtk.Button(_('Remove'))
bremove = gtk.Button(stock=gtk.STOCK_DELETE)
bremove.set_border_width(bw)
if g_alive: bremove.connect("clicked", lambda x: self.remove_page())
#bclone = gtk.Button(_('Clone'))
bclone = gtk.Button(stock=gtk.STOCK_ADD)
bclone.set_border_width(bw)
if g_alive: bclone.connect("clicked", lambda x: self.clone_page())
#bnew = gtk.Button(_('New'))
bnew = gtk.Button(stock=gtk.STOCK_NEW)
bnew.set_border_width(bw)
if g_alive: bnew.connect("clicked", lambda x: self.new_empty_page())
#bmoveleft = gtk.Button(_('<==Move'))
bmoveleft = gtk.Button(stock=gtk.STOCK_GO_BACK,label='')
bmoveleft.set_border_width(bw)
if g_alive: bmoveleft.connect("clicked", lambda x: self.move_left())
#bmoveright = gtk.Button(_('Move==>'))
bmoveright = gtk.Button(stock=gtk.STOCK_GO_FORWARD,label='')
bmoveright.set_border_width(bw)
if g_alive: bmoveright.connect("clicked", lambda x: self.move_right())
# stock buttons notwork with mod_font_by_category
#mod_font_by_category(bremove)
#mod_font_by_category(bclone)
#mod_font_by_category(bnew)
#mod_font_by_category(bmoveleft)
#mod_font_by_category(bmoveright)
tabarrange_buttons = gtk.HBox() # main buttons
self.mtable = gtk.Table(rows=1, columns=2, homogeneous=0)
bx = gtk.FILL|gtk.EXPAND; by = 0
no_of_parms = g_max_parm
self.make_fileset()
no_of_parms = self.fset.sub_data.pdict['lastparm']
self.efields = EntryFields(no_of_parms) # uses MultipleParmEntries item
self.fill_entrypage(emode='initial')
bx = 0; by = gtk.FILL|gtk.EXPAND
self.mtable.attach(self.cpanel.box, 0,1,0,1,xoptions=bx,yoptions=by)
bx = gtk.FILL; by = gtk.FILL|gtk.EXPAND
bx = gtk.FILL|gtk.EXPAND ; by = gtk.FILL|gtk.EXPAND
entrystuff = self.efields.get_box()
self.mtable.attach(entrystuff, 1,2,0,1,xoptions=bx,yoptions=by)
tbtns = TestButtons(mypg=self) # TestButtons
nopts = nset.intfc.get_ngcgui_options()
if (nopts is None) or ('noremove' not in nopts):
tabarrange_buttons.pack_start(bremove)
if (nopts is None) or ('nonew' not in nopts):
tabarrange_buttons.pack_start(bclone)
tabarrange_buttons.pack_start(bnew)
tabarrange_buttons.pack_start(bmoveleft)
tabarrange_buttons.pack_start(bmoveright)
op_box = gtk.VBox()
if g_tab_controls_loc == 'top':
op_box.pack_start(tabarrange_buttons,expand=0,fill=0,padding=0)
elif g_tab_controls_loc == 'bottom':
op_box.pack_end(tabarrange_buttons,expand=0,fill=0,padding=0)
else:
raise ValueError,(g_progname
+ ' unknown tab_controls_loc %s' % g_tab_controls_loc)
op_box.pack_start(self.linfof, expand=0,fill=0,padding=0)
op_box.pack_start(self.mtable, expand=1,fill=1,padding=0)
if g_debug:
op_box.pack_end(tbtns.box, expand=0,fill=0,padding=5)
op_box.show_all()
self.pgbox = gtk.EventBox()
self.pgbox.add(op_box)
self.pgbox.show_all()
if g_alive: self.pgbox.connect('event',self.any_event)
# establish size with max no of entries
ww,hh = self.mtable.size_request()
#print('size for mtable:',ww,hh)
#self.mtable.set_size_request(ww,hh)
lastpidx = self.fset.sub_data.pdict['lastparm']
gobject.timeout_add_seconds(g_check_interval,self.periodic_check)
def periodic_check(self):
try:
for i in ('pre','sub','pst'):
o_entry = getattr(self.cpanel,i + '_entry')
if o_entry.get_text().strip() == '': continue
o_file = getattr(self, i + '_file')
o_data = getattr(self.fset, i + '_data')
o_md5 = getattr(o_data, 'md5')
o_mtime = getattr(o_data, 'mtime')
if ( (o_mtime != None)
and (o_mtime == os.path.getmtime(o_file))):
state = o_entry.get_state()
o_entry.modify_text(state,black_color)
continue
if (o_md5 != md5sum(o_file)):
#print('%s,%s>' % (o_md5,md5sum(o_file)))
#print(i,'CHANGED md5',o_file,o_md5)
state = o_entry.get_state()
o_entry.modify_text(state,purple_color)
else:
#print(i,'SAME md5',o_file,o_md5)
o_entry.modify_text(gtk.STATE_NORMAL,black_color)
except OSError, detail:
            print(_('%s:periodic_check:OSError:%s') % (g_progname,detail))
pass # continue without checks after showing message
except Exception, detail:
exception_show(Exception,detail,'periodic_check')
raise Exception, detail # reraise
if self.garbagecollect:
return False # False to norepeat (respond to del for self)
return True # True to repeat
def any_event(self,widget,event):
if event.type == gtk.gdk.ENTER_NOTIFY:
#widget.set_can_focus(True)
self.key_enable = True
#print('ENTER enable')
return
elif event.type == gtk.gdk.LEAVE_NOTIFY:
#print "LEAVE can, is",widget.is_focus(),widget.get_can_focus(),'\n'
if widget.get_can_focus():
#widget.set_can_focus(False)
self.key_enable = False
#print('LEAVE disable')
return
elif event.type == gtk.gdk.EXPOSE:
widget.grab_focus()
return
elif event.type == gtk.gdk.KEY_PRESS:
if not self.key_enable:
#print('IGNORE')
return
keyname = gtk.gdk.keyval_name(event.keyval)
kl = keyname.lower()
# ignore special keys (until they modify)
if kl in ['alt_r','alt_l'] : return
if kl in ['control_r','control_l'] : return
if kl in ['shift_r','shift_l'] : return
pre = ''
if event.state & gtk.gdk.CONTROL_MASK:
pre = "Control-"
elif event.state & gtk.gdk.MOD1_MASK:
pre = "Alt-"
elif event.state & gtk.gdk.SHIFT_MASK:
pre = "Shift-"
k = pre + keyname
#print("%10s (%03d=%#2X)" % (k, event.keyval,event.keyval))
self.handle_key(k)
return False # allow other handlers
def handle_key(self,k):
if k == 'Control-d':
self.make_fileset()
self.fill_entrypage(emode='initial')
if k == 'Control-a':
self.cpanel.bautosend.clicked()
if k == 'Control-#':
self.cpanel.bexpand.clicked()
if k == 'Control-k':
self.show_special_keys()
if k == 'Control-r':
# was ctrl-p,P,r in ngcgui
self.cpanel.breread.clicked()
if k == 'Control-e':
self.edit_any_file(self.nset.last_file,'last')
if k == 'Control-E':
self.cpanel.bexpand.clicked()
if k == 'Control-u':
self.edit_std_file('sub')
if k == 'Control-U':
self.edit_std_file('pre')
#else:
# print('handle_key: k=',k)
return False # False: allow more handlers
def edit_any_file(self,fname,ftype=''):
if not fname:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('No file')
,msg=_('No %s file specified') % ftype
)
return
subprocess.Popen([self.nset.intfc.editor, fname])
def edit_std_file(self,which):
o_file = getattr(self, which + '_file')
self.edit_any_file(o_file,which)
#NB some key bindings are claimed on touchy
def show_special_keys(self):
msg = []
msg.append('Control-a ' + _('Toggle autosend') + '\n')
msg.append('Control-e ' + _('Edit last result file') + '\n')
msg.append('Control-E ' + _('Toggle expandsubroutines') + '\n')
msg.append('Control-d ' + _('Set Entry defaults') + '\n')
msg.append('Control-k ' + _('Show keys (this)') + '\n')
msg.append('Control-r ' + _('Reread files') + '\n')
msg.append('Control-u ' + _('Edit sub file') + '\n')
msg.append('Control-U ' + _('Edit preamble file') + '\n')
user_message(mtype=gtk.MESSAGE_INFO
,title=_('Special Keys')
,flags=0 #still MODAL ??
,msg=msg)
def set_page_label(self,lbl):
self.lbl = lbl
def save_onepage_tablabel(self,eb_lbl,the_lbl):
self.eb_lbl = eb_lbl
self.the_lbl = the_lbl
def update_tab_label(self,umode):
if umode == 'created':
newcolor = fg_created_color
newstyle = g_lbl_style_created
elif umode == 'multiple':
newcolor = fg_multiple_color
newstyle = g_lbl_style_multiple
elif umode == 'default':
newcolor = fg_normal_color
newstyle = g_lbl_style_default
else:
newstyle = g_lbl_style_default
newcolor = fg_normal_color
self.eb_lbl.set_style(newstyle)
self.the_lbl.modify_fg(gtk.STATE_NORMAL, newcolor)
self.the_lbl.modify_fg(gtk.STATE_ACTIVE, newcolor)
def make_fileset(self):
try:
self.fset = FileSet(pre_file=self.pre_file
,sub_file=self.sub_file
,pst_file=self.pst_file
)
except OSError,detail:
            print(_('%s:make_fileset:%s') % (g_progname,detail))
raise OSError,detail # reraise
def fill_entrypage(self,emode='initial'):
self.efields.set_parm_entries(self.fset,emode)
try:
type(self.info_label) # test for existence
except AttributeError:
self.info_label = gtk.Label()
self.linfof = gtk.Frame()
self.linfof.set_shadow_type(gtk.SHADOW_IN)
self.linfof.set_border_width(2)
self.linfof.add(self.info_label)
self.info_label.set_label(self.fset.sub_data.pdict['info'])
self.info_label.set_alignment(xalign=0.0,yalign=0.5) # left aligned
self.cpanel.set_message(_('Set Entry defaults'))
def clear_entrypage(self):
self.efields.clear_parm_entries()
self.info_label.set_label('')
def update_onepage(self,type,fname):
vprint('UPDATE_PAGE %s file=%s' % (type,fname))
if type == 'pre':
foundname,stat = self.nset.intfc.find_file_in_path(fname)
if stat == 'NOTFOUND':
self.clear_entries('pre')
return
self.pre_file = foundname
self.fset.pre_data = PreFile(self.pre_file)
elif type == 'sub':
foundname,stat = self.nset.intfc.find_file_in_path(fname)
if stat == 'NOTFOUND':
self.clear_entries('sub')
return
self.sub_file = foundname
try:
self.make_fileset()
lastparm = self.fset.sub_data.pdict['lastparm']
self.efields.make_entryfields(lastparm) # update_onepage
self.fill_entrypage()
self.info_label.set_label(self.fset.sub_data.pdict['info'])
lbltxt = self.fset.sub_data.pdict['subname']
lbltxt = self.nset.make_unique_tab_name(lbltxt)
self.the_lbl.set_text(lbltxt)
return True
except Exception, detail:
exception_show(Exception,detail,'update_onepage')
return False
elif type == 'pst':
foundname,stat = self.nset.intfc.find_file_in_path(fname)
if stat == 'NOTFOUND':
self.clear_entries('pst')
return
self.pst_file = foundname
self.fset.pst_data = PstFile(self.pst_file)
else:
raise ValueError,'update_onepage unexpected type <%s>' % type
return
def clear_entries(self,fmode):
if fmode == 'pre':
self.pre_file = ''
self.cpanel.pre_entry.set_text('')
self.fset.pre_data.clear()
elif fmode == 'sub':
self.sub_file = ''
self.cpanel.sub_entry.set_text('')
self.clear_entrypage()
self.fset.sub_data.clear()
elif fmode == 'pst':
self.pst_file = ''
self.cpanel.pst_entry.set_text('')
self.fset.pst_data.clear()
else:
raise ValueError,'clear_entries:unexpected fmode= %s' % fmode
def move_left(self):
page_idx = self.mynb.get_current_page()
page_ct = self.mynb.get_n_pages()
page = self.mynb.get_nth_page(page_idx)
new_pg_idx = page_idx - 1
if new_pg_idx < self.nset.startpage_idx:
new_pg_idx = page_ct -1
self.mynb.reorder_child(page,new_pg_idx%page_ct)
def move_right(self):
page_idx = self.mynb.get_current_page()
page_ct = self.mynb.get_n_pages()
page = self.mynb.get_nth_page(page_idx)
new_pg_idx = (page_idx + 1)%page_ct
if new_pg_idx < self.nset.startpage_idx:
new_pg_idx = self.nset.startpage_idx
self.mynb.reorder_child(page,new_pg_idx%page_ct)
def clone_page(self):
newpage = self.nset.add_page(self.pre_file
,self.sub_file
,self.pst_file
,self.imageoffpage #preserve for clone
)
for idx in self.efields.pentries:
ev = self.efields.pentries[idx].getentry()
newpage.efields.pentries[idx].setentry(ev)
def new_empty_page(self):
self.nset.add_page('','','')
def remove_page(self):
page_ct = self.mynb.get_n_pages()
if page_ct - self.nset.startpage_idx == 1:
user_message(mtype=gtk.MESSAGE_INFO
,title=_('Remove not allowed')
,msg=_('One tabpage must remain')
)
else:
current_pno = self.mynb.get_current_page()
npage = self.mynb.get_nth_page(current_pno)
self.mynb.remove_page(current_pno)
thispg = self.nset.pg_for_npage[npage]
thispg.garbagecollect = True
del thispg
del npage
class NgcGui():
"""NgcGui: set of ngcgui OnePg items"""
# make a set of pages in parent that depends on type(w)
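    # Minimal usage sketch (file names illustrative): embed ngcgui pages in
    # a host application's existing notebook, taking subfiles from an ini:
    #   nb = gtk.Notebook()
    #   NgcGui(w=nb, ini_file='machine.ini', send_function=send_to_axis)
    # w=None opens a standalone toplevel window instead, and w=gtk.Frame()
    # builds a new notebook inside the supplied frame.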
def __init__(self,w=None
,verbose=False
,debug=False
,noauto=False
,keyboardfile='' # None | ['default'|'yes'] | fullfilename
,tmode=0
,send_function=default_send # prototype: (fname)
,ini_file=''
,auto_file=''
,pre_file=''
,sub_files=''
,pst_file=''
,tab_controls_loc='top' # option for touchy
,control_font=None # option for touchy
,gtk_theme_name=None # option for touchy
,max_parm=None # for small display, reject some subs
,image_width=None # for small display
):
global g_send_function; g_send_function = send_function
global g_tmode; g_tmode = tmode
global g_verbose; g_verbose = verbose
global g_debug; g_debug = debug
global g_tab_controls_loc; g_tab_controls_loc = tab_controls_loc
global g_control_font; g_control_font = control_font
try:
type(g_send_function) # test existence
if g_send_function == None:
g_send_function = dummy_send
except AttributeError:
print 'INVALID send_function, using dummy'
g_send_function = dummy_send
if max_parm is not None:
global g_max_parm
g_max_parm = max_parm
if image_width is not None:
global g_image_width
if image_width > g_image_width:
raise ValueError,(_('NgcGui image_width=%d too big, max=%d')
% (image_width,g_image_width))
g_image_width = image_width
if g_max_parm > INTERP_SUB_PARAMS:
raise ValueError,(_('max_parms=%d exceeds INTERP_SUB_PARAMS=%d')
% (g_max_parm,INTERP_SUB_PARAMS) )
ct_of_pages = 0
try:
import popupkeyboard
import glib # for glib.GError
if keyboardfile is not None:
global g_popkbd
if (keyboardfile in ('default','yes') ):
keyboardfile = g_keyboardfile
g_popkbd = popupkeyboard.PopupKeyboard(glade_file=keyboardfile
,use_coord_buttons=True
)
global g_entry_height
g_entry_height = g_big_height # bigger for popupkeyboard
except ImportError, msg:
print('\nImportError:\n%s', msg)
print('keyboardfile=%s' % keyboardfile)
print('popup keyboard unavailable\n')
except glib.GError, msg:
# can occur for toohigh version in ui file
print('\nglib.GError:\n%s' % msg)
print('keyboardfile=%s' % keyboardfile)
print('popup keyboard unavailable\n')
self.last_file = None
self.nb = None
self.autosend = not noauto
self.expandsub = False
self.nextpage_idx = 0
self.startpage_idx = 0
self.pg_for_npage = {}
if w is None:
# standalone operation
self.nb = gtk.Notebook()
w = gtk.Window(gtk.WINDOW_TOPLEVEL)
if g_alive: w.connect("destroy", gtk.main_quit)
w.set_title(sys.argv[0])
w.add(self.nb)
self.nb.show()
w.show()
elif type(w) == gtk.Frame:
# demo -- embed as a notebook in a provider's frame
self.nb = gtk.Notebook()
w.add(self.nb)
self.nb.show()
w.show()
elif type(w) == gtk.Notebook:
# demo -- embed as additional pages in a provider's notebook
self.nb = w
self.startpage_idx = self.nb.get_n_pages()
else:
raise ValueError,'NgcGui:bogus w= %s' % type(w)
self.nb.set_scrollable(True)
self.set_theme(w,tname=gtk_theme_name)
self.intfc = LinuxcncInterface(ini_file)
if len(self.intfc.subroutine_path) == 0:
self.intfc.addto_spath(
spath_from_files(pre_file,sub_files,pst_file))
if len(self.intfc.subroutine_path) != 0:
user_message(mtype=gtk.MESSAGE_WARNING
,title=_('Simulated subroutine path')
,msg=_('No subroutine path available.\n'
'Simulating subroutine path:\n\n')
+ str(self.intfc.subroutine_path)
+ '\n'
+ _('Generated results may not be usable with linuxcnc')
)
if len(self.intfc.subroutine_path) == 0:
if g_alive:
# no message if glade designer is running:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('No Subroutine Paths')
,msg='\n' +
_('No paths available!\n'
'Make sure there is a valid\n'
' [RS274]SUBROUTINE_PATH\n\n'
' 1) Start linuxcnc\n'
'or\n'
' 2) Specify an ini file\n'
'or\n'
' 3) Specify at least one subfile\n'
'\n')
)
sys.exit(1)
global g_searchpath; g_searchpath = self.intfc.subroutine_path
# multiple pages can be specified with __init__()
initsublist= []
if type(sub_files) == StringType and sub_files:
initsublist.append(sub_files)
else:
initsublist = sub_files
nogo_l = []
for sub_file in initsublist:
if not g_alive: continue
if os.path.dirname(sub_file) in self.intfc.subroutine_path:
self.add_page(pre_file,sub_file,pst_file)
ct_of_pages += 1
else:
nogo_l.append(sub_file)
if nogo_l:
user_message(mtype=gtk.MESSAGE_INFO
,title=_('Cannot use files not in subroutine path')
,msg=_('Files not in subroutine path:\n')
+ str(nogo_l) +
'\n\n'
+ _('Subroutine path is:\n')
+ str(self.intfc.subroutine_path)
)
nogo_l = []
# multiple pages can be specified with an ini_file
sublist = self.intfc.get_subfiles() #returns list
pre_file = self.intfc.get_preamble()
pst_file = self.intfc.get_postamble()
# auto_file directory:
# if specified, verify in path, give message if not
# if nil
# if PROGRAM_PREFIX put there
# else put in cwd
if auto_file:
dir = os.path.abspath(os.path.dirname(auto_file))
spath = self.intfc.get_subroutine_path()
try:
spath.index(dir) # check that auto_file dir is in path
# auto_file ok
except ValueError:
# it's called autofile in --help
pass
#user_message(mtype=gtk.MESSAGE_WARNING
# ,title=_('Warning: autofile not in path')
# ,msg=_('autofile==%s is not in linuxcnc\n'
# 'subroutine search path:\n'
# ' %s\n') % (auto_file,spath)
# )
self.auto_file = auto_file
else:
pprefix = self.intfc.get_program_prefix()
if pprefix:
self.auto_file = os.path.join(pprefix,'auto.ngc')
else:
self.auto_file = os.path.join(os.path.curdir,'auto.ngc')
dprint('input for auto_file=%s\nfinal auto_file=%s'
% (auto_file,self.auto_file))
if pre_file is None: pre_file = ''
if pst_file is None: pst_file = ''
# vprint('SAVE_FILE: %s' % self.auto_file)
if sublist and g_alive:
for sub_file in sublist:
if sub_file == '""': #beware code for custom is '""'
sub_file = ''
try:
self.add_page(pre_file,sub_file,pst_file)
ct_of_pages += 1
except Exception,detail:
exception_show(Exception,detail,src='NgcGui init')
print(_('CONTINUING without %s') % sub_file)
else:
if not sub_files:
vprint('NgcGui: no ini_file with sublist '
'and no cmdline sub_file:'
'making Custom page')
self.add_page('','','')
ct_of_pages += 1
pass
self.current_page = None
# self.nb.set_current_page(self.startpage_idx)
# start at page 0 to respect caller's ordering
self.nb.set_current_page(0)
if g_alive: self.nb.connect('switch-page', self.page_switched)
w.show_all()
if ct_of_pages == 0:
usage()
print(_('No valid subfiles specified'))
sys.exit(1)
return
def update_fonts(self,fontname):
update_fonts(fontname)
def set_theme(self,w,tname=None):
screen = w.get_screen()
settings = gtk.settings_get_for_screen(screen)
if (tname is None) or (tname == "") or (tname == "Follow System Theme"):
tname = settings.get_property("gtk-theme-name")
settings.set_string_property('gtk-theme-name',tname,"")
def page_switched(self,notebook,npage,pno):
if self.current_page:
curpage = self.current_page
if hasattr(curpage,'imgw'):
w = getattr(curpage,'imgw')
w.iconify()
try:
mypg = self.pg_for_npage[self.nb.get_nth_page(pno)]
if hasattr(mypg,'imgw'):
w = getattr(mypg,'imgw')
w.deiconify()
w.show_all()
self.current_page = mypg
except KeyError,msg:
# can occur when embedded in providers notebook
# print('page_switched: Caught KeyError')
pass
def add_page(self,pre_file,sub_file,pst_file,imageoffpage=False):
# look for gcmc on first request for .gcmc file:
if os.path.splitext(sub_file)[-1] in ['.gcmc','.GCMC']:
if not find_gcmc(): return None
self.nextpage_idx = self.nextpage_idx + 1
opage = OnePg(pre_file=pre_file
,sub_file=sub_file
,pst_file=pst_file
,mynb=self.nb
,nset=self # an NgcGui set of pages
,imageoffpage=imageoffpage
)
if opage.fset.sub_data.pdict['subname'] == '':
ltxt = 'Custom'
else:
ltxt = opage.fset.sub_data.pdict['subname']
ltxt = self.make_unique_tab_name(ltxt)
eb_lbl = gtk.EventBox()
mylbl = gtk.Label(ltxt)
if g_popkbd is not None:
mylbl.set_size_request(-1,g_big_height)
eb_lbl.add(mylbl)
mylbl.show()
eb_lbl.set_style(g_lbl_style_default)
pno = self.nb.append_page(opage.pgbox,eb_lbl)
if g_control_font is not None:
mod_font_by_category(mylbl)
# An EventBox is needed to change bg of tabpage label
# When using EventBox:
# don't use get_tab_label_text()
opage.save_onepage_tablabel(eb_lbl,mylbl)
self.pg_for_npage[self.nb.get_nth_page(pno)] = opage
self.nb.set_current_page(pno) # move to the new page
return opage
def make_unique_tab_name(self,name):
l = []
if not name: return None
for pno in range(self.startpage_idx,self.nb.get_n_pages()):
npage = self.nb.get_nth_page(pno)
pg = self.pg_for_npage[npage]
            # using EventBox for label, don't use get_tab_label_text()
ltxt = pg.the_lbl.get_text()
if ltxt.find(name) == 0:
l.append(ltxt)
if len(l) == 0:
return(name)
if len(l) == 1:
return(name + '-1')
last = l[-1]
idx = last.find('-')
return(name + '-' + str(int(last[idx+1:]) + 1) )
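        # Example: adding the same subroutine three times yields tab labels
        # like 'qpocket', 'qpocket-1', 'qpocket-2' (subroutine name illustrative).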
class SaveSection():
"""SaveSection: lines ready for result file"""
def __init__(self,mypg,pre_info,sub_info,pst_info,force_expand=False):
global g_label_id
g_label_id += 1
self.sdata=[]
self.sdata.append("(%s: FEATURE %s)\n"% (g_progname,dt() ))
self.sdata.append("(%s: files: <%s,%s,%s>)\n"
% (g_progname
,pre_info.pre_file
,sub_info.sub_file
,pst_info.pst_file
)
)
# note: this line will be replaced on file output with a count
# that can span multiple pages
self.sdata.append("#<_feature:> = 0\n")
self.sdata.append("(%s: preamble file: %s)\n" % (
g_progname,pre_info.pre_file))
self.sdata.extend(pre_info.inputlines)
emsg = '' # accumulate errors for emsg
calltxt = 'o<%s> call ' % sub_info.pdict['subname']
parmlist = []
tmpsdata = []
for idx in sub_info.ndict:
name,dvalue,comment = sub_info.ndict[idx]
value=mypg.efields.getentry_byidx(idx)
try:
v = float(value)
except ValueError:
emsg = emsg + (
_('Entry for parm %2d is not a number\n <%s>\n')
% (int(idx),value))
#note: e formats not accepted by linuxcnc (like 1e2)
# but using float(value) --->mmm.nnnnn everywhere
# makes long call line
# so try to send entry value, but if it has e, use float
if 'e' in value:
value = str(float(value.lower() ))
parmlist.append(value)
if sub_info.pdict.has_key('isgcmc'):
# just print value of gcmc parm embedded in gcmc result
# the call requires no parms
pass
else:
calltxt = calltxt + '[%s]' % value
# these appear only for not-expandsub
tmpsdata.append("(%11s = %12s = %12s)\n" % (
'#'+str(idx),name,value))
if emsg:
user_message(mtype=gtk.MESSAGE_ERROR
,title=_('SaveSection Error')
,msg=emsg)
mypg.cpanel.set_message(_('Failed to create feature'))
raise ValueError
calltxt = calltxt + '\n'
# expandsub not honored for gcmc
if (mypg.expandsub and sub_info.pdict.has_key('isgcmc')):
print(_('expandsub not honored for gcmc file: %s')%
os.path.basename(sub_info.sub_file))
mypg.expandsub = 0
#---------------------------------------------------------------------
if (not mypg.expandsub) and (not force_expand):
self.sdata.append("(%s: call subroutine file: %s)\n" % (
g_progname,sub_info.sub_file) )
self.sdata.append("(%s: positional parameters:)\n"% g_progname)
self.sdata.extend(tmpsdata)
self.sdata.append(calltxt) # call the subroutine
else:
# expand the subroutine in place with unique labels
self.sdata.append('(Positional parameters for %s)\n'
% mypg.sub_file)
for i in range(0,idx):
self.sdata.append(' #%d = %s\n' % (i+1,parmlist[i]))
self.sdata.append('(expanded file: %s)\n' % mypg.sub_file)
blank = ''
idx = 0
for line in sub_info.inputlines:
idx += 1
if line.strip() == '':
continue
if idx in sub_info.ldict:
modlabel = sub_info.ldict[idx]
if modlabel == 'ignoreme':
continue
modlabel = 'o<%03d%s>' % (g_label_id,modlabel)
r = re.search(r'^o<(.*?)>(.*)',line)
if r:
modline = r.group(2) + '\n'
else:
print('SaveSection__init__:unexpected:',line)
self.sdata.append('%11s %s' % (modlabel,modline))
else:
theline = '%11s %s' % (blank,line)
                # hack: try to reduce long line length so linuxcnc won't
# choke on files that work otherwise but fail
# when expanded here
# example: 246 chars observed for
# qpex --> the call to qpocket uses many named parms
# hardcoded for # config.h.in #define LINELEN 255
                # hardcoded 252 empirically determined
if len(theline) >= 252:
theline = line
self.sdata.append(theline)
#---------------------------------------------------------------------
if pst_info.inputlines:
self.sdata.append("(%s: postamble file: %s)\n" % (
g_progname,pst_info.pst_file))
self.sdata.extend(pst_info.inputlines)
#for line in self.sdata:
# print('line:',line,)
def usage():
print("""
Usage:
%s [Options] [sub_filename]
Options requiring values:
[-d | --demo] [0|1|2] (0: DEMO standalone toplevel)
(1: DEMO embed new notebook)
(2: DEMO embed within existing notebook)
[-S | --subfile sub_filename]
[-p | --preamble preamble_filename]
[-P | --postamble postamble_filename]
[-i | --ini inifile_name]
[-a | --autofile autoauto_filename]
[-t | --test testno]
[-H | --height height_of_entry widget] (typ 20-40)
[-K | --keyboardfile glade_file] (use custom popupkeyboard glade file)
Solo Options:
[-v | --verbose]
[-D | --debug]
[-N | --nom2] (no m2 terminator (use %%))
[-n | --noauto] (save but do not automatically send result)
       [-k | --keyboard] (use default popupkeyboard)
[-s | --sendtoaxis] (send generated ngc file to axis gui)
Notes:
A set of files is comprised of a preamble, subfile, postamble.
The preamble and postamble are optional.
One set of files can be specified from cmdline.
Multiple sets of files can be specified from an inifile.
If --ini is NOT specified:
        search for a running linuxcnc and use its inifile
""" % g_progname)
#-----------------------------------------------------------------------------
# Standalone (and demo) usage:
def standalone_pyngcgui():
# make widgets for test cases:
top = gtk.Window(gtk.WINDOW_TOPLEVEL)
top.set_title('top')
hbox = gtk.HBox()
top.add(hbox)
l1 = gtk.Label('LABEL')
hbox.pack_start(l1,expand=0,fill=0)
e1 = gtk.Entry()
hbox.pack_start(e1,expand=0,fill=0)
e1.set_width_chars(4)
f1 = gtk.Frame()
hbox.pack_start(f1,expand=0,fill=0)
f2 = gtk.Frame()
hbox.pack_start(f2,expand=0,fill=0)
n = gtk.Notebook()
n.set_scrollable(True)
b1 = gtk.Button('b1-filler')
b2 = gtk.Button('b2-filler')
n.append_page(b1,gtk.Label('Mb1-filler'))
n.append_page(b2,gtk.Label('Mb2-filler'))
f1.add(n)
top.show_all()
demo = 0 # 0 ==> standalone operation
subfilenames = ''
prefilename = ''
pstfilename = ''
vbose = False
dbg = False
noauto = False
keyboard = False
keyboardfile = 'default'
ini_file = ''
auto_file = ''
tmode = 0
send_f = default_send
try:
options,remainder = getopt.getopt(sys.argv[1:]
,'a:Dd:hi:kK:Nnp:P:sS:t:v'
, ['autofile'
,'demo='
,'debug'
,'help'
,'ini='
,'keyboard'
,'keyboardfile='
,'noauto'
,'preamble='
,'postamble='
,'subfile='
,'verbose'
,'sendtoaxis'
,'nom2'
]
)
except getopt.GetoptError,msg:
usage()
print('\nGetoptError:%s' % msg)
sys.exit(1)
except Exception, detail:
exception_show(Exception,detail,'__main__')
sys.exit(1)
for opt,arg in options:
#print('#opt=%s arg=%s' % (opt,arg))
if opt in ('-h','--help'): usage(),sys.exit(0)
if opt in ('-d','--demo'): demo = arg
if opt in ('-i','--ini'): ini_file = arg
if opt in ('-a','--autofile'): auto_file = arg
if opt in ('-p','--preamble'): prefilename=arg
if opt in ('-P','--postamble'): pstfilename=arg
if opt in ('-S','--subfile'): subfilenames=arg
if opt in ('-t','--test'): tmode=arg
if opt in ('-k','--keyboard'): keyboard=True
if opt in ('-K','--keyboardfile'):
keyboard=True
keyboardfile=arg
if opt in ('-N','--nom2'): dbg = g_nom2 = True
if opt in ('-D','--debug'): dbg = True
if opt in ('-n','--noauto'): noauto = True
if opt in ('-v','--verbose'):
vbose = True
continue
if opt in ('-s','--sendtoaxis'):
send_f = send_to_axis
continue
if remainder: subfilenames = remainder # ok for shell glob e.g., *.ngc
demo = int(demo)
if not keyboard: keyboardfile=None
if (dbg):
print(g_progname + ' BEGIN-----------------------------------------------')
print(' __file__= %s' % __file__)
print(' ini_file= %s' % ini_file)
print(' sys.argv= %s' % sys.argv)
print(' os.getcwd= %s' % os.getcwd())
print(' sys.path= %s' % sys.path)
print(' demo= %s' % demo)
print(' prefilename= %s' % prefilename)
print('subfilenames= %s' % subfilenames)
print(' pstfilename= %s' % pstfilename)
print(' keyboard= %s, keyboardfile= <%s>' % (keyboard,keyboardfile))
try:
if demo == 0:
top.hide()
NgcGui(w=None
,verbose=vbose,debug=dbg,noauto=noauto
,keyboardfile=keyboardfile
,tmode=tmode
,send_function=send_f # prototype: (fname)
,ini_file=ini_file,auto_file=auto_file
,pre_file=prefilename,sub_files=subfilenames,pst_file=pstfilename
)
elif demo == 1:
NgcGui(w=f2
,verbose=vbose,debug=dbg,noauto=noauto
,keyboardfile=keyboardfile
,tmode=tmode
,send_function=send_f # prototype: (fname)
,ini_file=ini_file,auto_file=auto_file
,pre_file=prefilename,sub_files=subfilenames,pst_file=pstfilename
)
top.set_title('Create OnePg inside a new frame')
elif demo == 2:
NgcGui(w=n
,verbose=vbose,debug=dbg,noauto=noauto
,keyboardfile=keyboardfile
,tmode=tmode
,send_function=send_f # prototype: (fname)
,ini_file=ini_file,auto_file=auto_file
,pre_file=prefilename,sub_files=subfilenames,pst_file=pstfilename
)
top.set_title('Create OnePg inside an existing notebook')
else:
print('unknown demo',demo)
usage()
sys.exit(1)
except Exception, detail:
exception_show(Exception,detail,'__main__')
print('in main()')
sys.exit(11)
try:
gtk.main()
except KeyboardInterrupt:
sys.exit(0)
# vim: sts=4 sw=4 et
| bmwiedemann/linuxcnc-mirror | lib/python/pyngcgui.py | Python | lgpl-2.1 | 128,675 | 0.015924 |
from __future__ import (absolute_import, division, print_function)
import unittest
import os
import shutil
import sys
import mantid.kernel.plugins as plugins
from mantid.api import AlgorithmFactory, AlgorithmManager
__TESTALG__ = \
"""from mantid.api import PythonAlgorithm, AlgorithmFactory
class TestPyAlg(PythonAlgorithm):
def PyInit(self):
pass
def PyExec(self):
pass
AlgorithmFactory.subscribe(TestPyAlg)
"""
class PythonPluginsTest(unittest.TestCase):
def setUp(self):
# Make a test directory and test plugin
self._testdir = os.path.join(os.getcwd(), 'PythonPluginsTest_TmpDir')
try:
os.mkdir(self._testdir)
except OSError:
pass # Already exists, maybe it was not removed when a test failed?
filename = os.path.join(self._testdir, 'TestPyAlg.py')
if not os.path.exists(filename):
plugin = open(filename, 'w')
plugin.write(__TESTALG__)
plugin.close()
def tearDown(self):
try:
shutil.rmtree(self._testdir)
except shutil.Error:
pass
def test_loading_python_algorithm_increases_registered_algs_by_one(self):
loaded = plugins.load(self._testdir)
self.assertTrue(len(loaded) > 0)
expected_name = 'TestPyAlg'
# Has the name appear in the module dictionary
self.assertTrue(expected_name in sys.modules)
# Do we have the registered algorithm
algs = AlgorithmFactory.getRegisteredAlgorithms(True)
self.assertTrue(expected_name in algs)
# Can it be created?
try:
test_alg = AlgorithmManager.createUnmanaged(expected_name)
self.assertEquals(expected_name, test_alg.name())
self.assertEquals(1, test_alg.version())
except RuntimeError as exc:
self.fail("Failed to create plugin algorithm from the manager: '%s' " %s)
if __name__ == '__main__':
unittest.main()
| dymkowsk/mantid | Framework/PythonInterface/test/python/mantid/kernel/PythonPluginsTest.py | Python | gpl-3.0 | 1,990 | 0.002513 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metric import MetricResults
from apache_beam.metrics.metric import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.metrics.metricbase import MetricName
class NameTest(unittest.TestCase):
def test_basic_metric_name(self):
name = MetricName('namespace1', 'name1')
self.assertEqual(name.namespace, 'namespace1')
self.assertEqual(name.name, 'name1')
self.assertEqual(name, MetricName('namespace1', 'name1'))
key = MetricKey('step1', name)
self.assertEqual(key.step, 'step1')
self.assertEqual(key.metric.namespace, 'namespace1')
self.assertEqual(key.metric.name, 'name1')
self.assertEqual(key, MetricKey('step1', MetricName('namespace1', 'name1')))
class MetricResultsTest(unittest.TestCase):
def test_metric_filter_namespace_matching(self):
filter = MetricsFilter().with_namespace('ns1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
def test_metric_filter_name_matching(self):
filter = MetricsFilter().with_name('name1').with_namespace('ns1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_name('name1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
def test_metric_filter_step_matching(self):
filter = MetricsFilter().with_step('Top1/Outer1/Inner1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('step1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('Top1/Outer1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('Top1/Inner1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertFalse(MetricResults.matches(filter, key))
class MetricsTest(unittest.TestCase):
def test_get_namespace_class(self):
class MyClass(object):
pass
self.assertEqual('{}.{}'.format(MyClass.__module__, MyClass.__name__),
Metrics.get_namespace(MyClass))
def test_get_namespace_string(self):
namespace = 'MyNamespace'
self.assertEqual(namespace, Metrics.get_namespace(namespace))
def test_get_namespace_error(self):
with self.assertRaises(ValueError):
Metrics.get_namespace(object())
def test_counter_empty_name(self):
with self.assertRaises(ValueError):
Metrics.counter("namespace", "")
def test_counter_empty_namespace(self):
with self.assertRaises(ValueError):
Metrics.counter("", "names")
def test_distribution_empty_name(self):
with self.assertRaises(ValueError):
Metrics.distribution("namespace", "")
def test_distribution_empty_namespace(self):
with self.assertRaises(ValueError):
Metrics.distribution("", "names")
def test_create_counter_distribution(self):
MetricsEnvironment.set_current_container(MetricsContainer('mystep'))
counter_ns = 'aCounterNamespace'
distro_ns = 'aDistributionNamespace'
name = 'a_name'
counter = Metrics.counter(counter_ns, name)
distro = Metrics.distribution(distro_ns, name)
counter.inc(10)
counter.dec(3)
distro.update(10)
distro.update(2)
self.assertTrue(isinstance(counter, Metrics.DelegatingCounter))
self.assertTrue(isinstance(distro, Metrics.DelegatingDistribution))
del distro
del counter
container = MetricsEnvironment.current_container()
self.assertEqual(
container.counters[MetricName(counter_ns, name)].get_cumulative(),
7)
self.assertEqual(
container.distributions[MetricName(distro_ns, name)].get_cumulative(),
DistributionData(12, 2, 2, 10))
if __name__ == '__main__':
unittest.main()
| staslev/incubator-beam | sdks/python/apache_beam/metrics/metric_test.py | Python | apache-2.0 | 5,205 | 0.003842 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2005-2007 Juan David Ibáñez Palomar <jdavid@itaapy.com>
# Copyright (C) 2007 Sylvain Taverne <sylvain@itaapy.com>
# Copyright (C) 2008 David Versmisse <david.versmisse@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from sys import exit
from optparse import OptionParser
# Import from itools
import itools
# Import from ikaaro
from ikaaro.server import stop_server
if __name__ == '__main__':
# The command line parser
usage = '%prog TARGET [TARGET]*'
version = 'itools %s' % itools.__version__
description = ('Stops the web server that is publishing the TARGET'
' ikaaro instance (if it is running). Accepts'
' several TARGETs at once, to stop several servers.')
parser = OptionParser(usage, version=version, description=description)
parser.add_option(
'--force', action="store_true", default=False,
help="Emits SIGTERM instead of SIGINT signal.")
options, args = parser.parse_args()
if len(args) == 0:
parser.error('incorrect number of arguments')
# Action!
for target in args:
try:
stop_server(target)
except LookupError:
            print('Error: {} instance does not exist'.format(target))
exit(1)
# Ok
exit(0)
| hforge/ikaaro | scripts/icms-stop.py | Python | gpl-3.0 | 1,970 | 0 |
import math
def digitFactorialSum(n):
return sum([math.factorial(int(x)) for x in str(n)])
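# Quick sanity check of the idea (added note, not part of the original
# solution): digitFactorialSum(145) = 1! + 4! + 5! = 1 + 24 + 120 = 145,
# so 145 maps to itself and its chain of non-repeating terms has length 1.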
def repeatedLength(n):
repeatedList = []
while n not in repeatedList:
repeatedList.append(n)
n = digitFactorialSum(n)
return len(repeatedList)
if __name__ == "__main__":
cnt = 0
for i in range(1, 1000000):
if repeatedLength(i) == 60:
cnt += 1
print cnt
| python27/AlgorithmSolution | ProjectEuler/51_100/Problem#74.py | Python | agpl-3.0 | 411 | 0.007299 |
class NotifyCenter(object):
"""docstring for NotifyCenter"""
def __init__(self, arg):
super(NotifyCenter, self).__init__()
self.arg = arg
| Bargetor/chestnut | bargetor/notifiction/__init.py | Python | gpl-2.0 | 163 | 0.006135 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes implementing a multi-worker ps DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.util import nest
_LOCAL_CPU = "/device:CPU:0"
_LOCAL_GPU_0 = "/device:GPU:0"
# TODO(yuefengz): maybe cache variables on local CPU.
# TODO(yuefengz): we may want to set session options to disallow communication
# between workers.
class ParameterServerStrategy(distribute_lib.DistributionStrategy):
"""A parameter server DistributionStrategy.
This strategy class works for both local training and between-graph replicated
training for multiple workers. If `cluster_spec` is specified, either passed
in to __init__() method or parsed from the
["TF_CONFIG" environment
variable](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig),
variables and updates to those variables are assigned to parameter servers and
other operations are assigned to workers. If `cluster_spec` is not set, it
becomes local training where variables are assigned to local CPU or the only
GPU. When each worker has more than one GPU, operations will be replicated on
these GPUs. In both cases, operations are replicated but variables are not and
  these workers share a common view of which parameter server a variable is
  assigned to.
This class assumes between-graph replication will be used and works on a graph
for a particular worker. Note that each graph and worker is independent.
This means that while each worker will synchronously compute a single gradient
update across all GPUs, updates between workers proceed asynchronously.
  Operations that occur only on the first replica (such as incrementing the
  global step) will occur on the first replica *of every worker*.
It is expected to call `call_for_each_replica(fn, ...)` for any
operations which potentially can be replicated across replicas (i.e. multiple
  GPUs) even if there is only a CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
  1) Always use `tf.get_variable` instead of `tf.Variable`, as the latter is
  not able to refer to the same variable on different replicas.
2) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
3) It is also not recommended to open a colocation scope (i.e. calling
`tf.colocate_with`) under the strategy's scope. For colocating variables,
  use `distribution.colocate_vars_with` instead. Colocating ops may create
  conflicting device assignments.
"""
def __init__(self, num_gpus_per_worker=0):
"""Initializes this strategy.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
is 0 meaning CPU only.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
super(ParameterServerStrategy, self).__init__(
ParameterServerExtended(self, num_gpus_per_worker))
class ParameterServerExtended(distribute_lib.DistributionStrategyExtended):
"""Implementation of ParameterServerStrategy."""
def __init__(self, container_strategy, num_gpus_per_worker):
super(ParameterServerExtended, self).__init__(container_strategy)
self._num_gpus_per_worker = num_gpus_per_worker
self._initialize_local(num_gpus_per_worker)
# We typically don't need to do all-reduce in this strategy.
self._cross_device_ops = (
cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps(
reduce_to_device=_LOCAL_CPU))
def _initialize_multi_worker(self, num_gpus_per_worker, cluster_spec,
task_type, task_id):
"""Initialize devices for multiple workers.
It creates variable devices and compute devices. Variables and operations
will be assigned to them respectively. We have one compute device per
replica. The variable device is a device function or device string. The
default variable device assigns variables to parameter servers in a
round-robin fashion.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if the cluster_spec doesn't have ps jobs.
"""
assert cluster_spec
if not task_type or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
    self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
# Define compute devices which is a list of device strings and one for each
# replica. When there are GPUs, replicate operations on these GPUs.
# Otherwise, place operations on CPU.
if num_gpus_per_worker > 0:
self._compute_devices = tuple(
"%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus_per_worker)
)
else:
self._compute_devices = (self._worker_device,)
self._compute_devices = tuple(
map(device_util.resolve, self._compute_devices))
self._canonical_compute_device_set = set(self._compute_devices)
# In distributed mode, place variables on ps jobs in a round-robin fashion.
# Note that devices returned from `replica_device_setter` are not
# canonical and therefore we don't canonicalize all variable devices to
# make them consistent.
# TODO(yuefengz): support passing a strategy object to control variable
# assignment.
# TODO(yuefengz): merge the logic of replica_device_setter into this
# class.
num_ps_replicas = len(cluster_spec.as_dict().get("ps", []))
if num_ps_replicas == 0:
raise ValueError("The cluster spec needs to have `ps` jobs.")
self._variable_device = device_setter.replica_device_setter(
ps_tasks=num_ps_replicas,
worker_device=self._worker_device,
merge_devices=True,
cluster=cluster_spec)
# The `_parameter_devices` is needed for the `parameter_devices` property
# and is a list of all variable devices. Here parameter devices are all
# tasks of the "ps" job.
self._parameter_devices = tuple(map("/job:ps/task:{}".format,
range(num_ps_replicas)))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = self._worker_device
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker ParameterServerStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_ps_replicas = %r, is_chief = %r, compute_devices = %r, "
"variable_device = %r", cluster_spec.as_dict(), task_type, task_id,
num_ps_replicas, self._is_chief, self._compute_devices,
self._variable_device)
def _initialize_local(self, num_gpus_per_worker):
"""Initialize internal devices for local training."""
self._worker_device = device_util.canonicalize("/device:CPU:0")
# Define compute devices which is a list of device strings and one for each
# replica. When there are GPUs, replicate operations on these GPUs.
# Otherwise, place operations on CPU.
if num_gpus_per_worker > 0:
self._compute_devices = tuple(
map("/device:GPU:{}".format, range(num_gpus_per_worker)))
else:
self._compute_devices = (_LOCAL_CPU,)
self._compute_devices = tuple(
map(device_util.resolve, self._compute_devices))
self._canonical_compute_device_set = set(self._compute_devices)
# If there is only one GPU, put everything on that GPU. Otherwise, place
# variables on CPU.
if num_gpus_per_worker == 1:
assert len(self._compute_devices) == 1
self._variable_device = _LOCAL_GPU_0
self._parameter_devices = (_LOCAL_GPU_0,)
else:
self._variable_device = _LOCAL_CPU
self._parameter_devices = (_LOCAL_CPU,)
self._is_chief = True
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info(
"ParameterServerStrategy with compute_devices = %r, "
"variable_device = %r", self._compute_devices, self._variable_device)
def _distribute_dataset(self, dataset_fn):
"""Distributes the dataset to each local GPU."""
return values.PerReplicaDataset(
self._call_dataset_fn(dataset_fn), self._compute_devices, True)
def _make_dataset_iterator(self, dataset):
worker_device_pairs = [(self._worker_device, self._compute_devices)]
return values.DatasetIterator(dataset, worker_device_pairs,
self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the dataset to each local GPU."""
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
worker_device_pairs = [(self._worker_device, self._compute_devices)]
return values.InputFunctionIterator(
input_fn, worker_device_pairs, [input_context])
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if not cross_device_ops_lib.check_destinations(destinations):
destinations = self._compute_devices
return self._cross_device_ops.broadcast(tensor, destinations)
def _allow_variable_partition(self):
return not context.executing_eagerly()
# TODO(yuefengz): not all ops in device_setter.STANDARD_PS_OPS will go through
# this creator, such as "MutableHashTable".
def _create_variable(self, next_creator, *args, **kwargs):
if self._num_replicas_in_sync > 1:
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
def var_creator(*args, **kwargs):
"""Create an AggregatingVariable and fix up collections."""
# Record what collections this variable should be added to.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Create and wrap the variable.
v = next_creator(*args, **kwargs)
wrapped = values.AggregatingVariable(v, aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the contained
# variable to the TRAINABLE_VARIABLES collection, so we manually
# remove it and replace with the wrapper. We can't set "trainable"
# to False for next_creator() since that causes functions like
# implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
l.remove(v)
g.add_to_collections(collections, wrapped)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)
return wrapped
else:
var_creator = next_creator
if "colocate_with" in kwargs:
with ops.device(None):
with ops.colocate_with(kwargs["colocate_with"]):
return var_creator(*args, **kwargs)
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._variable_device):
return var_creator(*args, **kwargs)
def _call_for_each_replica(self, fn, args, kwargs):
# pylint: disable=protected-access
return mirrored_strategy._call_for_each_replica(
self._container_strategy(), fn, args, kwargs)
def _verify_destinations_not_different_worker(self, destinations):
if not self._cluster_spec:
return
if destinations is None:
return
for d in cross_device_ops_lib.get_devices_from(destinations):
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job == self._task_type and d_spec.task != self._task_id:
raise ValueError(
"Cannot reduce to another worker: %r, current worker is %r" %
(d, self._worker_device))
def _reduce_to(self, reduce_op, value, destinations):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
# pylint: disable=protected-access
return cross_device_ops_lib.reduce_non_distributed_value(
self, reduce_op, value, destinations)
return self._cross_device_ops.reduce(
reduce_op, value, destinations=destinations)
def _batch_reduce_to(self, reduce_op, value_destination_pairs):
for _, destinations in value_destination_pairs:
self._verify_destinations_not_different_worker(destinations)
return self._cross_device_ops.batch_reduce(reduce_op,
value_destination_pairs)
def _select_single_value(self, structured):
"""Select any single values in `structured`."""
def _select_fn(x): # pylint: disable=g-missing-docstring
if isinstance(x, values.Mirrored):
if len(x.devices) == 1:
return list(x._index.values())[0] # pylint: disable=protected-access
else:
raise ValueError(
"You cannot update variable with a Mirrored object with multiple "
"components %r when using ParameterServerStrategy. You must "
"specify a single value or a Mirrored with a single value." % x)
elif isinstance(x, values.PerReplica):
raise ValueError(
"You cannot update variable with a PerReplica object %r when using "
"ParameterServerStrategy. You must specify a single value or a "
"Mirrored with a single value" % x)
else:
return x
return nest.map_structure(_select_fn, structured)
def _update(self, var, fn, args, kwargs, group):
if isinstance(var, values.AggregatingVariable):
var = var.get()
if not isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(
"You can not update `var` %r. It must be a Variable." % var)
with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):
result = fn(var, *self._select_single_value(args),
**self._select_single_value(kwargs))
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
# TODO(yuefengz): does it need to call _select_single_value?
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
with ops.device(
colocate_with.device), distribute_lib.UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def _unwrap(self, val):
if isinstance(val, values.DistributedValues):
# Return in a deterministic order.
if set(val.devices) == self._canonical_compute_device_set:
return tuple(val.get(device=d) for d in self._compute_devices)
return tuple(val.get(device=d) for d in sorted(val.devices))
return (val,)
def value_container(self, val):
if (hasattr(val, "_aggregating_container") and
not isinstance(val, values.AggregatingVariable)):
wrapper = val._aggregating_container() # pylint: disable=protected-access
if wrapper is not None:
return wrapper
return val
def read_var(self, var):
# No need to distinguish between normal variables and replica-local
# variables.
return array_ops.identity(var)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class.
The strategy object will be re-initialized if `cluster_spec` is given but
was not passed in the constructor.
Args:
session_config: not used currently.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
if not self._cluster_spec and cluster_spec:
# If a `cluster_spec` is already passed in, do nothing here.
# TODO(yuefengz): check `cluster_spec` is the same if this object has
# already been initialized with a `cluster_spec`.
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, must also specify "
"`task_type` and `task_id`.")
self._cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_spec)
self._task_type = task_type
self._task_id = task_id
self._initialize_multi_worker(self._num_gpus_per_worker,
self._cluster_spec, task_type, task_id)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
if not self._cluster_spec:
updated_config.isolate_session_state = True
return updated_config
updated_config.isolate_session_state = False
assert self._task_type
assert self._task_id is not None
# The device filters prevent communication between workers.
if self._task_type not in ["chief", "worker"]:
return updated_config
del updated_config.device_filters[:]
updated_config.device_filters.extend(
["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"])
return updated_config
@property
def _num_replicas_in_sync(self):
return len(self._compute_devices)
@property
def worker_devices(self):
return self._compute_devices
@property
def parameter_devices(self):
return self._parameter_devices
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
@property
def experimental_between_graph(self):
# TODO(yuefengz): Should this return False in the local case?
return True
@property
def experimental_should_init(self):
return self._is_chief
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
return False
| asimshankar/tensorflow | tensorflow/contrib/distribute/python/parameter_server_strategy.py | Python | apache-2.0 | 21,809 | 0.006098 |
from setuptools import setup
from sample import __version__, __author__
setup(name='sample',
version=__version__,
      description='an example of a Python module',
url='http://github.com/josanly/python-module-project',
author=__author__,
author_email='josso.adrien@gmail.com',
license='GPL v3.0',
packages=['sample'],
python_requires='>=PYTHON_VERSION',
zip_safe=False)
| josanly/python-module-project | resources/setup.template.py | Python | gpl-3.0 | 419 | 0.002387 |
# -*- coding: utf-8 -*-
#
#
# File to preform some standard tasks on a neuroConstruct project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import time
import subprocess
from java.io import File
from ucl.physiol.neuroconstruct.cell.utils import CellTopologyHelper
from ucl.physiol.neuroconstruct.cell.compartmentalisation import GenesisCompartmentalisation
from ucl.physiol.neuroconstruct.cell.compartmentalisation import OriginalCompartmentalisation
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.neuron.NeuronSettings import DataSaveFormat
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.neuroml import NeuroMLConstants
from ucl.physiol.neuroconstruct.neuroml import LemsConstants
from ucl.physiol.neuroconstruct.project import SimPlot
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.simulation import SpikeAnalyser
from ucl.physiol.neuroconstruct.utils.units import UnitConverter
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings
from ucl.physiol.neuroconstruct.pynn.PynnFileManager import PynnSimulator
from ucl.physiol.neuroconstruct.neuroml import NeuroMLFileManager
def loadMepFile(mepFile, scale=1):
# Load an OMV mep file, see https://github.com/OpenSourceBrain/osb-model-validation
spike_times = {}
mep_file = open(mepFile)
exp_name = ""
for line in mep_file:
line = line.strip()
if line.startswith('system:'):
pass
elif line.startswith('expected:'):
pass
elif line.startswith('spike times: ['):
times = line[14:-1].split(',')
tt = []
            for t in times:
                tt.append(float(t.strip())*scale)
spike_times[exp_name] = tt
else:
exp_name = line[:-1]
return spike_times
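# For reference (added note): the parser above implies a .mep layout roughly
# like the following, where each experiment name line ends in ':' and is
# followed by its 'spike times' list (values here are purely illustrative):
#
#   system: ...
#   expected:
#   Experiment1:
#       spike times: [10.1, 22.4, 35.0]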
def generateNeuroML2(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML2")
if verbose: print("Generating NeuroML 2 files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.getLatestVersion(),
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def generateNeuroML1(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML")
if verbose: print("Generating NeuroML v1.x files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_1,
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def getUnusedSimRef(project, simRefPrefix="P_Sim_"):
index = 0
while File( "%s/simulations/%s%i"%(project.getProjectMainDirectory().getCanonicalPath(), simRefPrefix,index)).exists():
index = index+1
simRef = "%s%i"%(simRefPrefix,index)
return simRef
def generateAndRunGenesis(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1,
symmetricComps=None):
prefix = "--- GENESIS gen: "
if verbose: print prefix+"Going to generate GENESIS files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(False)
if symmetricComps is not None:
project.genesisSettings.setSymmetricCompartments(symmetricComps)
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running GENESIS simulation: "+simRef
else:
print prefix+"Problem running GENESIS simulation: "+simRef
return success
def generateAndRunMoose(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1):
prefix = "--- MOOSE gen: "
if verbose: print prefix+"Going to generate MOOSE files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(True)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running MOOSE simulation: "+simRef
else:
print prefix+"Problem running MOOSE simulation: "+simRef
return success
def generateAndRunPsics(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PSICS gen: "
if verbose: print prefix+"Going to generate PSICS files for: "+simRef
project.psicsFileManager.generateThePsicsFiles(simConfig,
simulatorSeed)
success = projectManager.doRunPsics(simConfig, (not runInBackground))
if success:
print prefix+"Set running PSICS simulation: "+simRef
else:
print prefix+"Problem running PSICS simulation: "+simRef
return success
def generateAndRunLems(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion()):
prefix = "--- LEMS/NeuroML 2 gen: "
if verbose: print prefix+"Going to generate LEMS/NeuroML 2 files for: "+simRef
compartmentalisation = OriginalCompartmentalisation()
project.neuromlFileManager.generateNeuroMLFiles(simConfig,
version,
LemsConstants.LemsOption.EXECUTE_MODEL,
compartmentalisation,
simulatorSeed,
False,
False,
runInBackground)
return 1 # Call above will throw error if it fails
def generateAndRunPyNN(pynnSim,
project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PyNN_"+pynnSim+" gen: "
if verbose: print prefix+"Going to generate PyNN_"+pynnSim+" files for: "+simRef
pynnSimulator = None
if "NEST" in pynnSim:
pynnSimulator = PynnSimulator.NEST
elif "NEURON" in pynnSim:
pynnSimulator = PynnSimulator.NEURON
elif "BRIAN" in pynnSim:
pynnSimulator = PynnSimulator.BRIAN
else:
print pynnSim
#if verbose: print prefix+"Going to generate PyNN_"+str(pynnSimulator)+" files for: "+simRef
project.pynnFileManager.generateThePynnFiles(simConfig,
pynnSimulator,
simulatorSeed)
project.pynnFileManager.runFile(True)
return 1
def generateNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
prefix = "--- NEURON gen: "
if verbose: print prefix+"Going to generate NEURON files for simulation: "+simRef
project.neuronFileManager.setQuitAfterRun(quitAfterRun)
if runInBackground:
project.neuronSettings.setNoConsole()
if saveAsHdf5:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.HDF5_NC)
else:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.TEXT_NC)
project.neuronSettings.setVarTimeStep(varTimestep)
if varTimestepTolerance is not None:
project.neuronSettings.setVarTimeAbsTolerance(varTimestepTolerance)
project.neuronFileManager.generateTheNeuronFiles(simConfig,
None,
runMode,
simulatorSeed)
if verbose: print prefix+"Generated hoc files for simulation: "+simRef
compileProcManager = ProcessManager(project.neuronFileManager.getMainHocFile())
compileSuccess = compileProcManager.compileFileWithNeuron(0,0)
if verbose: print prefix+"Compiled NEURON files for: "+simRef
return compileSuccess
def generateAndRunNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
### Set simulation running
prefix = "--- NEURON gen: "
compileSuccess = generateNeuron(project, projectManager, simConfig, simRef,
simulatorSeed, verbose=verbose,
quitAfterRun=quitAfterRun,
runInBackground=runInBackground,
varTimestep=varTimestep,
varTimestepTolerance=varTimestepTolerance,
saveAsHdf5=saveAsHdf5,runMode=runMode)
if compileSuccess:
success = projectManager.doRunNeuron(simConfig)
if success:
print prefix+"Set running NEURON simulation: "+simRef
else:
print prefix+"Problem running NEURON simulation: "+simRef
return success
else:
return False
class SimulationManager():
knownSimulators = ["NEURON", "GENESIS", "GENESIS_SI", "GENESIS_PHYS", "MOOSE", "MOOSE_PHYS", "MOOSE_SI", "PSICS", "LEMS", "LEMSalpha", "PYNN_NEST", "PYNN_NEURON", "PYNN_BRIAN"]
plotFrames = {}
dataSets = {}
def __init__(self,
projFile,
numConcurrentSims = 1,
verbose = True):
self.allRunningSims = []
self.allRecentlyFinishedSims = []
self.allFinishedSims = []
self.projectManager = ProjectManager()
self.project = self.projectManager.loadProject(projFile)
self.numConcurrentSims = numConcurrentSims
self.verbose = verbose
self.printver("Starting Simulation Manager for project: "+self.project.getProjectFullFileName(), True)
self.printver("This will run up to %i simulations concurrently"%numConcurrentSims)
def printver(self, message, forcePrint=False):
if self.verbose or forcePrint:
print "--- SimMgr: "+ str(message)
def updateSimsRunning(self):
self.updateSimsRunningR(True)
def updateSimsRunningR(self, checkRemote):
remoteChecked = False
for sim in self.allRunningSims:
completed = False
timeFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
timeFile2 = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.txt") # for PSICS...
self.printver("Checking file: "+timeFile.getCanonicalPath() +", exists: "+ str(timeFile.exists()))
if (timeFile.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
else:
self.printver("Checking file: "+timeFile2.getCanonicalPath() +", exists: "+ str(timeFile2.exists()))
if (timeFile2.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
if checkRemote and not completed:
pullFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/pullsim.sh")
checkingRemoteFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/checkingRemote")
if pullFile.exists() and not checkingRemoteFile.exists():
pullCmd = ''+pullFile.getAbsolutePath()
self.printver("Going to run: "+pullCmd)
subprocess.call(pullCmd,shell=True)
remoteChecked = True
if remoteChecked:
self.printver("Waiting while remote simulations are checked...")
time.sleep(5)
self.updateSimsRunningR(False)
else:
self.printver("allRecentlyFinishedSims: "+str(self.allRecentlyFinishedSims))
self.printver("allFinishedSims: "+str(self.allFinishedSims))
self.printver("allRunningSims: "+str(self.allRunningSims))
def doCheckNumberSims(self):
self.printver("%i simulations out of max %s currently running: %s"%(len(self.allRunningSims), self.numConcurrentSims, str(self.allRunningSims)))
while (len(self.allRunningSims)>=self.numConcurrentSims):
self.printver("Waiting for another simulation slot to become available...")
time.sleep(4) # wait a while...
self.updateSimsRunning()
def reloadSims(self,
waitForAllSimsToFinish = True,
plotSims = True,
analyseSims = True,
plotVoltageOnly = False):
self.printver("Trying to reload simulations: "+str(self.allFinishedSims))
plottedSims = []
for simRef in self.allRecentlyFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
timeFile = File(simDir, "time.dat")
timeFile2 = File(simDir,"time.txt") # for PSICS...
if timeFile.exists() or timeFile2.exists():
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
time.sleep(1) # wait a while...
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
if plotSims:
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
#self.printver("Found data store: "+str(dataStore)+", plotting volts only: "+str(plotVoltageOnly))
if not plotVoltageOnly or dataStore.getVariable() == SimPlot.VOLTAGE:
plotFrame = PlotManager.getPlotterFrame("Behaviour of "+dataStore.getVariable() \
+" for sim config: %s"%(simConfigName))
plotFrame.addDataSet(ds)
if analyseSims:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
analyseThreshold = 0 # mV
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
self.printver("Spike times in %s for sim %s: %s"%(dataStore.getCellSegRef(), simRef, str(spikeTimes)), True)
plottedSims.append(simRef)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath(), True)
self.printver(sys.exc_info(), True)
for simRef in plottedSims:
self.allRecentlyFinishedSims.remove(simRef)
if waitForAllSimsToFinish and len(self.allRunningSims)>0:
self.printver("Waiting for sims: %s to finish..."%str(self.allRunningSims))
time.sleep(2) # wait a while...
self.updateSimsRunning()
self.reloadSims(waitForAllSimsToFinish,
plotSims,
analyseSims,
plotVoltageOnly)
def checkSims(self,
spikeTimesToCheck = {},
spikeTimeAccuracy = 0.01,
threshold = 0 ): # mV
self.updateSimsRunning()
self.printver( "Trying to check simulations: %s against: %s, with a threshold: %s" % (str(self.allFinishedSims), str(spikeTimesToCheck), str(threshold)))
report = ""
numPassed = 0
numFailed = 0
checksUnused = spikeTimesToCheck.keys()
for simRef in self.allFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
self.printver("Checking dataStore: "+str(dataStore)+" ("+dataStore.getCellSegRef()+")")
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
if dataStore.getVariable() == SimPlot.VOLTAGE:
if spikeTimesToCheck is not None:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
threshToUse = threshold
if type(threshold) is dict:
threshToUse = float(threshold[dataStore.getCellSegRef()])
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, threshToUse, analyseStartTime, analyseStopTime)
self.printver("Spike times (crossing %f) from %f to %f in %s for sim %s: %s"%(threshToUse, analyseStartTime, analyseStopTime, dataStore.getCellSegRef(), simRef, str(spikeTimes)))
if spikeTimesToCheck.has_key(dataStore.getCellSegRef()):
self.printver("Removing %s from %s"%(str(dataStore.getCellSegRef()), str(checksUnused)))
if dataStore.getCellSegRef() in checksUnused:
checksUnused.remove(dataStore.getCellSegRef())
fail = False
spikeTimesTarget = spikeTimesToCheck[dataStore.getCellSegRef()]
if len(spikeTimes) != len(spikeTimesTarget):
report = report + "ERROR: Number of spikes of %s (%i) not same as target list for %s (%i)!\n"% \
(dataStore.getCellSegRef(), len(spikeTimes), simRef, len(spikeTimesTarget))
fail = True
for spikeNum in range(0, min(len(spikeTimesTarget),len(spikeTimes))):
delta = spikeTimesTarget[spikeNum] - spikeTimes[spikeNum]
if float(abs(delta)) > float(spikeTimeAccuracy):
report = report + "ERROR: Spike time: %f not within %f of %f (delta = %f) for %s in %s!\n" % \
(spikeTimes[spikeNum], spikeTimeAccuracy, spikeTimesTarget[spikeNum], delta, dataStore.getCellSegRef(), simRef)
fail = True
if fail:
numFailed=numFailed+1
else:
numPassed=numPassed+1
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
raise
self.printver(sys.exc_info())
numFailed=numFailed+1
ignored = "" if len(checksUnused) == 0 else ", %i test conditions ignored"%(len(checksUnused))
report = report+"\n %i tests passed, %i tests failed%s!\n"%(numPassed, numFailed, ignored)
return report
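    # Note (added comment): spikeTimesToCheck is keyed by cell segment
    # reference and maps to the expected spike times in ms, e.g. (values
    # illustrative only):
    #
    #   spikeTimesToCheck = {'CellGroup_0': [55.0, 105.0, 155.0]}
    #
    # threshold may be a single value in mV or a dict keyed the same way.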
def runMultipleSims(self,
simConfigs = ["Default Simulation Configuration"],
maxElecLens = [-1],
simDt = None,
simDtOverride = None,
simDuration = None,
neuroConstructSeed = 12345,
simulatorSeed = 11111,
simulators = ["NEURON", "GENESIS_PHYS"],
runSims = True,
generateSims = True,
verboseSims = True,
runInBackground = False,
varTimestepNeuron = None,
varTimestepTolerance = None,
simRefGlobalSuffix = '',
simRefGlobalPrefix = '',
mpiConfig = MpiSettings.LOCAL_SERIAL,
mpiConfigs = [],
suggestedRemoteRunTime = -1,
saveAsHdf5 = False,
saveOnlySpikes = False,
saveAllContinuous = False,
runMode = NeuronFileManager.RUN_HOC):
for sim in simulators:
if sim not in self.knownSimulators:
print "Unknown simulator: "+sim+"!"
print "Known simulators: "+str(self.knownSimulators)
sys.exit(1)
allSimsSetRunning = []
for simConfigName in simConfigs:
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate network for Simulation Configuration: "+str(simConfig))
if saveOnlySpikes:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
if simPlot.getValuePlotted() == SimPlot.VOLTAGE:
simPlot.setValuePlotted(SimPlot.SPIKE)
if saveAllContinuous:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
#print simPlot
if SimPlot.SPIKE in simPlot.getValuePlotted():
simPlot.setValuePlotted(SimPlot.VOLTAGE)
#print simPlot
if len(mpiConfigs) == 0:
mpiConfigs = [mpiConfig]
for mpiConfigToUse in mpiConfigs:
mpiSettings = MpiSettings()
simConfig.setMpiConf(mpiSettings.getMpiConfiguration(mpiConfigToUse))
self.printver("Using Parallel Configuration: "+ str(simConfig.getMpiConf()))
if suggestedRemoteRunTime > 0:
self.project.neuronFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
self.project.genesisFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
for maxElecLen in maxElecLens:
if simDt is not None:
self.project.simulationParameters.setDt(simDt)
else:
simDt = self.project.simulationParameters.getDt() # for later if simDtOverride used...
if simDuration is not None:
simConfig.setSimDuration(simDuration)
recompSuffix = ""
if maxElecLen > 0:
cellGroup = simConfig.getCellGroups().get(0)
cell = self.project.cellManager.getCell(self.project.cellGroupsInfo.getCellType(cellGroup))
self.printver("Recompartmentalising cell in: "+cellGroup+" which is: "+str(cell))
info = CellTopologyHelper.recompartmentaliseCell(cell, maxElecLen, self.project)
self.printver("*** Recompartmentalised cell: "+info)
if len(maxElecLens) > 1 or maxElecLen > 0 : recompSuffix = "_"+str(maxElecLen)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(15)
self.printver("Generated network with %i cell(s)" % self.project.generatedCellPositions.getNumberInAllCellGroups())
simRefPrefix = (simConfigName+"_").replace(' ', '').replace(':', '')
if len(mpiConfigs) > 1:
simRefPrefix = simRefPrefix+(mpiConfigToUse+"_").replace(' ', '').replace('(', '_').replace(')', '_')
self.doCheckNumberSims()
self.printver("Going to generate for simulators: "+str(simulators))
if simulators.count("NEURON")>0:
if simDtOverride is not None:
if simDtOverride.has_key("NEURON"):
self.project.simulationParameters.setDt(simDtOverride["NEURON"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_N"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if varTimestepNeuron is None:
varTimestepNeuron = self.project.neuronSettings.isVarTimeStep()
if varTimestepTolerance is None:
varTimestepTolerance = self.project.neuronSettings.getVarTimeAbsTolerance()
if generateSims or runSims:
func = generateAndRunNeuron if runSims else generateNeuron
print("Using function %s" % str(func))
success = func(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= verboseSims,
runInBackground= runInBackground,
varTimestep= varTimestepNeuron,
varTimestepTolerance= varTimestepTolerance,
saveAsHdf5 = saveAsHdf5,
runMode = runMode)
if success and runSims:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("PSICS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("PSICS"):
self.project.simulationParameters.setDt(simDtOverride["PSICS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_P"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPsics(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("LEMSalpha")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_2_ALPHA)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
if simulators.count("LEMS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion())
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
for sim in simulators:
if "PYNN_" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
pynnSim = sim[5:]
simRef = simRefGlobalPrefix + simRefPrefix+"_Py_"+pynnSim+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPyNN(pynnSim,
self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.printver("Waiting a while before running next sim...")
time.sleep(2) # wait a while before running PyNN...
self.doCheckNumberSims()
for sim in simulators:
if "MOOSE" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_M"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunMoose(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESIS...
self.doCheckNumberSims()
for sim in simulators:
if "GENESIS" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_G"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units,
symmetricComps=None)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESISsym...
self.doCheckNumberSims()
if simulators.count("GENESISsym")>0:
simRef = simRefGlobalPrefix + simRefPrefix+"_Gs"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
                                                            self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
symmetricComps=True)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.updateSimsRunningR(False)
self.printver("Finished setting running all simulations for ParallelConfig: "+mpiConfigToUse)
self.printver("Finished setting running all simulations for sim config: "+simConfigName)
return allSimsSetRunning
def generateFICurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate F-I curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
                self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the GENESIS/MOOSE sim windows open after finishing
self.project.genesisSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
                if simConfig.getInputs().size()>1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
plotFrameFI = PlotManager.getPlotterFrame("F-I curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
plotFrameVolts = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
plotFrameFI.setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = "F-I curve for Simulation Configuration: "+str(simConfig)
dataSet = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
dataSet.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
dataSetV = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
dataSetV.addPoint(times[i], volts[i])
if plotAllTraces:
plotFrameVolts.addDataSet(dataSetV)
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
dataSet.addPoint(stimAmp,avgFreq)
else:
dataSet.addPoint(stimAmp,0)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
plotFrameFI.addDataSet(dataSet)
def generateBatchCurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1,
curveType = 'F-I'):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate %s curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (curveType, simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
        # can generate different categories of simulation curve: F-I and also SS-I
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
                self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the GENESIS/MOOSE sim windows open after finishing
self.project.genesisSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
            if simConfig.getInputs().size()>1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.generatePlotAnalisys(simulator,simConfigName,analyseStartTime,analyseStopTime,analyseThreshold,plotAllTraces,curveType,simRefsVsStims)
def generatePlotAnalisys(self,
simulator,
simConfigName,
analyseStartTime,
analyseStopTime,
analyseThreshold,
plotAllTraces,
curveType,
simRefsVsStims):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
self.plotFrames[curveType] = PlotManager.getPlotterFrame(curveType+" curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
self.plotFrames["Volts"] = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
self.plotFrames[curveType].setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = curveType+" curve for Simulation Configuration: "+str(simConfig)
if (curveType == "F-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
elif (curveType == "SS-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "V", "Current injected", "Steady state Voltage")
self.dataSets[curveType].setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
self.dataSets["V"] = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
self.dataSets["V"].addPoint(times[i], volts[i])
if plotAllTraces:
self.plotFrames["V"].addDataSet(self.dataSets["V"])
if (curveType == "F-I") :
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
self.dataSets["F-I"].addPoint(stimAmp,avgFreq)
else:
self.dataSets["F-I"].addPoint(stimAmp,0)
elif (curveType == "SS-I") :
# check within analyseStartTime and analyseStopTime if we deviate by more than +/- analyseThreshold
steadyStateVoltageFound = False
stimAmp = simRefsVsStims[sim]
minVolt = 99999999
maxVolt = -99999999
for i in range(len(volts)) :
if times[i] >= analyseStartTime and times[i] <= analyseStopTime :
if steadyStateVoltageFound == False:
self.printver("Data start time found for SS-I")
minVolt = volts[i]
maxVolt = volts[i]
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = True
if volts[i] < minVolt :
minVolt = volts[i]
elif volts[i] > maxVolt :
maxVolt = volts[i]
if (maxVolt - minVolt) > analyseThreshold :
self.printver("Data outside the threshold for steady state voltage, Error")
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = False
break
if (steadyStateVoltageFound) :
midVoltage = (minVolt + maxVolt) / 2
self.dataSets["SS-I"].addPoint(stimAmp,midVoltage)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
self.plotFrames[curveType].addDataSet(self.dataSets[curveType])
| rgerkin/neuroConstruct | pythonNeuroML/nCUtils/ncutils.py | Python | gpl-2.0 | 66,510 | 0.014705 |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QDialog, QUrl
from calibre import url_slash_cleaner
from calibre.gui2.store.web_store_dialog_ui import Ui_Dialog
class WebStoreDialog(QDialog, Ui_Dialog):
def __init__(self, gui, base_url, parent=None, detail_url=None, create_browser=None):
QDialog.__init__(self, parent=parent)
self.setupUi(self)
self.gui = gui
self.base_url = base_url
self.view.set_gui(self.gui)
self.view.create_browser = create_browser
self.view.loadStarted.connect(self.load_started)
self.view.loadProgress.connect(self.load_progress)
self.view.loadFinished.connect(self.load_finished)
self.home.clicked.connect(self.go_home)
self.reload.clicked.connect(self.view.reload)
self.back.clicked.connect(self.view.back)
self.go_home(detail_url=detail_url)
def set_tags(self, tags):
self.view.set_tags(tags)
def load_started(self):
self.progress.setValue(0)
def load_progress(self, val):
self.progress.setValue(val)
def load_finished(self, ok=True):
self.progress.setValue(100)
def go_home(self, checked=False, detail_url=None):
if detail_url:
url = detail_url
else:
url = self.base_url
# Reduce redundant /'s because some stores
# (Feedbooks) and server frameworks (cherrypy)
# choke on them.
url = url_slash_cleaner(url)
self.view.load(QUrl(url))
| ashang/calibre | src/calibre/gui2/store/web_store_dialog.py | Python | gpl-3.0 | 1,721 | 0.001743 |
"""Convert ASTs into L{pydoctor.model.Documentable} instances."""
from pydoctor import model, ast_pp
from compiler import visitor, transformer, ast
import symbol, token
class str_with_orig(str):
"""Hack to allow recovery of the literal that gave rise to a docstring in an AST.
We do this to allow the users to edit the original form of the docstring in the
editing server defined in the L{server} module.
@ivar orig: The literal that gave rise to this constant in the AST.
"""
pass
class MyTransformer(transformer.Transformer):
"""Custom transformer that creates Nodes with L{str_with_orig} instances for docstrings."""
def get_docstring(self, node, n=None):
"""Override C{transformer.Transformer.get_docstring} to return a L{str_with_orig} object."""
if n is None:
n = node[0]
node = node[1:]
if n == symbol.suite:
if len(node) == 1:
return self.get_docstring(node[0])
for sub in node:
if sub[0] == symbol.stmt:
return self.get_docstring(sub)
return None
if n == symbol.file_input:
for sub in node:
if sub[0] == symbol.stmt:
return self.get_docstring(sub)
return None
if n == symbol.atom:
if node[0][0] == token.STRING:
s = ''
for t in node:
s = s + eval(t[1])
r = str_with_orig(s)
r.orig = ''.join(t[1] for t in node)
r.linenumber = node[0][2]
return r
return None
if n == symbol.stmt or n == symbol.simple_stmt \
or n == symbol.small_stmt:
return self.get_docstring(node[0])
if n in transformer._doc_nodes and len(node) == 1:
return self.get_docstring(node[0])
return None
def parseFile(path):
"""Duplicate of L{compiler.parseFile} that uses L{MyTransformer}."""
f = open(path, "U")
src = f.read() + "\n"
f.close()
return parse(src)
def parse(buf):
"""Duplicate of L{compiler.parse} that uses L{MyTransformer}."""
return MyTransformer().parsesuite(buf)
def node2dottedname(node):
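    # Walks a chain of Getattr/Name nodes: e.g. the AST for "a.b.c" yields ['a', 'b', 'c'],
    # a bare name gives a single-element list, and anything else gives None.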
parts = []
while isinstance(node, ast.Getattr):
parts.append(node.attrname)
node = node.expr
if isinstance(node, ast.Name):
parts.append(node.name)
else:
return None
parts.reverse()
return parts
class ModuleVistor(object):
def __init__(self, builder, module):
self.builder = builder
self.system = builder.system
self.module = module
def default(self, node):
for child in node.getChildNodes():
self.visit(child)
def visitModule(self, node):
assert self.module.docstring is None
self.module.docstring = node.doc
self.builder.push(self.module)
self.default(node)
self.builder.pop(self.module)
def visitClass(self, node):
rawbases = []
bases = []
baseobjects = []
for n in node.bases:
str_base = ast_pp.pp(n)
rawbases.append(str_base)
full_name = self.builder.current.expandName(str_base)
bases.append(full_name)
baseobj = self.system.objForFullName(full_name)
if not isinstance(baseobj, model.Class):
baseobj = None
baseobjects.append(baseobj)
cls = self.builder.pushClass(node.name, node.doc)
cls.decorators = []
cls.rawbases = rawbases
cls.bases = bases
cls.baseobjects = baseobjects
def node2data(node):
dotted_name = node2dottedname(node)
if dotted_name is None:
return None
dotted_name = '.'.join(dotted_name)
full_name = self.builder.current.expandName(dotted_name)
obj = self.system.objForFullName(full_name)
return (dotted_name, full_name, obj)
if node.decorators:
for decnode in node.decorators:
if isinstance(decnode, ast.CallFunc):
args = []
for arg in decnode.args:
args.append(node2data(arg))
base = node2data(decnode.node)
else:
base = node2data(decnode)
args = None
cls.decorators.append((base, args))
if node.lineno is not None:
cls.linenumber = node.lineno
if cls.parentMod.sourceHref:
cls.sourceHref = cls.parentMod.sourceHref + '#L' + \
str(cls.linenumber)
for b in cls.baseobjects:
if b is not None:
b.subclasses.append(cls)
self.default(node)
self.builder.popClass()
def visitFrom(self, node):
if not isinstance(self.builder.current, model.CanContainImportsDocumentable):
self.warning("processing import statement in odd context")
return
modname = self.builder.expandModname(node.modname)
mod = self.system.getProcessedModule(modname)
if mod is not None:
assert mod.state in [model.PROCESSING, model.PROCESSED]
expandName = mod.expandName
else:
expandName = lambda name: modname + '.' + name
_localNameToFullName = self.builder.current._localNameToFullName_map
for fromname, asname in node.names:
if fromname == '*':
if mod is None:
self.builder.warning("import * from unknown", modname)
return
self.builder.warning("import *", modname)
if mod.all is not None:
names = mod.all
else:
names = mod.contents.keys() + mod._localNameToFullName_map.keys()
names = [k for k in names if not k.startswith('_')]
for n in names:
_localNameToFullName[n] = expandName(n)
return
if asname is None:
asname = fromname
if isinstance(self.builder.current, model.Module) and \
self.builder.current.all is not None and \
asname in self.builder.current.all and \
modname in self.system.allobjects:
mod = self.system.allobjects[modname]
if isinstance(mod, model.Module) and \
fromname in mod.contents and \
(mod.all is None or fromname not in mod.all):
self.system.msg(
"astbuilder",
"moving %r into %r"
% (mod.contents[fromname].fullName(),
self.builder.current.fullName()))
ob = mod.contents[fromname]
ob.reparent(self.builder.current, asname)
continue
if isinstance(
self.system.objForFullName(modname), model.Package):
self.system.getProcessedModule(modname + '.' + fromname)
_localNameToFullName[asname] = expandName(fromname)
def visitImport(self, node):
"""Process an import statement.
The grammar for the statement is roughly:
mod_as := DOTTEDNAME ['as' NAME]
import_stmt := 'import' mod_as (',' mod_as)*
        and this is translated into a node which is an instance of Import with
an attribute 'names', which is in turn a list of 2-tuples
(dotted_name, as_name) where as_name is None if there was no 'as foo'
part of the statement.
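        For example, "import foo.bar as fb, baz" gives
        names == [('foo.bar', 'fb'), ('baz', None)].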
"""
if not isinstance(self.builder.current, model.CanContainImportsDocumentable):
self.warning("processing import statement in odd context")
return
_localNameToFullName = self.builder.current._localNameToFullName_map
for fromname, asname in node.names:
fullname = self.builder.expandModname(fromname)
mod = self.system.getProcessedModule(fullname)
if mod is not None:
assert mod.state in [model.PROCESSING, model.PROCESSED]
expandName = mod.expandName
else:
expandName = lambda name: name
if asname is None:
asname = fromname.split('.', 1)[0]
# aaaaargh! python sucks.
parts = fullname.split('.')
for i, part in enumerate(fullname.split('.')[::-1]):
if part == asname:
fullname = '.'.join(parts[:len(parts)-i])
_localNameToFullName[asname] = expandName(fullname)
break
else:
fullname = '.'.join(parts)
_localNameToFullName[asname] = '.'.join(parts)
else:
_localNameToFullName[asname] = fullname
def _handleOldSchoolDecoration(self, target, expr):
if isinstance(self.builder.current, model.Class):
if not isinstance(expr, ast.CallFunc):
return
func = expr.node
if not isinstance(func, ast.Name):
return
func = func.name
args = expr.args
if len(args) != 1:
return
arg, = args
if not isinstance(arg, ast.Name):
return
arg = arg.name
if target == arg and func in ['staticmethod', 'classmethod']:
target = self.builder.current.contents.get(target)
if target and isinstance(target, model.Function):
if target.kind != 'Method':
self.system.msg('ast', 'XXX')
else:
target.kind = func.title().replace('m', ' M')
def _handleAliasing(self, target, expr):
dottedname = node2dottedname(expr)
if dottedname is None:
return
if not isinstance(self.builder.current, model.CanContainImportsDocumentable):
return
c = self.builder.current
base = None
if dottedname[0] in c._localNameToFullName_map:
base = c._localNameToFullName_map[dottedname[0]]
elif dottedname[0] in c.contents:
base = c.contents[dottedname[0]].fullName()
if base:
c._localNameToFullName_map[target] = '.'.join([base] + dottedname[1:])
def visitAssign(self, node):
if len(node.nodes) != 1:
return
if not isinstance(node.nodes[0], ast.AssName):
return
target = node.nodes[0].name
self._handleOldSchoolDecoration(target, node.expr)
self._handleAliasing(target, node.expr)
def visitFunction(self, node):
func = self.builder.pushFunction(node.name, node.doc)
func.decorators = node.decorators
if isinstance(func.parent, model.Class) and node.decorators:
isclassmethod = False
isstaticmethod = False
for d in node.decorators.nodes:
if isinstance(d, ast.Name):
if d.name == 'classmethod':
isclassmethod = True
elif d.name == 'staticmethod':
isstaticmethod = True
if isstaticmethod:
if isclassmethod:
self.system.msg(
'ast', '%r is both class- and static-method?'%(
func.fullName(),), thresh=-1)
else:
func.kind = 'Static Method'
elif isclassmethod:
func.kind = 'Class Method'
if node.lineno is not None:
func.linenumber = node.lineno
if func.parentMod.sourceHref:
func.sourceHref = func.parentMod.sourceHref + '#L' + \
str(func.linenumber)
# ast.Function has a pretty lame representation of
# arguments. Let's convert it to a nice concise format
# somewhat like what inspect.getargspec returns
argnames = node.argnames[:]
kwname = starargname = None
if node.kwargs:
kwname = argnames.pop(-1)
if node.varargs:
starargname = argnames.pop(-1)
defaults = []
for default in node.defaults:
try:
defaults.append(ast_pp.pp(default))
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
self.builder.warning("unparseable default",
"%s: %s %r"%(e.__class__.__name__,
e, default))
defaults.append('???')
        # argh, convert unpacked-arguments from tuples to lists,
        # because that's what getargspec uses and what the unit test
        # compares against
argnames2 = []
for argname in argnames:
if isinstance(argname, tuple):
argname = list(argname)
argnames2.append(argname)
func.argspec = (argnames2, starargname, kwname, tuple(defaults))
#self.postpone(func, node.code)
self.builder.popFunction()
class ASTBuilder(object):
ModuleVistor = ModuleVistor
def __init__(self, system):
self.system = system
self.current = None
self.currentMod = None
self._stack = []
self.ast_cache = {}
def _push(self, cls, name, docstring):
obj = cls(self.system, name, docstring, self.current)
self.system.addObject(obj)
self.push(obj)
return obj
def _pop(self, cls):
assert isinstance(self.current, cls)
self.pop(self.current)
def push(self, obj):
self._stack.append(self.current)
self.current = obj
if isinstance(obj, model.Module):
assert self.currentMod is None
obj.parentMod = self.currentMod = obj
elif self.currentMod is not None:
if obj.parentMod is not None:
assert obj.parentMod is self.currentMod
else:
obj.parentMod = self.currentMod
else:
assert obj.parentMod is None
# Method-level import to avoid a circular dependency.
from pydoctor import epydoc2stan
for attrobj in epydoc2stan.extract_fields(obj):
self.system.addObject(attrobj)
def pop(self, obj):
assert self.current is obj, "%r is not %r"%(self.current, obj)
self.current = self._stack.pop()
if isinstance(obj, model.Module):
self.currentMod = None
def pushClass(self, name, docstring):
return self._push(self.system.Class, name, docstring)
def popClass(self):
self._pop(self.system.Class)
def pushModule(self, name, docstring):
return self._push(self.system.Module, name, docstring)
def popModule(self):
self._pop(self.system.Module)
def pushFunction(self, name, docstring):
return self._push(self.system.Function, name, docstring)
def popFunction(self):
self._pop(self.system.Function)
def pushPackage(self, name, docstring):
return self._push(self.system.Package, name, docstring)
def popPackage(self):
self._pop(self.system.Package)
def warning(self, type, detail):
self.system._warning(self.current, type, detail)
def processModuleAST(self, ast, mod):
findAll(ast, mod)
visitor.walk(ast, self.ModuleVistor(self, mod))
def expandModname(self, modname):
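        # Resolve a possibly package-relative module name: e.g. if "util" is found in an
        # enclosing package "pkg", then "util.helpers" expands to "pkg.util.helpers";
        # otherwise the name is returned unchanged.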
if '.' in modname:
prefix, suffix = modname.split('.', 1)
suffix = '.' + suffix
else:
prefix, suffix = modname, ''
package = self.current.parentMod.parent
while package is not None:
if prefix in package.contents:
self.warning("local import", modname)
return package.contents[prefix].fullName() + suffix
package = package.parent
return modname
def parseFile(self, filePath):
if filePath in self.ast_cache:
return self.ast_cache[filePath]
try:
ast = parseFile(filePath)
except (SyntaxError, ValueError):
self.warning("cannot parse", filePath)
ast = None
self.ast_cache[filePath] = ast
return ast
model.System.defaultBuilder = ASTBuilder
def findAll(modast, mod):
"""Find and attempt to parse into a list of names the __all__ of a module's AST."""
for node in modast.node.nodes:
if isinstance(node, ast.Assign) and \
len(node.nodes) == 1 and \
isinstance(node.nodes[0], ast.AssName) and \
node.nodes[0].name == '__all__':
if mod.all is not None:
mod.system.msg('all', "multiple assignments to %s.__all__ ??"%(mod.fullName(),))
if not isinstance(node.expr, (ast.List, ast.Tuple)):
mod.system.msg('all', "couldn't parse %s.__all__"%(mod.fullName(),))
continue
items = node.expr.nodes
names = []
for item in items:
if not isinstance(item, ast.Const) or not isinstance(item.value, str):
mod.system.msg('all', "couldn't parse %s.__all__"%(mod.fullName(),))
continue
names.append(item.value)
mod.all = names
| jelmer/pydoctor | pydoctor/astbuilder.py | Python | isc | 17,654 | 0.002322 |
"""empty message
Revision ID: 7ab3e266f711
Revises: 0152d9c6e677
Create Date: 2016-08-09 20:14:58.552655
"""
# revision identifiers, used by Alembic.
revision = '7ab3e266f711'
down_revision = '0152d9c6e677'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'co2_id_fkey', 'co2', type_='foreignkey')
op.create_foreign_key(None, 'co2', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'density_id_fkey', 'density', type_='foreignkey')
op.create_foreign_key(None, 'density', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'edb_product_id_fkey', 'edb_product', type_='foreignkey')
op.create_foreign_key(None, 'edb_product', 'product', ['id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'foodwaste_id_fkey', 'foodwaste', type_='foreignkey')
op.create_foreign_key(None, 'foodwaste', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'location_prod_association_product_id_fkey', 'location_prod_association', type_='foreignkey')
op.create_foreign_key(None, 'location_prod_association', 'product', ['product_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'prod_allergene_association_id_fkey', 'prod_allergene_association', type_='foreignkey')
op.create_foreign_key(None, 'prod_allergene_association', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'prod_nutrient_association_id_fkey', 'prod_nutrient_association', type_='foreignkey')
op.create_foreign_key(None, 'prod_nutrient_association', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'prod_process_association_id_fkey', 'prod_process_association', type_='foreignkey')
op.create_foreign_key(None, 'prod_process_association', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'prod_process_co2_association_id_fkey', 'prod_process_co2_association', type_='foreignkey')
op.create_foreign_key(None, 'prod_process_co2_association', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'scivalue_product_id_fkey', 'scivalue', type_='foreignkey')
op.create_foreign_key(None, 'scivalue', 'product', ['product_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'synonym_prod_association_product_id_fkey', 'synonym_prod_association', type_='foreignkey')
op.create_foreign_key(None, 'synonym_prod_association', 'product', ['product_id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'template_id_fkey', 'template', type_='foreignkey')
op.create_foreign_key(None, 'template', 'product', ['id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'unit_weight_id_fkey', 'unit_weight', type_='foreignkey')
op.create_foreign_key(None, 'unit_weight', 'scivalue', ['id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'unit_weight', type_='foreignkey')
op.create_foreign_key(u'unit_weight_id_fkey', 'unit_weight', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'template', type_='foreignkey')
op.create_foreign_key(u'template_id_fkey', 'template', 'product', ['id'], ['id'])
op.drop_constraint(None, 'synonym_prod_association', type_='foreignkey')
op.create_foreign_key(u'synonym_prod_association_product_id_fkey', 'synonym_prod_association', 'product', ['product_id'], ['id'])
op.drop_constraint(None, 'scivalue', type_='foreignkey')
op.create_foreign_key(u'scivalue_product_id_fkey', 'scivalue', 'product', ['product_id'], ['id'])
op.drop_constraint(None, 'prod_process_co2_association', type_='foreignkey')
op.create_foreign_key(u'prod_process_co2_association_id_fkey', 'prod_process_co2_association', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'prod_process_association', type_='foreignkey')
op.create_foreign_key(u'prod_process_association_id_fkey', 'prod_process_association', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'prod_nutrient_association', type_='foreignkey')
op.create_foreign_key(u'prod_nutrient_association_id_fkey', 'prod_nutrient_association', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'prod_allergene_association', type_='foreignkey')
op.create_foreign_key(u'prod_allergene_association_id_fkey', 'prod_allergene_association', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'location_prod_association', type_='foreignkey')
op.create_foreign_key(u'location_prod_association_product_id_fkey', 'location_prod_association', 'product', ['product_id'], ['id'])
op.drop_constraint(None, 'foodwaste', type_='foreignkey')
op.create_foreign_key(u'foodwaste_id_fkey', 'foodwaste', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'edb_product', type_='foreignkey')
op.create_foreign_key(u'edb_product_id_fkey', 'edb_product', 'product', ['id'], ['id'])
op.drop_constraint(None, 'density', type_='foreignkey')
op.create_foreign_key(u'density_id_fkey', 'density', 'scivalue', ['id'], ['id'])
op.drop_constraint(None, 'co2', type_='foreignkey')
op.create_foreign_key(u'co2_id_fkey', 'co2', 'scivalue', ['id'], ['id'])
### end Alembic commands ###
| schinke/solid-fortnight-ba | flask/migrations/versions/7ab3e266f711_.py | Python | mit | 5,512 | 0.007438 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: composer
author: Dimitrios Tydeas Mengidis
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on
required: false
default: install
working_dir:
description:
- Directory of your project ( see --working-dir )
required: true
default: null
aliases: [ "working-dir" ]
prefer_source:
description:
- Forces installation from package sources when possible ( see --prefer-source )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions ( see --prefer-dist )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages ( see --no-dev )
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json ( see --no-scripts )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "optimize-autoloader" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer: command=install working_dir=/path/to/project
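# A further illustrative example (the path is a placeholder): update the project dependencies,
# preferring dist packages and skipping the scripts defined in composer.json
- composer: command=update working_dir=/path/to/project prefer_dist=yes no_scripts=yes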
'''
import os
import re
def parse_out(string):
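    # Collapse any run of whitespace into a single space and strip leading/trailing whitespace.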
return re.sub("\s+", " ", string).strip()
def has_changed(string):
if "Nothing to install or update" in string:
return False
else:
return True
def composer_install(module, command, options):
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(default="install", type="str", required=False),
working_dir = dict(aliases=["working-dir"], required=True),
prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
),
supports_check_mode=True
)
options = []
# Default options
options.append('--no-ansi')
options.append('--no-progress')
options.append('--no-interaction')
options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
# Get composer command with fallback to default
command = module.params['command']
# Prepare options
if module.params['prefer_source']:
options.append('--prefer-source')
if module.params['prefer_dist']:
options.append('--prefer-dist')
if module.params['no_dev']:
options.append('--no-dev')
if module.params['no_scripts']:
options.append('--no-scripts')
if module.params['no_plugins']:
options.append('--no-plugins')
if module.params['optimize_autoloader']:
options.append('--optimize-autoloader')
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_install(module, command, options)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
else:
        # Composer versions > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output)
# import module snippets
from ansible.module_utils.basic import *
main()
| mscherer/ansible-modules-extras | packaging/language/composer.py | Python | gpl-3.0 | 6,165 | 0.008597 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.transfers.imap_attachment_to_s3 import ImapAttachmentToS3Operator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
| airbnb/airflow | airflow/contrib/operators/imap_attachment_to_s3_operator.py | Python | apache-2.0 | 1,222 | 0.001637 |
# Copyright (c) 2017 The sqlalchemy-bigquery Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import importlib
# https://docs.sqlalchemy.org/en/13/core/type_basics.html#vendor-specific-types
def test_types_import():
"""Demonstrate behavior of importing types independent of any other import."""
dialect_module = importlib.import_module("sqlalchemy_bigquery")
_types_module = importlib.import_module("sqlalchemy_bigquery._types")
custom_types = getattr(_types_module, "_type_map")
for type_name, type_value in custom_types.items():
assert getattr(dialect_module, type_name) == type_value
| mxmzdlv/pybigquery | tests/unit/test_dialect_types.py | Python | mit | 1,632 | 0.004902 |
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Virtual Machine Life Cycle
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.sshClient import SshClient
from testcase.libs.utils import *
from testcase.libs.base import *
from testcase.libs.common import *
#Import System modules
import time
class Services:
"""Test VM Life Cycle Services
"""
def __init__(self):
self.services = {
"disk_offering":{
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"small":
# Create a small virtual machine instance with disk offering
{
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"medium": # Create a medium virtual machine instance
{
"displayname": "testserver",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offerings":
{
"tiny":
{
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 64, # In MBs
},
"small":
{
# Small service offering ID to for change VM
# service offering from medium to small
"name": "Small Instance",
"displaytext": "Small Instance",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 256
},
"medium":
{
# Medium service offering ID to for
# change VM service offering from small to medium
"name": "Medium Instance",
"displaytext": "Medium Instance",
"cpunumber": 1,
"cpuspeed": 1000,
"memory": 1024
}
},
"sleep": 60,
"timeout": 10,
#Migrate VM to hostid
"ostypeid": 'ccde7156-9b8b-4fb9-bf08-530dedf4dc61',
# CentOS 5.3 (64-bit)
"mode":'advanced',
}
class TestDeployVM(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
# Get Zone, Domain and templates
domain = get_domain(self.apiclient, self.services)
zone = get_zone(self.apiclient, self.services)
template = get_template(
self.apiclient,
zone.id,
self.services["ostypeid"]
)
# Set Zones and disk offerings
self.services["small"]["zoneid"] = zone.id
self.services["small"]["template"] = template.id
self.services["medium"]["zoneid"] = zone.id
self.services["medium"]["template"] = template.id
# Create Account, VMs, NAT Rules etc
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=domain.id
)
self.service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offerings"]["tiny"]
)
# Cleanup
self.cleanup = [
self.service_offering,
self.account
]
def test_deploy_vm(self):
"""Test Deploy Virtual Machine
"""
# Validate the following:
# 1. Virtual Machine is accessible via SSH
# 2. listVirtualMachines returns accurate information
# 3. The Cloud Database contains the valid information
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["small"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id
)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM available in List Virtual Machines"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.id,
self.virtual_machine.id,
"Check virtual machine id in listVirtualMachines"
)
self.assertEqual(
vm_response.displayname,
self.virtual_machine.displayname,
"Check virtual machine displayname in listVirtualMachines"
)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
class TestVMLifeCycle(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestVMLifeCycle, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
domain = get_domain(cls.api_client, cls.services)
zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
zone.id,
cls.services["ostypeid"]
)
# Set Zones and disk offerings
cls.services["small"]["zoneid"] = zone.id
cls.services["small"]["template"] = template.id
cls.services["medium"]["zoneid"] = zone.id
cls.services["medium"]["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["small"]
)
cls.medium_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["medium"]
)
#create small and large virtual machines
cls.small_virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["small"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.small_offering.id,
mode=cls.services["mode"]
)
cls.medium_virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["medium"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.medium_offering.id,
mode=cls.services["mode"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["small"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.small_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.small_offering,
cls.medium_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
cls.api_client = super(TestVMLifeCycle, cls).getClsTestClient().getApiClient()
cleanup_resources(cls.api_client, cls._cleanup)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
#Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
return
def test_01_stop_vm(self):
"""Test Stop Virtual Machine
"""
# Validate the following
# 1. Should Not be able to login to the VM.
        # 2. listVM command should return this VM. State
        # of this VM should be "Stopped".
self.debug("Stopping VM - ID: %s" % self.virtual_machine.id)
self.small_virtual_machine.stop(self.apiclient)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.small_virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM available in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].state,
"Stopped",
"Check virtual machine is in stopped state"
)
return
def test_02_start_vm(self):
"""Test Start Virtual Machine
"""
# Validate the following
        # 1. listVM command should return this VM. State
        # of this VM should be "Running".
self.debug("Starting VM - ID: %s" % self.virtual_machine.id)
self.small_virtual_machine.start(self.apiclient)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.small_virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM avaliable in List Virtual Machines"
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.small_virtual_machine.id
)
self.assertEqual(
list_vm_response[0].state,
"Running",
"Check virtual machine is in running state"
)
return
def test_04_change_offering_small(self):
"""Change Offering to a small capacity
"""
# Validate the following
# 1. Log in to the Vm .We should see that the CPU and memory Info of
# this Vm matches the one specified for "Small" service offering.
# 2. Using listVM command verify that this Vm
# has Small service offering Id.
self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id)
self.medium_virtual_machine.stop(self.apiclient)
# Poll listVM to ensure VM is stopped properly
timeout = self.services["timeout"]
while True:
time.sleep(self.services["sleep"])
# Ensure that VM is in stopped state
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.medium_virtual_machine.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state == 'Stopped':
self.debug("VM state: %s" % vm.state)
break
if timeout == 0:
raise Exception(
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
timeout = timeout - 1
self.debug("Change Service offering VM - ID: %s" %
self.medium_virtual_machine.id)
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
cmd.id = self.medium_virtual_machine.id
cmd.serviceofferingid = self.small_offering.id
self.apiclient.changeServiceForVirtualMachine(cmd)
self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
self.medium_virtual_machine.start(self.apiclient)
# Poll listVM to ensure VM is started properly
timeout = self.services["timeout"]
while True:
time.sleep(self.services["sleep"])
# Ensure that VM is in running state
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.medium_virtual_machine.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state == 'Running':
self.debug("VM state: %s" % vm.state)
break
if timeout == 0:
raise Exception(
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
timeout = timeout - 1
return
def test_06_destroy_vm(self):
"""Test destroy Virtual Machine
"""
# Validate the following
# 1. Should not be able to login to the VM.
        # 2. listVM command should return this VM. State
# of this VM should be "Destroyed".
self.debug("Destroy VM - ID: %s" % self.small_virtual_machine.id)
self.small_virtual_machine.delete(self.apiclient)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.small_virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM avaliable in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].state,
"Destroyed",
"Check virtual machine is in destroyed state"
)
return
def test_07_restore_vm(self):
"""Test recover Virtual Machine
"""
# Validate the following
# 1. listVM command should return this VM.
# State of this VM should be "Stopped".
# 2. We should be able to Start this VM successfully.
self.debug("Recovering VM - ID: %s" % self.small_virtual_machine.id)
cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
cmd.id = self.small_virtual_machine.id
self.apiclient.recoverVirtualMachine(cmd)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.small_virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM avaliable in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].state,
"Stopped",
"Check virtual machine is in Stopped state"
)
return
| ikoula/cloudstack | tools/marvin/marvin/sandbox/demo/simulator/testcase/test_vm_life_cycle.py | Python | gpl-2.0 | 20,302 | 0.003546 |
"""
Contains the relevant conversions of HTML and LaTeX entities that will not be
correctly converted elsewhere. This is taken from adsabs/adsdata authored by
J. Luke.
"""
__author__ = 'J. Luke'
__maintainer__ = 'J. Elliott'
__copyright__ = 'Copyright 2015'
__version__ = '1.0'
__email__ = 'ads@cfa.harvard.edu'
__status__ = 'Production'
__credit__ = ['V. Sudilovsky', 'A. Accomazzi', 'J. Luker']
__license__ = 'GPLv3'
import re
entitydefs = {
'nsqsupe': u'\u22e3',
'Pcy': u'\u041f',
'xharr': u'\u27f7',
'HumpDownHump': u'\u224e',
'asymp': u'\u2248',
'otimes': u'\u2297',
'Zopf': u'\u2124',
'bkarow': u'\u290d',
'lessapprox': u'\u2a85',
'angmsd': u'\u2221',
'gimel': u'\u2137',
'dollar': u'$',
'mstpos': u'\u223e',
'rsquor': u'\u2019',
'boxminus': u'\u229f',
'ThinSpace': u'\u2009',
'equivDD': u'\u2a78',
'pertenk': u'\u2031',
'Gt': u'\u226b',
'gscr': u'\u210a',
'Backslash': u'\u2216',
'Gg': u'\u22d9',
'nparallel': u'\u2226',
'quatint': u'\u2a16',
'Igr': u'\u0399',
'iinfin': u'\u29dc',
'nsubseteqq': u'\u2ac5\u0338',
'yacy': u'\u044f',
'cularr': u'\u21b6',
'nges': u'\u2a7e\u0338',
'ngeq': u'\u2271',
'rangle': u'\u232a',
'lparlt': u'\u2993',
'Scaron': u'\u0160',
'solbar': u'\u233f',
'elsdot': u'\u2a97',
'LessFullEqual': u'\u2266',
'lbbrk': u'\u3014',
'Cacute': u'\u0106',
'npolint': u'\u2a14',
'THORN': u'\u00de',
'ngsim': u'\u2275',
'equals': u'=',
'eqslantgtr': u'\u2a96',
'vltri': u'\u22b2',
'robrk': u'\u301b',
'cuepr': u'\u22de',
'nrightarrow': u'\u219b',
'glj': u'\u2aa4',
'gla': u'\u2aa5',
'Rcaron': u'\u0158',
'ohgr': u'\u03c9',
'permil': u'\u2030',
'angmsdac': u'\u29aa',
'angmsdab': u'\u29a9',
'angmsdaa': u'\u29a8',
'uharl': u'\u21bf',
'angmsdag': u'\u29ae',
'angmsdaf': u'\u29ad',
'Agrave': u'\u00c0',
'angmsdad': u'\u29ab',
'angmsdah': u'\u29af',
'rceil': u'\u2309',
'angrtvb': u'\u22be',
'rppolint': u'\u2a12',
'divide': u'\u00f7',
'omacr': u'\u014d',
'circleddash': u'\u229d',
'notinE': u'\u22f9\u0338',
'Ncy': u'\u041d',
'lesdotor': u'\u2a83',
'Star': u'\u22c6',
'Mellintrf': u'\u2133',
'therefore': u'\u2234',
'KHcy': u'\u0425',
'barwed': u'\u2305',
'gvertneqq': u'\u2269\ufe00',
'Jcy': u'\u0419',
'phone': u'\u260e',
'ssetmn': u'\u2216',
'excl': u'!',
'parsim': u'\u2af3',
'centerdot': u'\u00b7',
'nwarr': u'\u2196',
'nvle': u'\u2264\u20d2',
'mu': u'\u03bc',
'mp': u'\u2213',
'OverBracket': u'\u23b4',
'Barwed': u'\u2306',
'bsemi': u'\u204f',
'idigr': u'\u03ca',
'Ll': u'\u22d8',
'cong': u'\u2245',
'rpar': u')',
'Lt': u'\u226a',
'NotSuperset': u'\u2283\u20d2',
'topcir': u'\u2af1',
'smte': u'\u2aac',
'LeftDownVector': u'\u21c3',
'eng': u'\u014b',
'heartsuit': u'\u2665',
'roplus': u'\u2a2e',
'zigrarr': u'\u21dd',
'lobrk': u'\u301a',
'nharr': u'\u21ae',
'xnis': u'\u22fb',
'Hcirc': u'\u0124',
'Uarrocir': u'\u2949',
'lcedil': u'\u013c',
'lat': u'\u2aab',
'incare': u'\u2105',
'lap': u'\u2a85',
'parallel': u'\u2225',
'xhArr': u'\u27fa',
'tritime': u'\u2a3b',
'SubsetEqual': u'\u2286',
'order': u'\u2134',
'PlusMinus': u'\u00b1',
'approxeq': u'\u224a',
'varr': u'\u2195',
'ograve': u'\u00f2',
'becaus': u'\u2235',
'kappav': u'\u03f0',
'iprod': u'\u2a3c',
'otilde': u'\u00f5',
'njcy': u'\u045a',
'upharpoonleft': u'\u21bf',
'Odblac': u'\u0150',
'RightArrowBar': u'\u21e5',
'Rfr': u'\u211c',
'rbrack': u']',
'UnderBrace': u'\ufe38',
'napid': u'\u224b\u0338',
'gescc': u'\u2aa9',
'iukcy': u'\u0456',
'xrArr': u'\u27f9',
'Jukcy': u'\u0404',
'bsime': u'\u22cd',
'Cayleys': u'\u212d',
'leqq': u'\u2266',
'nwArr': u'\u21d6',
'rrarr': u'\u21c9',
'UpTee': u'\u22a5',
'nvDash': u'\u22ad',
'bigodot': u'\u2a00',
'searr': u'\u2198',
'looparrowleft': u'\u21ab',
'xgr': u'\u03be',
'Tstrok': u'\u0166',
'lcub': u'{',
'smt': u'\u2aaa',
'rx': u'\u211e',
'simplus': u'\u2a24',
'uplus': u'\u228e',
'smallsetminus': u'\u2216',
'notniva': u'\u220c',
'dotsquare': u'\u22a1',
'notnivc': u'\u22fd',
'notnivb': u'\u22fe',
'ijlig': u'\u0133',
'Egr': u'\u0395',
'infin': u'\u221e',
'DoubleRightTee': u'\u22a8',
'sqcaps': u'\u2293\ufe00',
'NotTildeTilde': u'\u2249',
'lsimg': u'\u2a8f',
'aogon': u'\u0105',
'GreaterLess': u'\u2277',
'nparsl': u'\u2afd\u20e5',
'ange': u'\u29a4',
'lneq': u'\u2a87',
'Escr': u'\u2130',
'Tilde': u'\u223c',
'Ugr': u'\u03a5',
'Kcedil': u'\u0136',
'rang': u'\u232a',
'hellip': u'\u2026',
'scedil': u'\u015f',
'mldr': u'\u2026',
'lthree': u'\u22cb',
'efDot': u'\u2252',
'top': u'\u22a4',
'ZeroWidthSpace': u'\u200b',
'thickapprox': u'\u2248',
'Ifr': u'\u2111',
'Aacgr': u'\u0386',
'Coproduct': u'\u2210',
'Rarr': u'\u21a0',
'bbrk': u'\u23b5',
'minusdu': u'\u2a2a',
'kjcy': u'\u045c',
'llarr': u'\u21c7',
'rthree': u'\u22cc',
'target': u'\u2316',
'thksim': u'\u223c',
'ltrPar': u'\u2996',
'nearhk': u'\u2924',
'udigr': u'\u03cb',
'minus': u'\u2212',
'tcaron': u'\u0165',
'YUcy': u'\u042e',
'SucceedsTilde': u'\u227f',
'xrarr': u'\u27f6',
'cwint': u'\u2231',
'subsim': u'\u2ac7',
'ImaginaryI': u'\u2148',
'NotLessEqual': u'\u2270',
'Uacute': u'\u00da',
'Dgr': u'\u0394',
'vBar': u'\u2ae8',
'ubrcy': u'\u045e',
'reals': u'\u211d',
'Omega': u'\u03a9',
'LessTilde': u'\u2272',
'YAcy': u'\u042f',
'gnapprox': u'\u2a8a',
'ldquo': u'\u201c',
'drcorn': u'\u231f',
'fnof': u'\u0192',
'cupbrcap': u'\u2a48',
'grave': u'`',
'Tgr': u'\u03a4',
'lopar': u'\u2985',
'nhArr': u'\u21ce',
'wedgeq': u'\u2259',
'gvnE': u'\u2269\ufe00',
'odsold': u'\u29bc',
'dot': u'\u02d9',
'Rightarrow': u'\u21d2',
'emptyv': u'\u2205',
'Rgr': u'\u03a1',
'Union': u'\u22c3',
'lnapprox': u'\u2a89',
'boxDl': u'\u2556',
'nlarr': u'\u219a',
'Atilde': u'\u00c3',
'radic': u'\u221a',
'frac78': u'\u215e',
'boxDr': u'\u2553',
'phgr': u'\u03c6',
'swnwar': u'\u292a',
'nLeftarrow': u'\u21cd',
'vArr': u'\u21d5',
'yen': u'\u00a5',
'hoarr': u'\u21ff',
'ocy': u'\u043e',
'Eacute': u'\u00c9',
'xotime': u'\u2a02',
'rtrie': u'\u22b5',
'mapstoup': u'\u21a5',
'xlArr': u'\u27f8',
'dd': u'\u2146',
'sup': u'\u2283',
'nesim': u'\u2242\u0338',
'mapstoleft': u'\u21a4',
'circeq': u'\u2257',
'subseteq': u'\u2286',
'strns': u'\u00af',
'OHgr': u'\u03a9',
'nLl': u'\u22d8\u0338',
'OHacgr': u'\u038f',
'ulcrop': u'\u230f',
'Proportion': u'\u2237',
'Dstrok': u'\u0110',
'num': u'#',
'ddotseq': u'\u2a77',
'lescc': u'\u2aa8',
'bigvee': u'\u22c1',
'iexcl': u'\u00a1',
'circledcirc': u'\u229a',
'seArr': u'\u21d8',
'gneqq': u'\u2269',
'atilde': u'\u00e3',
'Nopf': u'\u2115',
'CircleMinus': u'\u2296',
'nesear': u'\u2928',
'squf': u'\u25aa',
'lrarr': u'\u21c6',
'capand': u'\u2a44',
'glE': u'\u2a92',
'ccaps': u'\u2a4d',
'Bgr': u'\u0392',
'wr': u'\u2240',
'wp': u'\u2118',
'zacute': u'\u017a',
'Hacek': u'\u02c7',
'vprop': u'\u221d',
'backcong': u'\u224c',
'rpargt': u'\u2994',
'ffilig': u'\ufb03',
'zhcy': u'\u0436',
'plustwo': u'\u2a27',
'ncap': u'\u2a43',
'RightVectorBar': u'\u2953',
'ohacgr': u'\u03ce',
'and': u'\u2227',
'Egrave': u'\u00c8',
'DiacriticalDot': u'\u02d9',
'nGtv': u'\u226b\u0338',
'igrave': u'\u00ec',
'nvlArr': u'\u2902',
'ETH': u'\u00d0',
'sqsupset': u'\u2290',
'esim': u'\u2242',
'intcal': u'\u22ba',
'lAtail': u'\u291b',
'tint': u'\u222d',
'lurdshar': u'\u294a',
'mapsto': u'\u21a6',
'propto': u'\u221d',
'bemptyv': u'\u29b0',
'lpar': u'(',
'nlArr': u'\u21cd',
'dbkarow': u'\u290f',
'lbrksld': u'\u298f',
'puncsp': u'\u2008',
'mcy': u'\u043c',
'gnE': u'\u2269',
'notindot': u'\u22f5\u0338',
'lbrkslu': u'\u298d',
'TildeTilde': u'\u2248',
'diamondsuit': u'\u2666',
'gne': u'\u2a88',
'rharul': u'\u296c',
'udblac': u'\u0171',
'succeq': u'\u2ab0',
'jukcy': u'\u0454',
'it': u'\u2062',
'ii': u'\u2148',
'rcub': u'}',
'telrec': u'\u2315',
'in': u'\u2208',
'ic': u'\u2063',
'leftleftarrows': u'\u21c7',
'triplus': u'\u2a39',
'DoubleUpArrow': u'\u21d1',
'leftarrow': u'\u2190',
'NJcy': u'\u040a',
'lrcorner': u'\u231f',
'cupcup': u'\u2a4a',
'lagran': u'\u2112',
'angmsdae': u'\u29ac',
'longmapsto': u'\u27fc',
'rotimes': u'\u2a35',
'gtrarr': u'\u2978',
'nis': u'\u22fc',
'lcy': u'\u043b',
'niv': u'\u220b',
'hcirc': u'\u0125',
'eeacgr': u'\u03ae',
'iuml': u'\u00ef',
'lhblk': u'\u2584',
'ApplyFunction': u'\u2061',
'larrhk': u'\u21a9',
'hearts': u'\u2665',
'rsqb': u']',
'gtcc': u'\u2aa7',
'apacir': u'\u2a6f',
'comp': u'\u2201',
'tilde': u'\u02dc',
'coprod': u'\u2210',
'LeftTeeArrow': u'\u21a4',
'midast': u'*',
'ell': u'\u2113',
'imacr': u'\u012b',
'prsim': u'\u227e',
'gtcir': u'\u2a7a',
'gtquest': u'\u2a7c',
'els': u'\u2a95',
'uuarr': u'\u21c8',
'Kcy': u'\u041a',
'bne': u'=\u20e5',
'nexist': u'\u2204',
'gesdot': u'\u2a80',
'andslope': u'\u2a58',
'caps': u'\u2229\ufe00',
'smtes': u'\u2aac\ufe00',
'sqsubset': u'\u228f',
'lcaron': u'\u013e',
'xlarr': u'\u27f5',
'laquo': u'\u00ab',
'scsim': u'\u227f',
'simgE': u'\u2aa0',
'lsquo': u'\u2018',
'leftrightarrow': u'\u2194',
'srarr': u'\u2192',
'Ycy': u'\u042b',
'rarrsim': u'\u2974',
'gjcy': u'\u0453',
'Amacr': u'\u0100',
'clubs': u'\u2663',
'olt': u'\u29c0',
'cirE': u'\u29c3',
'congdot': u'\u2a6d',
'zcy': u'\u0437',
'dzcy': u'\u045f',
'awconint': u'\u2233',
'bottom': u'\u22a5',
'Supset': u'\u22d1',
'circ': u'\u02c6',
'icy': u'\u0438',
'cire': u'\u2257',
'uml': u'\u00a8',
'napos': u'\u0149',
'zcaron': u'\u017e',
'thgr': u'\u03b8',
'Uparrow': u'\u21d1',
'numero': u'\u2116',
'KJcy': u'\u040c',
'DZcy': u'\u040f',
'nwarrow': u'\u2196',
'hkswarow': u'\u2926',
'egsdot': u'\u2a98',
'plus': u'+',
'profalar': u'\u232e',
'Cup': u'\u22d3',
'rHar': u'\u2964',
'sol': u'/',
'shcy': u'\u0448',
'shortparallel': u'\u2225',
'sube': u'\u2286',
'osol': u'\u2298',
'HorizontalLine': u'\u2500',
'nsupseteq': u'\u2289',
'sup1': u'\u00b9',
'sup2': u'\u00b2',
'sup3': u'\u00b3',
'ngtr': u'\u226f',
'imagline': u'\u2110',
'subE': u'\u2ac5',
'nwnear': u'\u2927',
'Downarrow': u'\u21d3',
'Cedilla': u'\u00b8',
'fork': u'\u22d4',
'lnsim': u'\u22e6',
'kgreen': u'\u0138',
'egrave': u'\u00e8',
'rhard': u'\u21c1',
'cylcty': u'\u232d',
'DownLeftVector': u'\u21bd',
'Bumpeq': u'\u224e',
'rharu': u'\u21c0',
'boxtimes': u'\u22a0',
'supE': u'\u2ac6',
'itilde': u'\u0129',
'NotVerticalBar': u'\u2224',
'plusdu': u'\u2a25',
'twixt': u'\u226c',
'scaron': u'\u0161',
'isin': u'\u2208',
'supe': u'\u2287',
'Tcy': u'\u0422',
'nacute': u'\u0144',
'trie': u'\u225c',
'olcir': u'\u29be',
'rfloor': u'\u230b',
'Sub': u'\u22d0',
'notin': u'\u2209',
'exist': u'\u2203',
'Hstrok': u'\u0126',
'backsim': u'\u223d',
'tosa': u'\u2929',
'triangleq': u'\u225c',
'check': u'\u2713',
'Euml': u'\u00cb',
'Iacute': u'\u00cd',
'raquo': u'\u00bb',
'ne': u'\u2260',
'nu': u'\u03bd',
'swarhk': u'\u2926',
'searrow': u'\u2198',
'loarr': u'\u21fd',
'xodot': u'\u2a00',
'larr': u'\u2190',
'models': u'\u22a7',
'rarrhk': u'\u21aa',
'squ': u'\u25a1',
'rfisht': u'\u297d',
'InvisibleComma': u'\u2063',
'eacute': u'\u00e9',
'edot': u'\u0117',
'omega': u'\u03c9',
'spades': u'\u2660',
'prurel': u'\u22b0',
'Sgr': u'\u03a3',
'Colon': u'\u2237',
'pointint': u'\u2a15',
'aleph': u'\u2135',
'doteq': u'\u2250',
'boxdl': u'\u2510',
'NotPrecedes': u'\u2280',
'boxdr': u'\u250c',
'Yacute': u'\u00dd',
'ldrdhar': u'\u2967',
'primes': u'\u2119',
'lAarr': u'\u21da',
'Iscr': u'\u2110',
'nearrow': u'\u2197',
'boxdL': u'\u2555',
'varpi': u'\u03d6',
'boxdR': u'\u2552',
'nrtrie': u'\u22ed',
'vee': u'\u2228',
'Icirc': u'\u00ce',
'isinsv': u'\u22f3',
'succnsim': u'\u22e9',
'harrcir': u'\u2948',
'tbrk': u'\u23b4',
'ogon': u'\u02db',
'ShortLeftArrow': u'\u2190',
'AElig': u'\u00c6',
'Rang': u'\u300b',
'Intersection': u'\u22c2',
'ntriangleleft': u'\u22ea',
'die': u'\u00a8',
'ExponentialE': u'\u2147',
'sdotb': u'\u22a1',
'Tab': u'\t',
'div': u'\u00f7',
'malt': u'\u2720',
'epar': u'\u22d5',
'Leftarrow': u'\u21d0',
'angrtvbd': u'\u299d',
'ordm': u'\u00ba',
'Sacute': u'\u015a',
'ordf': u'\u00aa',
'ldca': u'\u2936',
'nsup': u'\u2285',
'simrarr': u'\u2972',
'backepsilon': u'\u03f6',
'Vdash': u'\u22a9',
'nsub': u'\u2284',
'rarrb': u'\u21e5',
'rarrc': u'\u2933',
'amalg': u'\u2a3f',
'nge': u'\u2271',
'VeryThinSpace': u'\u200a',
'langd': u'\u2991',
'IEcy': u'\u0415',
'rarrw': u'\u219d',
'gnap': u'\u2a8a',
'Agr': u'\u0391',
'uharr': u'\u21be',
'horbar': u'\u2015',
'bot': u'\u22a5',
'barwedge': u'\u2305',
'Ncedil': u'\u0145',
'percnt': u'%',
'digamma': u'\u03dd',
'dblac': u'\u02dd',
'searhk': u'\u2925',
'fllig': u'\ufb02',
'succsim': u'\u227f',
'veebar': u'\u22bb',
'Emacr': u'\u0112',
'Lacute': u'\u0139',
'LeftRightArrow': u'\u2194',
'Bernoullis': u'\u212c',
'planck': u'\u210f',
'ncup': u'\u2a42',
'gdot': u'\u0121',
'epsi': u'\u03f5',
'andd': u'\u2a5c',
'prec': u'\u227a',
'Ogr': u'\u039f',
'perp': u'\u22a5',
'ForAll': u'\u2200',
'eqcirc': u'\u2256',
'andv': u'\u2a5a',
'nRightarrow': u'\u21cf',
'bigtriangleup': u'\u25b3',
'rtri': u'\u25b9',
'sqsube': u'\u2291',
'succneqq': u'\u2ab6',
'ldrushar': u'\u294b',
'Popf': u'\u2119',
'cap': u'\u2229',
'aacgr': u'\u03ac',
'NotTildeEqual': u'\u2244',
'utri': u'\u25b5',
'imped': u'\u01b5',
'circledast': u'\u229b',
'xsqcup': u'\u2a06',
'EEacgr': u'\u0389',
'subseteqq': u'\u2ac5',
'Eogon': u'\u0118',
'RightVector': u'\u21c0',
'sharp': u'\u266f',
'eqsim': u'\u2242',
'range': u'\u29a5',
'trianglelefteq': u'\u22b4',
'sqsubseteq': u'\u2291',
'map': u'\u21a6',
'diams': u'\u2666',
'succ': u'\u227b',
'lneqq': u'\u2268',
'vnsup': u'\u2283\u20d2',
'preceq': u'\u2aaf',
'quot': u'"',
'uparrow': u'\u2191',
'nsim': u'\u2241',
'Pgr': u'\u03a0',
'gamma': u'\u03b3',
'vnsub': u'\u2282\u20d2',
'sc': u'\u227b',
'eDot': u'\u2251',
'approx': u'\u2248',
'boxuR': u'\u2558',
'downarrow': u'\u2193',
'blacklozenge': u'\u29eb',
'lsh': u'\u21b0',
'otimesas': u'\u2a36',
'Hscr': u'\u210b',
'Re': u'\u211c',
'nbsp': u'\u00a0',
'Ngr': u'\u039d',
'iiota': u'\u2129',
'boxuL': u'\u255b',
'equiv': u'\u2261',
'seswar': u'\u2929',
'boxur': u'\u2514',
'auml': u'\u00e4',
'GreaterGreater': u'\u2aa2',
'Mgr': u'\u039c',
'orderof': u'\u2134',
'ndash': u'\u2013',
'boxul': u'\u2518',
'Zdot': u'\u017b',
'THgr': u'\u0398',
'boxbox': u'\u29c9',
'half': u'\u00bd',
'not': u'\u00ac',
'epsiv': u'\u03b5',
'ntlg': u'\u2278',
'Succeeds': u'\u227b',
'larrfs': u'\u291d',
'operp': u'\u29b9',
'bigwedge': u'\u22c0',
'NotLess': u'\u226e',
'Cfr': u'\u212d',
'NotCongruent': u'\u2262',
'varsupsetneqq': u'\u2acc\ufe00',
'squarf': u'\u25aa',
'el': u'\u2a99',
'square': u'\u25a1',
'dlcorn': u'\u231e',
'eg': u'\u2a9a',
'Gcy': u'\u0413',
'Ggr': u'\u0393',
'nsccue': u'\u22e1',
'LongRightArrow': u'\u27f6',
'RightTeeVector': u'\u295b',
'simeq': u'\u2243',
'bumpe': u'\u224f',
'ENG': u'\u014a',
'orv': u'\u2a5b',
'orslope': u'\u2a57',
'cupcap': u'\u2a46',
'hardcy': u'\u044a',
'ord': u'\u2a5d',
'Sigma': u'\u03a3',
'UpDownArrow': u'\u2195',
'bumpE': u'\u2aae',
'LeftTriangle': u'\u22b2',
'nvltrie': u'\u22b4\u20d2',
'checkmark': u'\u2713',
'lambda': u'\u03bb',
'xi': u'\u03be',
'lrhar': u'\u21cb',
'scnap': u'\u2aba',
'ShortUpArrow': u'\u2191',
'ring': u'\u02da',
'Ubreve': u'\u016c',
'supplus': u'\u2ac0',
'Diamond': u'\u22c4',
'uacgr': u'\u03cd',
'Lgr': u'\u039b',
'LessGreater': u'\u2276',
'supedot': u'\u2ac4',
'LeftVector': u'\u21bc',
'jsercy': u'\u0458',
'ldquor': u'\u201e',
'iacute': u'\u00ed',
'wedbar': u'\u2a5f',
'xcirc': u'\u25ef',
'lsqb': u'[',
'copy': u'\u00a9',
'capcap': u'\u2a4b',
'lharu': u'\u21bc',
'Mscr': u'\u2133',
'mapstodown': u'\u21a7',
'Gbreve': u'\u011e',
'lhard': u'\u21bd',
'pre': u'\u2aaf',
'xoplus': u'\u2a01',
'ang': u'\u2220',
'DD': u'\u2145',
'dash': u'\u2010',
'curarr': u'\u21b7',
'Vcy': u'\u0412',
'fltns': u'\u25b1',
'abreve': u'\u0103',
'prE': u'\u2ab3',
'Phi': u'\u03a6',
'imof': u'\u22b7',
'npreceq': u'\u2aaf\u0338',
'qprime': u'\u2057',
'rlhar': u'\u21cc',
'DotDot': u'\u20dc',
'Ecy': u'\u042d',
'rationals': u'\u211a',
'updownarrow': u'\u2195',
'fcy': u'\u0444',
'prcue': u'\u227c',
'beta': u'\u03b2',
'beth': u'\u2136',
'middot': u'\u00b7',
'rsquo': u'\u2019',
'angst': u'\u212b',
'lnap': u'\u2a89',
'lates': u'\u2aad\ufe00',
'SupersetEqual': u'\u2287',
'Ucy': u'\u0423',
'phiv': u'\u03c6',
'Verbar': u'\u2016',
'sigma': u'\u03c3',
'NotLessLess': u'\u226a\u0338',
'nlt': u'\u226e',
'lozenge': u'\u25ca',
'nvHarr': u'\u2904',
'nle': u'\u2270',
'rightarrowtail': u'\u21a3',
'vsupnE': u'\u2acc\ufe00',
'NotElement': u'\u2209',
'ratio': u'\u2236',
'lvnE': u'\u2268\ufe00',
'nlE': u'\u2266\u0338',
'Laplacetrf': u'\u2112',
'vsupne': u'\u228b\ufe00',
'ges': u'\u2a7e',
'geq': u'\u2265',
'mnplus': u'\u2213',
'DownArrowBar': u'\u2913',
'Dcy': u'\u0414',
'expectation': u'\u2130',
'ncong': u'\u2247',
'LeftArrowBar': u'\u21e4',
'Vvdash': u'\u22aa',
'hairsp': u'\u200a',
'gel': u'\u22db',
'prime': u'\u2032',
'lceil': u'\u2308',
'tdot': u'\u20db',
'shortmid': u'\u2223',
'larrbfs': u'\u291f',
'sdot': u'\u22c5',
'Otimes': u'\u2a37',
'numsp': u'\u2007',
'xmap': u'\u27fc',
'lfloor': u'\u230a',
'NotEqual': u'\u2260',
'dtri': u'\u25bf',
'udhar': u'\u296e',
'Ccaron': u'\u010c',
'ldsh': u'\u21b2',
'lne': u'\u2a87',
'rlarr': u'\u21c4',
'djcy': u'\u0452',
'duhar': u'\u296f',
'khcy': u'\u0445',
'between': u'\u226c',
'lnE': u'\u2268',
'lrtri': u'\u22bf',
'plusdo': u'\u2214',
'erarr': u'\u2971',
'Zfr': u'\u2128',
'yuml': u'\u00ff',
'nVdash': u'\u22ae',
'lHar': u'\u2962',
'Tcaron': u'\u0164',
'SOFTcy': u'\u042c',
'intprod': u'\u2a3c',
'tridot': u'\u25ec',
'xcap': u'\u22c2',
'rbbrk': u'\u3015',
'ovbar': u'\u233d',
'boxhD': u'\u2565',
'Rcy': u'\u0420',
'Utilde': u'\u0168',
'period': u'.',
'NotGreaterLess': u'\u2279',
'colone': u'\u2254',
'curarrm': u'\u293c',
'colon': u':',
'bigcirc': u'\u25ef',
'caret': u'\u2041',
'boxhd': u'\u252c',
'ZHcy': u'\u0416',
'MinusPlus': u'\u2213',
'boxhu': u'\u2534',
'Conint': u'\u222f',
'succapprox': u'\u2ab8',
'ltri': u'\u25c3',
'nedot': u'\u2250\u0338',
'breve': u'\u02d8',
'realine': u'\u211b',
'kappa': u'\u03ba',
'bsim': u'\u223d',
'xwedge': u'\u22c0',
'rdsh': u'\u21b3',
'ThickSpace': u'\u2009\u200a\u200a',
'RoundImplies': u'\u2970',
'ucirc': u'\u00fb',
'Dcaron': u'\u010e',
'eDDot': u'\u2a77',
'ugrave': u'\u00f9',
'ltlarr': u'\u2976',
'parsl': u'\u2afd',
'rtimes': u'\u22ca',
'par': u'\u2225',
'profsurf': u'\u2313',
'capbrcup': u'\u2a49',
'nvsim': u'\u223c\u20d2',
'ecolon': u'\u2255',
'npre': u'\u2aaf\u0338',
'Implies': u'\u21d2',
'NoBreak': u'\u2060',
'nltri': u'\u22ea',
'longleftarrow': u'\u27f5',
'nsmid': u'\u2224',
'ccupssm': u'\u2a50',
'Bcy': u'\u0411',
'OverBrace': u'\ufe37',
'nsimeq': u'\u2244',
'Gamma': u'\u0393',
'lstrok': u'\u0142',
'compfn': u'\u2218',
'leftharpoonup': u'\u21bc',
'Im': u'\u2111',
'nltrie': u'\u22ec',
'ncaron': u'\u0148',
'gtdot': u'\u22d7',
'rBarr': u'\u290f',
'bnot': u'\u2310',
'nleqq': u'\u2266\u0338',
'olarr': u'\u21ba',
'Yuml': u'\u0178',
'real': u'\u211c',
'tstrok': u'\u0167',
'psi': u'\u03c8',
'frown': u'\u2322',
'darr': u'\u2193',
'cupdot': u'\u228d',
'oslash': u'\u00f8',
'acute': u'\u00b4',
'cdot': u'\u010b',
'Igrave': u'\u00cc',
'boxhU': u'\u2568',
'lesssim': u'\u2272',
'nbump': u'\u224e\u0338',
'Zgr': u'\u0396',
'sstarf': u'\u22c6',
'rAarr': u'\u21db',
'verbar': u'|',
'csupe': u'\u2ad2',
'Ycirc': u'\u0176',
'NotLessGreater': u'\u2278',
'zeetrf': u'\u2128',
'Dashv': u'\u2ae4',
'forkv': u'\u2ad9',
'awint': u'\u2a11',
'RightTriangle': u'\u22b3',
'acE': u'\u223e\u0333',
'aacute': u'\u00e1',
'uArr': u'\u21d1',
'PHgr': u'\u03a6',
'oS': u'\u24c8',
'nsime': u'\u2244',
'bepsi': u'\u03f6',
'acd': u'\u223f',
'ruluhar': u'\u2968',
'Uring': u'\u016e',
'acy': u'\u0430',
'or': u'\u2228',
'copysr': u'\u2117',
'nsupseteqq': u'\u2ac6\u0338',
'dagger': u'\u2020',
'ljcy': u'\u0459',
'image': u'\u2111',
'lessdot': u'\u22d6',
'triangledown': u'\u25bf',
'NotCupCap': u'\u226d',
'bigoplus': u'\u2a01',
'homtht': u'\u223b',
'conint': u'\u222e',
'Equilibrium': u'\u21cc',
'iocy': u'\u0451',
'starf': u'\u2605',
'CenterDot': u'\u00b7',
'LongLeftArrow': u'\u27f5',
'emacr': u'\u0113',
'loz': u'\u25ca',
'lang': u'\u2329',
'eqcolon': u'\u2255',
'apid': u'\u224b',
'lrhard': u'\u296d',
'Assign': u'\u2254',
'prnap': u'\u2ab9',
'ccups': u'\u2a4c',
'setmn': u'\u2216',
'Upsilon': u'\u03a5',
'ohm': u'\u2126',
'ac': u'\u223e',
'Idot': u'\u0130',
'af': u'\u2061',
'risingdotseq': u'\u2253',
'ap': u'\u2248',
'angsph': u'\u2222',
'oplus': u'\u2295',
'iiiint': u'\u2a0c',
'eogon': u'\u0119',
'Darr': u'\u21a1',
'amacr': u'\u0101',
'Lstrok': u'\u0141',
'Ecirc': u'\u00ca',
'Superset': u'\u2283',
'isins': u'\u22f4',
'isinv': u'\u2208',
'GJcy': u'\u0403',
'ominus': u'\u2296',
'suphsub': u'\u2ad7',
'dashv': u'\u22a3',
'sccue': u'\u227d',
'Subset': u'\u22d0',
'lbrace': u'{',
'timesbar': u'\u2a31',
'barvee': u'\u22bd',
'gtrapprox': u'\u2a86',
'thorn': u'\u00fe',
'lbrack': u'[',
'DJcy': u'\u0402',
'Vbar': u'\u2aeb',
'isinE': u'\u22f9',
'uwangle': u'\u29a7',
'lvertneqq': u'\u2268\ufe00',
'aring': u'\u00e5',
'sce': u'\u2ab0',
'eparsl': u'\u29e3',
'wreath': u'\u2240',
'dfisht': u'\u297f',
'Uacgr': u'\u038e',
'scy': u'\u0441',
'nvrArr': u'\u2903',
'tshcy': u'\u045b',
'sung': u'\u266a',
'UpperLeftArrow': u'\u2196',
'bbrktbrk': u'\u23b6',
'lEg': u'\u2a8b',
'scE': u'\u2ab4',
'Longleftarrow': u'\u27f8',
'ccaron': u'\u010d',
'bNot': u'\u2aed',
'smile': u'\u2323',
'scnE': u'\u2ab6',
'Lscr': u'\u2112',
'vert': u'|',
'dzigrarr': u'\u27ff',
'varrho': u'\u03f1',
'cedil': u'\u00b8',
'realpart': u'\u211c',
'OverBar': u'\u00af',
'ohbar': u'\u29b5',
'dArr': u'\u21d3',
'harrw': u'\u21ad',
'lsime': u'\u2a8d',
'Esim': u'\u2a73',
'NestedLessLess': u'\u226a',
'Otilde': u'\u00d5',
'boxDL': u'\u2557',
'boxDR': u'\u2554',
'notni': u'\u220c',
'nlsim': u'\u2274',
'smeparsl': u'\u29e4',
'napprox': u'\u2249',
'andand': u'\u2a55',
'diam': u'\u22c4',
'sub': u'\u2282',
'sum': u'\u2211',
'nang': u'\u2220\u20d2',
'pluse': u'\u2a72',
'plusb': u'\u229e',
'ggr': u'\u03b3',
'bowtie': u'\u22c8',
'lesdot': u'\u2a7f',
'geqslant': u'\u2a7e',
'toea': u'\u2928',
'ggg': u'\u22d9',
'rnmid': u'\u2aee',
'rarrtl': u'\u21a3',
'Udblac': u'\u0170',
'Aring': u'\u00c5',
'ReverseElement': u'\u220b',
'rbarr': u'\u290d',
'oscr': u'\u2134',
'emsp13': u'\u2004',
'gesdotol': u'\u2a84',
'emsp14': u'\u2005',
'swArr': u'\u21d9',
'rcedil': u'\u0157',
'CupCap': u'\u224d',
'emptyset': u'\u2205',
'DownArrow': u'\u2193',
'dtdot': u'\u22f1',
'trisb': u'\u29cd',
'varsubsetneqq': u'\u2acb\ufe00',
'Fscr': u'\u2131',
'imath': u'\u0131',
'UnderBracket': u'\u23b5',
'flat': u'\u266d',
'diamond': u'\u22c4',
'boxvR': u'\u255e',
'boxvH': u'\u256a',
'succnapprox': u'\u2aba',
'boxvL': u'\u2561',
'icirc': u'\u00ee',
'yucy': u'\u044e',
'dotminus': u'\u2238',
'Sc': u'\u2abc',
'upsilon': u'\u03c5',
'SHCHcy': u'\u0429',
'ensp': u'\u2002',
'Int': u'\u222c',
'boxvr': u'\u251c',
'tcedil': u'\u0163',
'blacksquare': u'\u25aa',
'boxvh': u'\u253c',
'boxvl': u'\u2524',
'male': u'\u2642',
'rarrfs': u'\u291e',
'vangrt': u'\u299c',
'deg': u'\u00b0',
'submult': u'\u2ac1',
'equest': u'\u225f',
'scap': u'\u2ab8',
'uHar': u'\u2963',
'Hopf': u'\u210d',
'ddagger': u'\u2021',
'lacute': u'\u013a',
'hamilt': u'\u210b',
'tprime': u'\u2034',
'Ccedil': u'\u00c7',
'Qopf': u'\u211a',
'vdash': u'\u22a2',
'maltese': u'\u2720',
'Dot': u'\u00a8',
'lbrke': u'\u298b',
'phmmat': u'\u2133',
'egs': u'\u2a96',
'egr': u'\u03b5',
'sect': u'\u00a7',
'disin': u'\u22f2',
'oacute': u'\u00f3',
'nwarhk': u'\u2923',
'lotimes': u'\u2a34',
'uring': u'\u016f',
'trade': u'\u2122',
'Ocy': u'\u041e',
'lozf': u'\u29eb',
'oelig': u'\u0153',
'prap': u'\u2ab7',
'pcy': u'\u043f',
'DDotrahd': u'\u2911',
'RightCeiling': u'\u2309',
'Lleftarrow': u'\u21da',
'cuvee': u'\u22ce',
'plussim': u'\u2a26',
'late': u'\u2aad',
'Breve': u'\u02d8',
'filig': u'\ufb01',
'thkap': u'\u2248',
'odiv': u'\u2a38',
'supsim': u'\u2ac8',
'bsolhsub': u'\\\u2282',
'rbrksld': u'\u298e',
'hslash': u'\u210f',
'rarrbfs': u'\u2920',
'bigcup': u'\u22c3',
'micro': u'\u00b5',
'iff': u'\u21d4',
'NotTilde': u'\u2241',
'rdca': u'\u2937',
'xutri': u'\u25b3',
'LJcy': u'\u0409',
'szlig': u'\u00df',
'nless': u'\u226e',
'harr': u'\u2194',
'Gcirc': u'\u011c',
'larrtl': u'\u21a2',
'nLtv': u'\u226a\u0338',
'vellip': u'\u22ee',
'boxUr': u'\u2559',
'ycirc': u'\u0177',
'ast': u'*',
'boxUl': u'\u255c',
'rect': u'\u25ad',
'nexists': u'\u2204',
'biguplus': u'\u2a04',
'nspar': u'\u2226',
'measuredangle': u'\u2221',
'bumpeq': u'\u224f',
'omid': u'\u29b6',
'boxUL': u'\u255d',
'scpolint': u'\u2a13',
'subsetneq': u'\u228a',
'Itilde': u'\u0128',
'ctdot': u'\u22ef',
'blank': u'\u2423',
'cups': u'\u222a\ufe00',
'chcy': u'\u0447',
'raemptyv': u'\u29b3',
'swarr': u'\u2199',
'Mcy': u'\u041c',
'boxUR': u'\u255a',
'rarrap': u'\u2975',
'ncy': u'\u043d',
'nleftarrow': u'\u219a',
'Delta': u'\u0394',
'weierp': u'\u2118',
'part': u'\u2202',
'Xi': u'\u039e',
'Ucirc': u'\u00db',
'UnionPlus': u'\u228e',
'Iacgr': u'\u038a',
'Scedil': u'\u015e',
'nGg': u'\u22d9\u0338',
'nleqslant': u'\u2a7d\u0338',
'NotSubsetEqual': u'\u2288',
'nGt': u'\u226b\u20d2',
'Lmidot': u'\u013f',
'origof': u'\u22b6',
'softcy': u'\u044c',
'racute': u'\u0155',
'para': u'\u00b6',
'ltdot': u'\u22d6',
'npart': u'\u2202\u0338',
'yacute': u'\u00fd',
'varkappa': u'\u03f0',
'triangleleft': u'\u25c3',
'macr': u'\u00af',
'sqsupe': u'\u2292',
'Vdashl': u'\u2ae6',
'Equal': u'\u2a75',
'ngeqslant': u'\u2a7e\u0338',
'duarr': u'\u21f5',
'rho': u'\u03c1',
'alpha': u'\u03b1',
'intercal': u'\u22ba',
'eegr': u'\u03b7',
'nrtri': u'\u22eb',
'mcomma': u'\u2a29',
'bernou': u'\u212c',
'loplus': u'\u2a2d',
'blk12': u'\u2592',
'gsime': u'\u2a8e',
'nsupset': u'\u2283\u20d2',
'gsiml': u'\u2a90',
'ltcc': u'\u2aa6',
'lesges': u'\u2a93',
'commat': u'@',
'caron': u'\u02c7',
'subrarr': u'\u2979',
'nshortparallel': u'\u2226',
'notinvc': u'\u22f6',
'notinvb': u'\u22f7',
'notinva': u'\u2209',
'Ntilde': u'\u00d1',
'subnE': u'\u2acb',
'ugr': u'\u03c5',
'Hat': u'^',
'doublebarwedge': u'\u2306',
'roang': u'\u3019',
'Lcy': u'\u041b',
'subne': u'\u228a',
'bullet': u'\u2022',
'Prime': u'\u2033',
'triangle': u'\u25b5',
'precnsim': u'\u22e8',
'ulcorner': u'\u231c',
'vsubne': u'\u228a\ufe00',
'sigmav': u'\u03c2',
'llhard': u'\u296b',
'vDash': u'\u22a8',
'vsubnE': u'\u2acb\ufe00',
'Xgr': u'\u039e',
'lArr': u'\u21d0',
'Auml': u'\u00c4',
'PrecedesTilde': u'\u227e',
'rcaron': u'\u0159',
'rAtail': u'\u291c',
'precnapprox': u'\u2ab9',
'SHcy': u'\u0428',
'leftthreetimes': u'\u22cb',
'hArr': u'\u21d4',
'Oslash': u'\u00d8',
'gbreve': u'\u011f',
'complement': u'\u2201',
'solb': u'\u29c4',
'eqvparsl': u'\u29e5',
'yicy': u'\u0457',
'elinters': u'\u23e7',
'prnE': u'\u2ab5',
'ofcir': u'\u29bf',
'lfisht': u'\u297c',
'emsp': u'\u2003',
'nvdash': u'\u22ac',
'slarr': u'\u2190',
'Ugrave': u'\u00d9',
'opar': u'\u29b7',
'hbar': u'\u210f',
'IJlig': u'\u0132',
'gEl': u'\u2a8c',
'tgr': u'\u03c4',
'napE': u'\u2a70\u0338',
'odblac': u'\u0151',
'zeta': u'\u03b6',
'pr': u'\u227a',
'gneq': u'\u2a88',
'Iogon': u'\u012e',
'dtrif': u'\u25be',
'there4': u'\u2234',
'Edot': u'\u0116',
'pi': u'\u03c0',
'empty': u'\u2205',
'pm': u'\u00b1',
'uhblk': u'\u2580',
'gap': u'\u2a86',
'urcorn': u'\u231d',
'frac25': u'\u2156',
'ccedil': u'\u00e7',
'drbkarow': u'\u2910',
'pitchfork': u'\u22d4',
'udarr': u'\u21c5',
'nprec': u'\u2280',
'Proportional': u'\u221d',
'SucceedsEqual': u'\u2ab0',
'pound': u'\u00a3',
'midcir': u'\u2af0',
'eqslantless': u'\u2a95',
'odash': u'\u229d',
'rightarrow': u'\u2192',
'khgr': u'\u03c7',
'Copf': u'\u2102',
'cularrp': u'\u293d',
'nldr': u'\u2025',
'boxHd': u'\u2564',
'Gcedil': u'\u0122',
'nvgt': u'>\u20d2',
'dgr': u'\u03b4',
'boxHu': u'\u2567',
'ntriangleright': u'\u22eb',
'nvge': u'\u2265\u20d2',
'backprime': u'\u2035',
'DownTeeArrow': u'\u21a7',
'boxHD': u'\u2566',
'Eacgr': u'\u0388',
'urtri': u'\u25f9',
'exponentiale': u'\u2147',
'ngeqq': u'\u2267\u0338',
'boxHU': u'\u2569',
'curvearrowleft': u'\u21b6',
'lBarr': u'\u290e',
'Poincareplane': u'\u210c',
'Abreve': u'\u0102',
'fallingdotseq': u'\u2252',
'questeq': u'\u225f',
'DScy': u'\u0405',
'demptyv': u'\u29b1',
'xuplus': u'\u2a04',
'simg': u'\u2a9e',
'sime': u'\u2243',
'jmath': u'j',
'LeftTee': u'\u22a3',
'Jcirc': u'\u0134',
'siml': u'\u2a9d',
'sqcups': u'\u2294\ufe00',
'rgr': u'\u03c1',
'cup': u'\u222a',
'Congruent': u'\u2261',
'Gammad': u'\u03dc',
'Lcedil': u'\u013b',
'easter': u'\u2a6e',
'DoubleLeftTee': u'\u2ae4',
'idiagr': u'\u0390',
'quest': u'?',
'VerticalTilde': u'\u2240',
'smashp': u'\u2a33',
'lessgtr': u'\u2276',
'DifferentialD': u'\u2146',
'Or': u'\u2a54',
'semi': u';',
'olcross': u'\u29bb',
'rdquo': u'\u201d',
'veeeq': u'\u225a',
'nsubset': u'\u2282\u20d2',
'Colone': u'\u2a74',
'bprime': u'\u2035',
'VerticalLine': u'|',
'tscy': u'\u0446',
'swarrow': u'\u2199',
'Therefore': u'\u2234',
'Gdot': u'\u0120',
'gtrless': u'\u2277',
'bcong': u'\u224c',
'marker': u'\u25ae',
'supnE': u'\u2acc',
'nvrtrie': u'\u22b5\u20d2',
'frac15': u'\u2155',
'frac14': u'\u00bc',
'Jsercy': u'\u0408',
'frac16': u'\u2159',
'frac13': u'\u2153',
'frac12': u'\u00bd',
'RightTee': u'\u22a2',
'dscy': u'\u0455',
'frac18': u'\u215b',
'gacute': u'\u01f5',
'prop': u'\u221d',
'supne': u'\u228b',
'Omacr': u'\u014c',
'prod': u'\u220f',
'eplus': u'\u2a71',
'doteqdot': u'\u2251',
'odot': u'\u2299',
'scirc': u'\u015d',
'hookleftarrow': u'\u21a9',
'varsubsetneq': u'\u228a\ufe00',
'gsim': u'\u2273',
'triminus': u'\u2a3a',
'integers': u'\u2124',
'Square': u'\u25a1',
'Barv': u'\u2ae7',
'angrt': u'\u221f',
'uarr': u'\u2191',
'qint': u'\u2a0c',
'bgr': u'\u03b2',
'rangd': u'\u2992',
'Precedes': u'\u227a',
'succcurlyeq': u'\u227d',
'block': u'\u2588',
'nsc': u'\u2281',
'Cconint': u'\u2230',
'YIcy': u'\u0407',
'gtlPar': u'\u2995',
'csube': u'\u2ad1',
'drcrop': u'\u230c',
'dcaron': u'\u010f',
'RightArrow': u'\u2192',
'CircleDot': u'\u2299',
'PartialD': u'\u2202',
'fflig': u'\ufb00',
'simdot': u'\u2a6a',
'spar': u'\u2225',
'Rarrtl': u'\u2916',
'ulcorn': u'\u231c',
'curlyvee': u'\u22ce',
'larrb': u'\u21e4',
'esdot': u'\u2250',
'plusacir': u'\u2a23',
'delta': u'\u03b4',
'boxV': u'\u2551',
'Ograve': u'\u00d2',
'cir': u'\u25cb',
'NotGreater': u'\u226f',
'geqq': u'\u2267',
'nsce': u'\u2ab0\u0338',
'curren': u'\u00a4',
'Cross': u'\u2a2f',
'divonx': u'\u22c7',
'Icy': u'\u0418',
'escr': u'\u212f',
'HilbertSpace': u'\u210b',
'lbarr': u'\u290c',
'LessSlantEqual': u'\u2a7d',
'Acirc': u'\u00c2',
'kcedil': u'\u0137',
'Updownarrow': u'\u21d5',
'ltcir': u'\u2a79',
'iiint': u'\u222d',
'jcy': u'\u0439',
'llcorner': u'\u231e',
'nap': u'\u2249',
'supseteq': u'\u2287',
'Tcedil': u'\u0162',
'nsqsube': u'\u22e2',
'straightphi': u'\u03d5',
'rtriltri': u'\u29ce',
'rightharpoonup': u'\u21c0',
'TildeFullEqual': u'\u2245',
'UpArrow': u'\u2191',
'supseteqq': u'\u2ac6',
'agr': u'\u03b1',
'Larr': u'\u219e',
'gt': u'>',
'minusd': u'\u2238',
'minusb': u'\u229f',
'gg': u'\u226b',
'ge': u'\u2265',
'SquareUnion': u'\u2294',
'gl': u'\u2277',
'nsupe': u'\u2289',
'lsquor': u'\u201a',
'NotLessTilde': u'\u2274',
'nrArr': u'\u21cf',
'gE': u'\u2267',
'utrif': u'\u25b4',
'ssmile': u'\u2323',
'Not': u'\u2aec',
'nsupE': u'\u2ac6\u0338',
'gnsim': u'\u22e7',
'Wedge': u'\u22c0',
'nsube': u'\u2288',
'dHar': u'\u2965',
'suplarr': u'\u297b',
'kcy': u'\u043a',
'IOcy': u'\u0401',
'LeftArrow': u'\u2190',
'nsubE': u'\u2ac5\u0338',
'nleq': u'\u2270',
'nles': u'\u2a7d\u0338',
'race': u'\u29da',
'neArr': u'\u21d7',
'Zcy': u'\u0417',
'planckh': u'\u210e',
'oror': u'\u2a56',
'natur': u'\u266e',
'Theta': u'\u0398',
'Fouriertrf': u'\u2131',
'UpTeeArrow': u'\u21a5',
'ycy': u'\u044b',
'bnequiv': u'\u2261\u20e5',
'loang': u'\u3018',
'coloneq': u'\u2254',
'OpenCurlyQuote': u'\u2018',
'leg': u'\u22da',
'varsigma': u'\u03c2',
'leq': u'\u2264',
'les': u'\u2a7d',
'sext': u'\u2736',
'thinsp': u'\u2009',
'ecirc': u'\u00ea',
'GreaterEqual': u'\u2265',
'frac23': u'\u2154',
'sqcap': u'\u2293',
'Lsh': u'\u21b0',
'isindot': u'\u22f5',
'dsol': u'\u29f6',
'boxh': u'\u2500',
'tau': u'\u03c4',
'boxVr': u'\u255f',
'sgr': u'\u03c3',
'nrarr': u'\u219b',
'DownBreve': u'\u0311',
'boxVh': u'\u256b',
'VerticalBar': u'\u2223',
'Vee': u'\u22c1',
'boxVl': u'\u2562',
'boxv': u'\u2502',
'lowast': u'\u2217',
'ape': u'\u224a',
'vBarv': u'\u2ae9',
'KHgr': u'\u03a7',
'boxH': u'\u2550',
'chi': u'\u03c7',
'boxVR': u'\u2560',
'varphi': u'\u03c6',
'nsubseteq': u'\u2288',
'boxVH': u'\u256c',
'dstrok': u'\u0111',
'boxVL': u'\u2563',
'subdot': u'\u2abd',
'scnsim': u'\u22e9',
'Iukcy': u'\u0406',
'apE': u'\u2a70',
'Lang': u'\u300a',
'latail': u'\u2919',
'rbrke': u'\u298c',
'lsim': u'\u2272',
'uuml': u'\u00fc',
'HARDcy': u'\u042a',
'vrtri': u'\u22b3',
'SuchThat': u'\u220b',
'Lambda': u'\u039b',
'Oacgr': u'\u038c',
'Idigr': u'\u03aa',
'LeftCeiling': u'\u2308',
'boxplus': u'\u229e',
'blk34': u'\u2593',
'TSHcy': u'\u040b',
'wedge': u'\u2227',
'csup': u'\u2ad0',
'cuesc': u'\u22df',
'inodot': u'\u0131',
'ncedil': u'\u0146',
'nearr': u'\u2197',
'csub': u'\u2acf',
'varnothing': u'\u2205',
'dharr': u'\u21c2',
'Fcy': u'\u0424',
'dharl': u'\u21c3',
'gcy': u'\u0433',
'Aogon': u'\u0104',
'sim': u'\u223c',
'ntgl': u'\u2279',
'MediumSpace': u'\u205f',
'cacute': u'\u0107',
'CirclePlus': u'\u2295',
'xcup': u'\u22c3',
'ltrie': u'\u22b4',
'nequiv': u'\u2262',
'gammad': u'\u03dd',
'rarrpl': u'\u2945',
'Leftrightarrow': u'\u21d4',
'larrsim': u'\u2973',
'npr': u'\u2280',
'hookrightarrow': u'\u21aa',
'TScy': u'\u0426',
'laemptyv': u'\u29b4',
'lE': u'\u2266',
'cudarrr': u'\u2935',
'LeftTeeVector': u'\u295a',
'nsucc': u'\u2281',
'VDash': u'\u22ab',
'comma': u',',
'cudarrl': u'\u2938',
'psgr': u'\u03c8',
'lg': u'\u2276',
'le': u'\u2264',
'll': u'\u226a',
'jcirc': u'\u0135',
'multimap': u'\u22b8',
'uacute': u'\u00fa',
'Cap': u'\u22d2',
'CircleTimes': u'\u2297',
'nLt': u'\u226a\u20d2',
'pluscir': u'\u2a22',
'gesles': u'\u2a94',
'Integral': u'\u222b',
'InvisibleTimes': u'\u2062',
'subset': u'\u2282',
'bump': u'\u224e',
'timesb': u'\u22a0',
'oast': u'\u229b',
'agrave': u'\u00e0',
'Ocirc': u'\u00d4',
'lesseqqgtr': u'\u2a8b',
'Vert': u'\u2016',
'ouml': u'\u00f6',
'Uuml': u'\u00dc',
'vcy': u'\u0432',
'sfrown': u'\u2322',
'reg': u'\u00ae',
'Ecaron': u'\u011a',
'nsucceq': u'\u2ab0\u0338',
'PrecedesEqual': u'\u2aaf',
'Lcaron': u'\u013d',
'rhov': u'\u03f1',
'Kgr': u'\u039a',
'ltrif': u'\u25c2',
'cirfnint': u'\u2a10',
'ecy': u'\u044d',
'simne': u'\u2246',
'times': u'\u00d7',
'quaternions': u'\u210d',
'gtreqqless': u'\u2a8c',
'prnsim': u'\u22e8',
'nrarrw': u'\u219d\u0338',
'DownTee': u'\u22a4',
'rbrace': u'}',
'lmoust': u'\u23b0',
'NewLine': u'\n',
'precapprox': u'\u2ab7',
'spadesuit': u'\u2660',
'RightFloor': u'\u230b',
'pgr': u'\u03c0',
'clubsuit': u'\u2663',
'bigsqcup': u'\u2a06',
'plusmn': u'\u00b1',
'bull': u'\u2022',
'frac38': u'\u215c',
'mDDot': u'\u223a',
'frac35': u'\u2157',
'frac34': u'\u00be',
'iota': u'\u03b9',
'RightTeeArrow': u'\u21a6',
'cemptyv': u'\u29b2',
'rsh': u'\u21b1',
'Umacr': u'\u016a',
'rmoust': u'\u23b1',
'ogt': u'\u29c1',
'supset': u'\u2283',
'gtreqless': u'\u22db',
'Uarr': u'\u219f',
'ogr': u'\u03bf',
'lowbar': u'_',
'natural': u'\u266e',
'rtrif': u'\u25b8',
'Iuml': u'\u00cf',
'ocir': u'\u229a',
'RuleDelayed': u'\u29f4',
'Dagger': u'\u2021',
'SquareSuperset': u'\u2290',
'infintie': u'\u29dd',
'gtrdot': u'\u22d7',
'utdot': u'\u22f0',
'precneqq': u'\u2ab5',
'RBarr': u'\u2910',
'complexes': u'\u2102',
'iacgr': u'\u03af',
'blacktriangle': u'\u25b4',
'xvee': u'\u22c1',
'angle': u'\u2220',
'supdot': u'\u2abe',
'luruhar': u'\u2966',
'mid': u'\u2223',
'iquest': u'\u00bf',
'Scirc': u'\u015c',
'larrpl': u'\u2939',
'gcirc': u'\u011d',
'mumap': u'\u22b8',
'Cdot': u'\u010a',
'Uogon': u'\u0172',
'gesl': u'\u22db\ufe00',
'kgr': u'\u03ba',
'lltri': u'\u25fa',
'lgE': u'\u2a91',
'nmid': u'\u2224',
'cwconint': u'\u2232',
'plankv': u'\u210f',
'subsup': u'\u2ad3',
'TildeEqual': u'\u2243',
'ngE': u'\u2267\u0338',
'Ccirc': u'\u0108',
'backsimeq': u'\u22cd',
'leftarrowtail': u'\u21a2',
'UnderBar': u'\u0332',
'Wcirc': u'\u0174',
'naturals': u'\u2115',
'nprcue': u'\u22e0',
'sdote': u'\u2a66',
'curlyeqsucc': u'\u22df',
'aelig': u'\u00e6',
'EEgr': u'\u0397',
'ni': u'\u220b',
'Psi': u'\u03a8',
'ropar': u'\u2986',
'vartheta': u'\u03d1',
'ngt': u'\u226f',
'bsolb': u'\u29c5',
'hstrok': u'\u0127',
'ngr': u'\u03bd',
'triangleright': u'\u25b9',
'Zacute': u'\u0179',
'RightUpVector': u'\u21be',
'precsim': u'\u227e',
'Bscr': u'\u212c',
'capdot': u'\u2a40',
'ratail': u'\u291a',
'mgr': u'\u03bc',
'looparrowright': u'\u21ac',
'Ncaron': u'\u0147',
'bigstar': u'\u2605',
'leqslant': u'\u2a7d',
'preccurlyeq': u'\u227c',
'Because': u'\u2235',
'Rrightarrow': u'\u21db',
'umacr': u'\u016b',
'shy': u'\u00ad',
'curlywedge': u'\u22cf',
'nisd': u'\u22fa',
'NotSubset': u'\u2282\u20d2',
'Pr': u'\u2abb',
'oacgr': u'\u03cc',
'Rsh': u'\u21b1',
'brvbar': u'\u00a6',
'SmallCircle': u'\u2218',
'iecy': u'\u0435',
'upsi': u'\u03c5',
'Pi': u'\u03a0',
'OElig': u'\u0152',
'LeftVectorBar': u'\u2952',
'gtrsim': u'\u2273',
'apos': u"'",
'theta': u'\u03b8',
'sacute': u'\u015b',
'LessLess': u'\u2aa1',
'nshortmid': u'\u2224',
'mlcp': u'\u2adb',
'langle': u'\u2329',
'EqualTilde': u'\u2242',
'Exists': u'\u2203',
'nabla': u'\u2207',
'trpezium': u'\u23e2',
'eta': u'\u03b7',
'uogon': u'\u0173',
'forall': u'\u2200',
'eth': u'\u00f0',
'ddarr': u'\u21ca',
'larrlp': u'\u21ab',
'Racute': u'\u0154',
'ffllig': u'\ufb04',
'UpArrowBar': u'\u2912',
'UpEquilibrium': u'\u296e',
'Imacr': u'\u012a',
'hyphen': u'\u2010',
'ucy': u'\u0443',
'longrightarrow': u'\u27f6',
'sqcup': u'\u2294',
'imagpart': u'\u2111',
'Aacute': u'\u00c1',
'cent': u'\u00a2',
'wcirc': u'\u0175',
'sqsupseteq': u'\u2292',
'intlarhk': u'\u2a17',
'euml': u'\u00eb',
'downdownarrows': u'\u21ca',
'thetav': u'\u03d1',
'supdsub': u'\u2ad8',
'dlcrop': u'\u230d',
'varsupsetneq': u'\u228b\ufe00',
'subedot': u'\u2ac3',
'lgr': u'\u03bb',
'supmult': u'\u2ac2',
'Oacute': u'\u00d3',
'And': u'\u2a53',
'vzigzag': u'\u299a',
'frac45': u'\u2158',
'setminus': u'\u2216',
'ultri': u'\u25f8',
'rdquor': u'\u201d',
'sqsup': u'\u2290',
'gesdoto': u'\u2a82',
'suphsol': u'\u2283/',
'Ropf': u'\u211d',
'dotplus': u'\u2214',
'sqsub': u'\u228f',
'PSgr': u'\u03a8',
'capcup': u'\u2a47',
'topbot': u'\u2336',
'Product': u'\u220f',
'rmoustache': u'\u23b1',
'rbrkslu': u'\u2990',
'zgr': u'\u03b6',
'fpartint': u'\u2a0d',
'cross': u'\u2717',
'oint': u'\u222e',
'supsetneq': u'\u228b',
'acirc': u'\u00e2',
'phi': u'\u03d5',
'nvap': u'\u224d\u20d2',
'frac58': u'\u215d',
'DotEqual': u'\u2250',
'ecir': u'\u2256',
'subplus': u'\u2abf',
'NotEqualTilde': u'\u2242\u0338',
'igr': u'\u03b9',
'topfork': u'\u2ada',
'angzarr': u'\u237c',
'blk14': u'\u2591',
'subsetneqq': u'\u2acb',
'curlyeqprec': u'\u22de',
'eacgr': u'\u03ad',
'tcy': u'\u0442',
'bigotimes': u'\u2a02',
'upuparrows': u'\u21c8',
'lesg': u'\u22da\ufe00',
'frac56': u'\u215a',
'udiagr': u'\u03b0',
'Zcaron': u'\u017d',
'ntilde': u'\u00f1',
'GreaterTilde': u'\u2273',
'nVDash': u'\u22af',
'Longrightarrow': u'\u27f9',
'LeftFloor': u'\u230a',
'SquareSubset': u'\u228f',
'varpropto': u'\u221d',
'iogon': u'\u012f',
'dwangle': u'\u29a6',
'shchcy': u'\u0449',
'ecaron': u'\u011b',
'mho': u'\u2127',
'xdtri': u'\u25bd',
'LeftUpVector': u'\u21bf',
'Rcedil': u'\u0156',
'erDot': u'\u2253',
'sfgr': u'\u03c2',
'because': u'\u2235',
'cuwed': u'\u22cf',
'Sum': u'\u2211',
'divideontimes': u'\u22c7',
'cirscir': u'\u29c2',
'Sup': u'\u22d1',
'nbumpe': u'\u224f\u0338',
'utilde': u'\u0169',
'ocirc': u'\u00f4',
'supsetneqq': u'\u2acc',
'daleth': u'\u2138',
'upharpoonright': u'\u21be',
'lharul': u'\u296a',
'ccirc': u'\u0109',
'dcy': u'\u0434',
'Element': u'\u2208',
'nhpar': u'\u2af2',
'varepsilon': u'\u03b5',
'urcrop': u'\u230e',
'mdash': u'\u2014',
'rarrlp': u'\u21ac',
'Rscr': u'\u211b',
'rarr': u'\u2192',
'Scy': u'\u0421',
'Upsi': u'\u03d2',
'NotExists': u'\u2204',
'profline': u'\u2312',
'ncongdot': u'\u2a6d\u0338',
'Ouml': u'\u00d6',
'supsub': u'\u2ad4',
'LowerLeftArrow': u'\u2199',
'Sqrt': u'\u221a',
'ltimes': u'\u22c9',
'female': u'\u2640',
'ufisht': u'\u297e',
'bsol': u'\\',
'supsup': u'\u2ad6',
'ltquest': u'\u2a7b',
'rcy': u'\u0440',
'circledR': u'\u00ae',
'circledS': u'\u24c8',
'ee': u'\u2147',
'bigcap': u'\u22c2',
'lmidot': u'\u0140',
'nrarrc': u'\u2933\u0338',
'smid': u'\u2223',
'nvinfin': u'\u29de',
'hksearow': u'\u2925',
'subsub': u'\u2ad5',
'TripleDot': u'\u20db',
'simlE': u'\u2a9f',
'urcorner': u'\u231d',
'Del': u'\u2207',
'Udigr': u'\u03ab',
'npar': u'\u2226',
'timesd': u'\u2a30',
'CHcy': u'\u0427',
'NotHumpEqual': u'\u224f\u0338',
'HumpEqual': u'\u224f',
'DoubleDot': u'\u00a8',
'int': u'\u222b',
'hercon': u'\u22b9',
'Ubrcy': u'\u040e',
'piv': u'\u03d6',
'orarr': u'\u21bb',
'Map': u'\u2905',
'ShortDownArrow': u'\u2193',
'cirmid': u'\u2aef',
'lesseqgtr': u'\u22da',
'roarr': u'\u21fe',
'ubreve': u'\u016d',
'rdldhar': u'\u2969',
'cupor': u'\u2a45',
'NotSucceeds': u'\u2281',
'star': u'\u2606',
'Hfr': u'\u210c',
'thicksim': u'\u223c',
'lesdoto': u'\u2a81',
'rArr': u'\u21d2',
'Acy': u'\u0410',
'asympeq': u'\u224d',
'hybull': u'\u2043',
'zdot': u'\u017c',
'Nacute': u'\u0143',
'lmoustache': u'\u23b0',
'bcy': u'\u0431',
}
def entitymap(match):
    """
    Matches the HTML entity and then checks if it is inside any of the entity
    definitions. If so, it replaces it using the dictionary outlined in
    entitydefs; otherwise the matched text is returned unchanged.
    :param match: the regular expression match object
    :return: the string that should replace the matched entity
    """
    x = match.group(1)
    if x in entitydefs:
        return entitydefs[x]
    # Leave unknown entities (e.g. numeric character references) untouched so
    # that re.sub is never handed None as a replacement.
    return match.group(0)
def convertentities(input_string):
    """
    Replaces any of the HTML/LaTeX entities listed in entitydefs that are
    matched by the regular expression below.
    :param input_string: string that needs to be parsed
    :return: string with the matched entities replaced
    """
    if input_string is None:
        return input_string
    return re.sub('&(#\d+|#x[0-9a-fA-F]+|\w+);', entitymap, input_string)
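# Minimal illustrative usage (not part of the original module); the entity
# names below are taken from the entitydefs table above, and the expected
# output is shown as a comment.
if __name__ == "__main__":
    # '&frac12;' -> u'\xbd', '&gt;' -> u'>', unknown entities stay untouched.
    print(repr(convertentities(u'1 &frac12; &gt; 1 &nosuchentity;')))
    # prints u'1 \xbd > 1 &nosuchentity;'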
| adsabs/ADSfulltext | adsft/entitydefs.py | Python | gpl-3.0 | 48,476 | 0.000041 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import sys
import unittest
import gyp.generator.ninja as ninja
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith("win"):
writer = ninja.NinjaWriter(
"foo", "wee", ".", ".", "build.ninja", ".", "build.ninja", "win"
)
spec = {"target_name": "wee"}
self.assertTrue(
writer.ComputeOutputFileName(spec, "executable").endswith(".exe")
)
self.assertTrue(
writer.ComputeOutputFileName(spec, "shared_library").endswith(".dll")
)
self.assertTrue(
writer.ComputeOutputFileName(spec, "static_library").endswith(".lib")
)
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter(
"foo", "wee", ".", ".", "build.ninja", ".", "build.ninja", "linux"
)
spec = {"target_name": "wee"}
self.assertTrue("." not in writer.ComputeOutputFileName(spec, "executable"))
self.assertTrue(
writer.ComputeOutputFileName(spec, "shared_library").startswith("lib")
)
self.assertTrue(
writer.ComputeOutputFileName(spec, "static_library").startswith("lib")
)
self.assertTrue(
writer.ComputeOutputFileName(spec, "shared_library").endswith(".so")
)
self.assertTrue(
writer.ComputeOutputFileName(spec, "static_library").endswith(".a")
)
if __name__ == "__main__":
unittest.main()
| onecoolx/picasso | tools/gyp/pylib/gyp/generator/ninja_test.py | Python | bsd-3-clause | 1,909 | 0.004191 |
from datetime import datetime
# Defaults
def noop(*args, **kw):
"""No operation. Returns nothing"""
pass
def identity(x):
"""Returns argument x"""
return x
def default_now():
return datetime.utcnow()
def default_comparer(x, y):
return x == y
def default_sub_comparer(x, y):
return x - y
def default_key_serializer(x):
return str(x)
def default_error(err):
if isinstance(err, BaseException):
raise err
else:
raise Exception(err)
| dbrattli/RxPY | rx/internal/basic.py | Python | apache-2.0 | 498 | 0 |
"""Group module
Groups are intented to cluster users together for logical reasons,
such as a list of users for whom patient notifications apply.
Groups should not be used to grant or restrict access - see `Role`.
"""
import re
from sqlalchemy import UniqueConstraint
from werkzeug.exceptions import BadRequest
from ..database import db
class Group(db.Model):
"""SQLAlchemy class for `groups` table"""
__tablename__ = 'groups'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
description = db.Column(db.Text)
def __str__(self):
return "Group {}".format(self.name)
def as_json(self):
return {'name': self.name, 'description': self.description}
@classmethod
def from_json(cls, data):
instance = cls()
instance.name = cls.validate_name(data['name'])
instance.description = data['description']
return instance
@staticmethod
def validate_name(name):
"""Only accept lowercase letters and underscores in name
:returns: the name if valid
:raises BadRequest: on error
"""
if re.match(r'^[a-z][a-z0-9_]*$', name):
return name
raise BadRequest(
"Group name may only contain lowercase letters and underscores")
class UserGroup(db.Model):
__tablename__ = 'user_groups'
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(
db.Integer(), db.ForeignKey('users.id', ondelete='CASCADE'),
nullable=False)
group_id = db.Column(
db.Integer(), db.ForeignKey('groups.id', ondelete='CASCADE'),
nullable=False)
__table_args__ = (UniqueConstraint(
'user_id', 'group_id', name='_user_group'),)
def __str__(self):
"""Print friendly format for logging, etc."""
return "UserGroup {0.user_id}:{0.group_id}".format(self)
| uwcirg/true_nth_usa_portal | portal/models/group.py | Python | bsd-3-clause | 1,907 | 0 |
import testlib
import test_combinators
fee = 20
initialsend = 200000
capacity = 1000000
def forward(env):
lit1 = env.lits[0]
lit2 = env.lits[1]
test_combinators.run_break_test(env, lit1, lit2, lit1)
def reverse(env):
lit1 = env.lits[0]
lit2 = env.lits[1]
test_combinators.run_break_test(env, lit1, lit2, lit1)
| mit-dci/lit | test/itest_break.py | Python | mit | 337 | 0.005935 |
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QTextStream(): # skipped bases: <class 'sip.simplewrapper'>
"""
QTextStream()
QTextStream(QIODevice)
QTextStream(QByteArray, QIODevice.OpenMode mode=QIODevice.ReadWrite)
"""
def atEnd(self): # real signature unknown; restored from __doc__
""" QTextStream.atEnd() -> bool """
return False
def autoDetectUnicode(self): # real signature unknown; restored from __doc__
""" QTextStream.autoDetectUnicode() -> bool """
return False
def codec(self): # real signature unknown; restored from __doc__
""" QTextStream.codec() -> QTextCodec """
return QTextCodec
def device(self): # real signature unknown; restored from __doc__
""" QTextStream.device() -> QIODevice """
return QIODevice
def fieldAlignment(self): # real signature unknown; restored from __doc__
""" QTextStream.fieldAlignment() -> QTextStream.FieldAlignment """
pass
def fieldWidth(self): # real signature unknown; restored from __doc__
""" QTextStream.fieldWidth() -> int """
return 0
def flush(self): # real signature unknown; restored from __doc__
""" QTextStream.flush() """
pass
def generateByteOrderMark(self): # real signature unknown; restored from __doc__
""" QTextStream.generateByteOrderMark() -> bool """
return False
def integerBase(self): # real signature unknown; restored from __doc__
""" QTextStream.integerBase() -> int """
return 0
def locale(self): # real signature unknown; restored from __doc__
""" QTextStream.locale() -> QLocale """
return QLocale
def numberFlags(self): # real signature unknown; restored from __doc__
""" QTextStream.numberFlags() -> QTextStream.NumberFlags """
pass
def padChar(self): # real signature unknown; restored from __doc__
""" QTextStream.padChar() -> str """
return ""
def pos(self): # real signature unknown; restored from __doc__
""" QTextStream.pos() -> int """
return 0
def read(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.read(int) -> str """
return ""
def readAll(self): # real signature unknown; restored from __doc__
""" QTextStream.readAll() -> str """
return ""
def readLine(self, int_maxLength=0): # real signature unknown; restored from __doc__
""" QTextStream.readLine(int maxLength=0) -> str """
return ""
def realNumberNotation(self): # real signature unknown; restored from __doc__
""" QTextStream.realNumberNotation() -> QTextStream.RealNumberNotation """
pass
def realNumberPrecision(self): # real signature unknown; restored from __doc__
""" QTextStream.realNumberPrecision() -> int """
return 0
def reset(self): # real signature unknown; restored from __doc__
""" QTextStream.reset() """
pass
def resetStatus(self): # real signature unknown; restored from __doc__
""" QTextStream.resetStatus() """
pass
def seek(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.seek(int) -> bool """
return False
def setAutoDetectUnicode(self, bool): # real signature unknown; restored from __doc__
""" QTextStream.setAutoDetectUnicode(bool) """
pass
def setCodec(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QTextStream.setCodec(QTextCodec)
QTextStream.setCodec(str)
"""
pass
def setDevice(self, QIODevice): # real signature unknown; restored from __doc__
""" QTextStream.setDevice(QIODevice) """
pass
def setFieldAlignment(self, QTextStream_FieldAlignment): # real signature unknown; restored from __doc__
""" QTextStream.setFieldAlignment(QTextStream.FieldAlignment) """
pass
def setFieldWidth(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setFieldWidth(int) """
pass
def setGenerateByteOrderMark(self, bool): # real signature unknown; restored from __doc__
""" QTextStream.setGenerateByteOrderMark(bool) """
pass
def setIntegerBase(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setIntegerBase(int) """
pass
def setLocale(self, QLocale): # real signature unknown; restored from __doc__
""" QTextStream.setLocale(QLocale) """
pass
def setNumberFlags(self, QTextStream_NumberFlags): # real signature unknown; restored from __doc__
""" QTextStream.setNumberFlags(QTextStream.NumberFlags) """
pass
def setPadChar(self, p_str): # real signature unknown; restored from __doc__
""" QTextStream.setPadChar(str) """
pass
def setRealNumberNotation(self, QTextStream_RealNumberNotation): # real signature unknown; restored from __doc__
""" QTextStream.setRealNumberNotation(QTextStream.RealNumberNotation) """
pass
def setRealNumberPrecision(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setRealNumberPrecision(int) """
pass
def setStatus(self, QTextStream_Status): # real signature unknown; restored from __doc__
""" QTextStream.setStatus(QTextStream.Status) """
pass
def setString(self, *args, **kwargs): # real signature unknown
pass
def skipWhiteSpace(self): # real signature unknown; restored from __doc__
""" QTextStream.skipWhiteSpace() """
pass
def status(self): # real signature unknown; restored from __doc__
""" QTextStream.status() -> QTextStream.Status """
pass
def string(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
AlignAccountingStyle = 3
AlignCenter = 2
AlignLeft = 0
AlignRight = 1
FieldAlignment = None # (!) real value is ''
FixedNotation = 1
ForcePoint = 2
ForceSign = 4
NumberFlag = None # (!) real value is ''
NumberFlags = None # (!) real value is ''
Ok = 0
ReadCorruptData = 2
ReadPastEnd = 1
RealNumberNotation = None # (!) real value is ''
ScientificNotation = 2
ShowBase = 1
SmartNotation = 0
Status = None # (!) real value is ''
UppercaseBase = 8
UppercaseDigits = 16
WriteFailed = 3
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QTextStream.py | Python | gpl-2.0 | 7,395 | 0.010007 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-10-23 19:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workshops', '0118_auto_20160922_0009'),
]
operations = [
migrations.AlterField(
model_name='event',
name='invoice_status',
field=models.CharField(choices=[('unknown', 'Unknown'), ('invoiced', 'Invoice requested'), ('not-invoiced', 'Invoice not requested'), ('na-historic', 'Not applicable for historical reasons'), ('na-member', 'Not applicable because of membership'), ('na-self-org', 'Not applicable because self-organized'), ('na-waiver', 'Not applicable because waiver granted'), ('na-other', 'Not applicable because other arrangements made'), ('paid', 'Paid')], default='not-invoiced', max_length=40, verbose_name='Invoice status'),
),
migrations.AlterField(
model_name='invoicerequest',
name='status',
field=models.CharField(choices=[('not-invoiced', 'Invoice not requested'), ('sent', 'Sent out'), ('paid', 'Paid')], default='not-invoiced', max_length=40, verbose_name='Invoice status'),
),
]
| vahtras/amy | workshops/migrations/0119_auto_20161023_1413.py | Python | mit | 1,244 | 0.001608 |
import dolfin
from nanopores.physics.params_physical import *
def lscale(geo):
# TODO: "lscale" is confusing since it is actually 1 over the length scale
try:
return geo.parameter("lscale")
except:
try:
return geo.parameter("nm")/nm
except:
return 1e9
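# Illustrative note (not part of the original module): as the TODO above
# points out, lscale is one over the length scale; with the fallback value
# 1e9 the geometry coordinates are effectively in nanometres, and grad/div
# below multiply by lscale so that derivatives come out per metre again.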
def grad(lscale):
def grad0(u):
return dolfin.Constant(lscale)*dolfin.nabla_grad(u)
return grad0
def div(lscale):
def div0(u):
return dolfin.Constant(lscale)*dolfin.transpose(dolfin.nabla_div(u))
return div0
def dim(geo):
return geo.mesh.topology().dim()
cyl = False
def r2pi(cyl):
return dolfin.Expression("2*pi*x[0]", degree=1) if cyl else dolfin.Constant(1.)
def invscale(lscale):
    return lambda i: dolfin.Constant(1./lscale**i)
| mitschabaude/nanopores | nanopores/physics/default.py | Python | mit | 792 | 0.013889 |
import dredd_hooks as hooks
import imp
import os
import json
import uuid
#if you want to import another module for use in this workflow
utils = imp.load_source("utils",os.path.join(os.getcwd(),'utils.py'))
###############################################################################
###############################################################################
# Tags
###############################################################################
###############################################################################
@hooks.before("Auth Providers > Auth Providers collection > List auth providers")
@hooks.before("Auth Providers > Auth Provider instance > View auth provider")
@hooks.before("Auth Provider Affiliates > NOT_IMPL_NEW List auth provider affiliates > NOT_IMPL_NEW List auth provider affiliates")
@hooks.before("Auth Provider Affiliates > NOT_IMPL_NEW View auth provider affiliate > NOT_IMPL_NEW View auth provider affiliate")
@hooks.before("Auth Provider Affiliates > NOT_IMPL_NEW Create user account for affiliate > NOT_IMPL_NEW Create user account for affiliate")
def skippy21_1(transaction):
utils.skip_this_endpoint(transaction)
| benneely/duke-data-service-dredd | dredd/dredd_scripts/21_auth_provider.py | Python | gpl-3.0 | 1,178 | 0.006791 |
# Copyright 2016 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import volume_types
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.PortOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center'),
cfg.BoolOpt('dell_sc_verify_cert',
default=False,
help='Enable HTTPS SC certificate verification'),
cfg.StrOpt('secondary_san_ip',
default='',
help='IP address of secondary DSM controller'),
cfg.StrOpt('secondary_san_login',
default='Admin',
help='Secondary DSM user name'),
cfg.StrOpt('secondary_san_password',
default='',
help='Secondary DSM user password name',
secret=True),
cfg.PortOpt('secondary_sc_api_port',
default=3033,
help='Secondary Dell API port'),
cfg.MultiOpt('excluded_domain_ip',
item_type=types.IPAddress(),
default=None,
help='Domain IP to be excluded from iSCSI returns.'),
cfg.StrOpt('dell_server_os',
default='Red Hat Linux 6.x',
help='Server OS type to use when creating a new server on the '
'Storage Center.')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
self.backends = self.configuration.safe_get('replication_device')
self.replication_enabled = True if self.backends else False
self.is_direct_connect = False
self.active_backend_id = kwargs.get('active_backend_id', None)
self.failed_over = True if self.active_backend_id else False
LOG.info(_LI('Loading %(name)s: Failover state is %(state)r'),
{'name': self.backend_name,
'state': self.failed_over})
self.storage_protocol = 'iSCSI'
self.failback_timeout = 60
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
7.38197504E8 Bytes
Need to split that apart and convert to GB.
:returns: gbs in int form
"""
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# If any of that blew up it isn't in the format we
# thought so eat our error and return None
return None
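    # Worked example (illustrative, not part of the original driver):
    # '7.38197504E8 Bytes' splits into ['7.38197504E8', 'Bytes'];
    # 738197504 / 1073741824.0 is 0.6875, which int() truncates to 0,
    # so anything under one GiB reports as 0 GB.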
def do_setup(self, context):
"""One time driver setup.
Called once by the manager after the driver is loaded.
        Sets up clients, checks licenses, and sets up protocol-specific
        helpers.
"""
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration, self.active_backend_id, self.storage_protocol)
def check_for_setup_error(self):
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
self.is_direct_connect = api.is_direct_connect
if self.is_direct_connect and self.replication_enabled:
msg = _('Dell Cinder driver configuration error replication '
'not supported with direct connect.')
raise exception.InvalidHost(reason=msg)
# If we are a healthy replicated system make sure our backend
# is alive.
if self.replication_enabled and not self.failed_over:
# Check that our replication destinations are available.
for backend in self.backends:
replssn = backend['target_device_id']
try:
# Just do a find_sc on it. If it raises we catch
# that and raise with a correct exception.
api.find_sc(int(replssn))
except exception.VolumeBackendAPIException:
msg = _('Dell Cinder driver configuration error '
'replication_device %s not found') % replssn
raise exception.InvalidHost(reason=msg)
def _get_volume_extra_specs(self, obj):
"""Gets extra specs for the given object."""
type_id = obj.get('volume_type_id')
if type_id:
return volume_types.get_volume_type_extra_specs(type_id)
return {}
def _add_volume_to_consistency_group(self, api, scvolume, volume):
"""Just a helper to add a volume to a consistency group.
        :param api: Dell SC API object.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
:returns: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
volume.get('consistencygroup_id'))
if profile:
api.update_cg_volumes(profile, [volume])
def _get_replication_specs(self, specs):
"""Checks if we can do replication.
Need the extra spec set and we have to be talking to EM.
:param specs: Cinder Volume or snapshot extra specs.
:return: rinfo dict.
"""
rinfo = {'enabled': False, 'sync': False,
'live': False, 'active': False,
'autofailover': False}
# Repl does not work with direct connect.
if not self.is_direct_connect:
if (not self.failed_over and
specs.get('replication_enabled') == '<is> True'):
rinfo['enabled'] = True
if specs.get('replication_type') == '<in> sync':
rinfo['sync'] = True
if specs.get('replication:livevolume') == '<is> True':
rinfo['live'] = True
if specs.get('replication:livevolume:autofailover') == '<is> True':
rinfo['autofailover'] = True
if specs.get('replication:activereplay') == '<is> True':
rinfo['active'] = True
# Some quick checks.
if rinfo['enabled']:
replication_target_count = len(self.backends)
msg = None
if replication_target_count == 0:
msg = _(
'Replication setup failure: replication has been '
'enabled but no replication target has been specified '
'for this backend.')
if rinfo['live'] and replication_target_count != 1:
msg = _('Replication setup failure: replication:livevolume'
' has been enabled but more than one replication '
'target has been specified for this backend.')
if msg:
LOG.debug(msg)
raise exception.ReplicationError(message=msg)
# Got this far. Life is good. Return our data.
return rinfo
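    # Illustrative example (not part of the original driver): on a healthy,
    # non-failed-over backend with one replication_device configured, extra
    # specs of {'replication_enabled': '<is> True',
    # 'replication_type': '<in> sync'} yield rinfo with enabled=True and
    # sync=True, while live, active and autofailover remain False.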
def _is_live_vol(self, obj):
rspecs = self._get_replication_specs(self._get_volume_extra_specs(obj))
return rspecs['enabled'] and rspecs['live']
def _create_replications(self, api, volume, scvolume, extra_specs=None):
"""Creates any appropriate replications for a given volume.
:param api: Dell REST API object.
:param volume: Cinder volume object.
:param scvolume: Dell Storage Center Volume object.
:param extra_specs: Extra specs if we have them otherwise gets them
from the volume.
:return: model_update
"""
# Replication V2
# for now we assume we have an array named backends.
replication_driver_data = None
# Replicate if we are supposed to.
if not extra_specs:
extra_specs = self._get_volume_extra_specs(volume)
rspecs = self._get_replication_specs(extra_specs)
if rspecs['enabled']:
for backend in self.backends:
targetdeviceid = backend['target_device_id']
primaryqos = backend.get('qosnode', 'cinderqos')
secondaryqos = backend.get('remoteqos', 'cinderqos')
diskfolder = backend.get('diskfolder', None)
obj = None
if rspecs['live']:
# We are rolling with a live volume.
obj = api.create_live_volume(scvolume, targetdeviceid,
rspecs['active'],
rspecs['sync'],
rspecs['autofailover'],
primaryqos, secondaryqos)
else:
# Else a regular replication.
obj = api.create_replication(scvolume, targetdeviceid,
primaryqos, rspecs['sync'],
diskfolder, rspecs['active'])
# This is either a ScReplication object or a ScLiveVolume
# object. So long as it isn't None we are fine.
if not obj:
# Create replication will have printed a better error.
msg = _('Replication %(name)s to %(ssn)s failed.') % {
'name': volume['id'],
'ssn': targetdeviceid}
raise exception.VolumeBackendAPIException(data=msg)
if not replication_driver_data:
replication_driver_data = backend['target_device_id']
else:
replication_driver_data += ','
replication_driver_data += backend['target_device_id']
# If we did something return model update.
model_update = {}
if replication_driver_data:
model_update = {
'replication_status': fields.ReplicationStatus.ENABLED,
'replication_driver_data': replication_driver_data}
return model_update
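    # Illustrative example (not part of the original driver): with a single
    # replication_device whose target_device_id is '64703' and a volume type
    # that enables replication, the returned model_update is
    # {'replication_status': 'enabled', 'replication_driver_data': '64703'};
    # when replication is not enabled an empty dict is returned.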
@staticmethod
def _cleanup_failed_create_volume(api, volumename):
try:
api.delete_volume(volumename)
except exception.VolumeBackendAPIException as ex:
LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
def create_volume(self, volume):
"""Create a volume."""
model_update = {}
# We use id as our name as it is unique.
volume_name = volume.get('id')
# Look for our volume
volume_size = volume.get('size')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
# Get our extra specs.
specs = self._get_volume_extra_specs(volume)
scvolume = api.create_volume(
volume_name, volume_size,
specs.get('storagetype:storageprofile'),
specs.get('storagetype:replayprofiles'),
specs.get('storagetype:volumeqos'),
specs.get('storagetype:groupqos'),
specs.get('storagetype:datareductionprofile'))
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume %s') %
volume_name)
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
# Create replications. (Or not. It checks.)
model_update = self._create_replications(api, volume, scvolume)
# Save our provider_id.
model_update['provider_id'] = scvolume['instanceId']
except Exception:
# if we actually created a volume but failed elsewhere
# clean up the volume now.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
data=_('Unable to create volume. Backend down.'))
return model_update
def _split_driver_data(self, replication_driver_data):
"""Splits the replication_driver_data into an array of ssn strings.
:param replication_driver_data: A string of comma separated SSNs.
:returns: SSNs in an array of strings.
"""
ssnstrings = []
# We have any replication_driver_data.
if replication_driver_data:
# Split the array and wiffle through the entries.
            for raw_ssn in replication_driver_data.split(','):
                # Strip any junk from the string.
                ssnstring = raw_ssn.strip()
# Anything left?
if ssnstring:
# Add it to our array.
ssnstrings.append(ssnstring)
return ssnstrings
def _delete_live_volume(self, api, volume):
"""Delete live volume associated with volume.
        :param api: Dell REST API object.
:param volume: Cinder Volume object
:return: True if we actually deleted something. False for everything
else.
"""
# Live Volume was added after provider_id support. So just assume it is
# there.
replication_driver_data = volume.get('replication_driver_data')
# Do we have any replication driver data?
if replication_driver_data:
# Valid replication data?
ssnstrings = self._split_driver_data(replication_driver_data)
if ssnstrings:
ssn = int(ssnstrings[0])
sclivevolume = api.get_live_volume(volume.get('provider_id'),
volume.get('id'))
# Have we found the live volume?
if (sclivevolume and
sclivevolume.get('secondaryScSerialNumber') == ssn and
api.delete_live_volume(sclivevolume, True)):
LOG.info(_LI('%(vname)s\'s replication live volume has '
                                 'been deleted from Storage Center %(sc)s.'),
{'vname': volume.get('id'),
'sc': ssn})
return True
# If we are here either we do not have a live volume, we do not have
# one on our configured SC or we were not able to delete it.
# Either way, warn and leave.
LOG.warning(_LW('Unable to delete %s live volume.'),
volume.get('id'))
return False
def _delete_replications(self, api, volume):
"""Delete replications associated with a given volume.
We should be able to roll through the replication_driver_data list
of SSNs and delete replication objects between them and the source
volume.
:param api: Dell REST API object.
:param volume: Cinder Volume object
:return: None
"""
replication_driver_data = volume.get('replication_driver_data')
if replication_driver_data:
ssnstrings = self._split_driver_data(replication_driver_data)
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvol = api.find_volume(volume_name, provider_id)
# This is just a string of ssns separated by commas.
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
ssn = int(ssnstring)
# Are we a replication or a live volume?
if not api.delete_replication(scvol, ssn):
LOG.warning(_LW('Unable to delete replication of Volume '
'%(vname)s to Storage Center %(sc)s.'),
{'vname': volume_name,
'sc': ssnstring})
# If none of that worked or there was nothing to do doesn't matter.
# Just move on.
def delete_volume(self, volume):
deleted = False
# We use id as our name as it is unique.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
# Unless we are migrating.
if volume.get('migration_status') == 'deleting':
volume_name = volume.get('_name_id')
provider_id = None
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
rspecs = self._get_replication_specs(
self._get_volume_extra_specs(volume))
if rspecs['enabled']:
if rspecs['live']:
self._delete_live_volume(api, volume)
else:
self._delete_replications(api, volume)
deleted = api.delete_volume(volume_name, provider_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
"""Create snapshot"""
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
provider_id = snapshot.volume.get('provider_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id,
self._is_live_vol(snapshot))
if scvolume is not None:
replay = api.create_replay(scvolume, snapshot_id, 0)
if replay:
return {'status': fields.SnapshotStatus.AVAILABLE,
'provider_id': scvolume['instanceId']}
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = fields.SnapshotStatus.ERROR
msg = _('Failed to create snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
model_update = {}
scvolume = None
volume_name = volume.get('id')
src_provider_id = snapshot.get('provider_id')
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
# cgsnapshot. If it was a cgsnapshot it will be identified on the Dell
# backend under cgsnapshot_id. Given the volume ID and the
# cgsnapshot_id we can find the appropriate snapshot.
# So first we look for cgsnapshot_id. If that is blank then it must
# have been a normal snapshot which will be found under snapshot_id.
snapshot_id = snapshot.get('cgsnapshot_id')
if not snapshot_id:
snapshot_id = snapshot.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
srcvol = api.find_volume(src_volume_name, src_provider_id)
if srcvol is not None:
replay = api.find_replay(srcvol, snapshot_id)
if replay is not None:
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
scvolume = api.create_view_volume(
volume_name, replay,
specs.get('storagetype:replayprofiles'),
specs.get('storagetype:volumeqos'),
specs.get('storagetype:groupqos'),
specs.get('storagetype:datareductionprofile'))
# Extend Volume
if scvolume and (volume['size'] >
snapshot["volume_size"]):
LOG.debug('Resize the new volume to %s.',
volume['size'])
scvolume = api.expand_volume(scvolume,
volume['size'])
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(snap)s.') %
{'name': volume_name,
'snap': snapshot_id})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
# Save our instanceid.
model_update['provider_id'] = (
scvolume['instanceId'])
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
'snap': snapshot_id})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
model_update = {}
scvolume = None
src_volume_name = src_vref.get('id')
src_provider_id = src_vref.get('provider_id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
srcvol = api.find_volume(src_volume_name, src_provider_id)
if srcvol is not None:
# Get our specs.
specs = self._get_volume_extra_specs(volume)
# Create our volume
scvolume = api.create_cloned_volume(
volume_name, srcvol,
specs.get('storagetype:storageprofile'),
specs.get('storagetype:replayprofiles'),
specs.get('storagetype:volumeqos'),
specs.get('storagetype:groupqos'),
specs.get('storagetype:datareductionprofile'))
# Extend Volume
if scvolume and volume['size'] > src_vref['size']:
LOG.debug('Resize the volume to %s.', volume['size'])
scvolume = api.expand_volume(scvolume, volume['size'])
# If either of those didn't work we bail.
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(vol)s.') %
{'name': volume_name,
'vol': src_volume_name})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
# Save our provider_id.
model_update['provider_id'] = scvolume['instanceId']
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
'src': src_volume_name})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
provider_id = snapshot.get('provider_id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id)
if scvolume and api.delete_replay(scvolume, snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = fields.SnapshotStatus.ERROR_DELETING
msg = _('Failed to delete snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_export(self, context, volume, connector):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Per the eqlx driver we just make sure that the volume actually
exists where we think it does.
"""
scvolume = None
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
scvolume = api.find_volume(volume_name, provider_id,
self._is_live_vol(volume))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
msg = _('Unable to find volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def remove_export(self, context, volume):
"""Remove an export of a volume.
We do nothing here to match the nothing we do in create export. Again
we do everything in initialize and terminate connection.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name,
'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
msg = _('Unable to extend volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
# Take this opportunity to report our failover state.
if self.failed_over:
LOG.debug('%(source)s has been failed over to %(dest)s',
{'source': self.backend_name,
'dest': self.active_backend_id})
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
# Static stats.
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.storage_protocol
data['reserved_percentage'] = 0
data['consistencygroup_support'] = True
data['thin_provisioning_support'] = True
data['QoS_support'] = False
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication_type'] = ['async', 'sync']
data['replication_count'] = len(self.backends)
replication_targets = []
# Trundle through our backends.
for backend in self.backends:
target_device_id = backend.get('target_device_id')
if target_device_id:
replication_targets.append(target_device_id)
data['replication_targets'] = replication_targets
# Get our capacity.
storageusage = api.get_storage_usage()
if storageusage:
# Get actual stats.
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
else:
# Soldier on. Just return 0 for this iteration.
LOG.error(_LE('Unable to retrieve volume stats.'))
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
original_volume_name = volume.get('id')
current_name = new_volume.get('id')
# We should have this. If we don't we'll set it below.
provider_id = new_volume.get('provider_id')
LOG.debug('update_migrated_volume: %(current)s to %(original)s',
{'current': current_name,
'original': original_volume_name})
if original_volume_name:
with self._client.open_connection() as api:
# todo(tswanson): Delete old volume repliations/live volumes
# todo(tswanson): delete old volume?
scvolume = api.find_volume(current_name, provider_id)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
# Replicate if we are supposed to.
model_update = self._create_replications(api,
new_volume,
scvolume)
model_update['_name_id'] = None
model_update['provider_id'] = scvolume['instanceId']
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
def create_consistencygroup(self, context, group):
"""This creates a replay profile on the storage backend.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
msg = _('Unable to create consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
        :param group: the dictionary of the consistency group to be deleted.
:returns: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if profile:
api.delete_replay_profile(profile)
# If we are here because we found no profile that should be fine
# as we are trying to delete it anyway.
# Trundle through the list deleting the volumes.
volume_updates = []
for volume in volumes:
self.delete_volume(volume)
volume_updates.append({'id': volume['id'],
'status': 'deleted'})
model_update = {'status': group['status']}
return model_update, volume_updates
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
will set the status to 'available'.
add_volumes_update and remove_volumes_update are lists of dictionaries
that the driver wants the manager to update upon a successful return.
Note that each entry requires a {'id': xxx} so that the correct
volume entry can be updated. If None is returned, the volume will
remain its original status. Also note that you cannot directly
assign add_volumes to add_volumes_update as add_volumes is a list of
cinder.db.sqlalchemy.models.Volume objects and cannot be used for
db update directly. Same with remove_volumes.
If the driver throws an exception, the status of the group as well as
those of the volumes to be added/removed will be set to 'error'.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
msg = _('Unable to update consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
:param snapshots: List of snapshots for this cgsnapshot.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.debug('profile %s replayid %s', profile, snapshotid)
if api.snap_cg_replay(profile, snapshotid, 0):
snapshot_updates = []
for snapshot in snapshots:
snapshot_updates.append({
'id': snapshot.id,
'status': fields.SnapshotStatus.AVAILABLE
})
model_update = {'status': fields.SnapshotStatus.AVAILABLE}
return model_update, snapshot_updates
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
msg = _('Unable to snap Consistency Group %s') % cgid
raise exception.VolumeBackendAPIException(data=msg)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot.
If profile isn't found return success. If failed to delete the
replay (the snapshot) then raise an exception.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
msg = (_('Unable to delete Consistency Group snapshot %s')
% snapshotid)
raise exception.VolumeBackendAPIException(data=msg)
snapshot_updates = []
for snapshot in snapshots:
snapshot_updates.append(
{'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED})
model_update = {'status': fields.SnapshotStatus.DELETED}
return model_update, snapshot_updates
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
        1. Rename the backend storage object so that it matches the
           volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
# Replicate if we are supposed to.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvolume = api.find_volume(volume_name, provider_id)
model_update = self._create_replications(api, volume, scvolume)
if model_update:
return model_update
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Only return a model_update if we have replication info to add.
return None
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
with self._client.open_connection() as api:
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvolume = api.find_volume(volume_name, provider_id)
if scvolume:
api.unmanage(scvolume)
def _get_retype_spec(self, diff, volume_name, specname, spectype):
"""Helper function to get current and requested spec.
:param diff: A difference dictionary.
:param volume_name: The volume name we are working with.
:param specname: The pretty name of the parameter.
:param spectype: The actual spec string.
:return: current, requested spec.
:raises: VolumeBackendAPIException
"""
spec = (diff['extra_specs'].get(spectype))
if spec:
if len(spec) != 2:
msg = _('Unable to retype %(specname)s, expected to receive '
'current and requested %(spectype)s values. Value '
'received: %(spec)s') % {'specname': specname,
'spectype': spectype,
'spec': spec}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
current = spec[0]
requested = spec[1]
if current != requested:
LOG.debug('Retyping volume %(vol)s to use %(specname)s '
'%(spec)s.',
{'vol': volume_name,
'specname': specname,
'spec': requested})
return current, requested
else:
LOG.info(_LI('Retype was to same Storage Profile.'))
return None, None
def _retype_replication(self, api, volume, scvolume, new_type, diff):
model_update = None
ret = True
# Replication.
current, requested = (
self._get_retype_spec(diff, volume.get('id'),
'replication_enabled',
'replication_enabled'))
# We only toggle at the repl level.
if current != requested:
# If we are changing to on...
if requested == '<is> True':
# We create our replication using our new type's extra specs.
model_update = self._create_replications(
api, volume, scvolume,
new_type.get('extra_specs'))
elif current == '<is> True':
# If we are killing replication we have to see if we currently
# have live volume enabled or not.
if self._is_live_vol(volume):
ret = self._delete_live_volume(api, volume)
else:
self._delete_replications(api, volume)
model_update = {'replication_status':
fields.ReplicationStatus.DISABLED,
'replication_driver_data': ''}
# TODO(tswanson): Add support for changing replication options.
return ret, model_update
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
:returns: Boolean or Boolean, model_update tuple.
"""
LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s '
'diff: %(diff)s host: %(host)s'),
{'name': volume.get('id'), 'newtype': new_type,
'diff': diff, 'host': host})
model_update = None
# Any spec changes?
if diff['extra_specs']:
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
with self._client.open_connection() as api:
try:
# Get our volume
scvolume = api.find_volume(volume_name, provider_id)
if scvolume is None:
LOG.error(_LE('Retype unable to find volume %s.'),
volume_name)
return False
# Check our specs.
# Storage profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Storage Profile',
'storagetype:storageprofile'))
                    # If there is a change and it didn't work, fail fast.
if (current != requested and not
api.update_storage_profile(scvolume, requested)):
LOG.error(_LE('Failed to update storage profile'))
return False
# Replay profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replay Profiles',
'storagetype:replayprofiles'))
                    # If there is a change and it didn't work, fail fast.
if requested and not api.update_replay_profiles(scvolume,
requested):
LOG.error(_LE('Failed to update replay profiles'))
return False
# Volume QOS profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Volume QOS Profile',
'storagetype:volumeqos'))
if current != requested:
                        if not api.update_qos_profile(scvolume, requested):
                            LOG.error(_LE('Failed to update volume '
                                          'qos profile'))
                            return False
# Group QOS profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Group QOS Profile',
'storagetype:groupqos'))
if current != requested:
if not api.update_qos_profile(scvolume, requested,
True):
LOG.error(_LE('Failed to update group '
'qos profile'))
return False
# Data reduction profiles.
current, requested = (
self._get_retype_spec(
diff, volume_name, 'Data Reduction Profile',
'storagetype:datareductionprofile'))
if current != requested:
if not api.update_datareduction_profile(scvolume,
requested):
LOG.error(_LE('Failed to update data reduction '
'profile'))
return False
# Active Replay
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replicate Active Replay',
'replication:activereplay'))
if current != requested and not (
api.update_replicate_active_replay(
scvolume, requested == '<is> True')):
LOG.error(_LE('Failed to apply '
'replication:activereplay setting'))
return False
# Deal with replication.
ret, model_update = self._retype_replication(
api, volume, scvolume, new_type, diff)
if not ret:
return False
except exception.VolumeBackendAPIException:
# We do nothing with this. We simply return failure.
return False
# If we have something to send down...
if model_update:
return True, model_update
return True
def _parse_secondary(self, api, secondary):
"""Find the replication destination associated with secondary.
:param api: Dell StorageCenterApi
:param secondary: String indicating the secondary to failover to.
:return: Destination SSN for the given secondary.
"""
LOG.debug('_parse_secondary. Looking for %s.', secondary)
destssn = None
# Trundle through these looking for our secondary.
for backend in self.backends:
ssnstring = backend['target_device_id']
# If they list a secondary it has to match.
# If they do not list a secondary we return the first
# replication on a working system.
if not secondary or secondary == ssnstring:
# Is a string. Need an int.
ssn = int(ssnstring)
# Without the source being up we have no good
# way to pick a destination to failover to. So just
# look for one that is just up.
try:
# If the SC ssn exists use it.
if api.find_sc(ssn):
destssn = ssn
break
except exception.VolumeBackendAPIException:
LOG.warning(_LW('SSN %s appears to be down.'), ssn)
LOG.info(_LI('replication failover secondary is %(ssn)s'),
{'ssn': destssn})
return destssn
def _update_backend(self, active_backend_id):
# Mark for failover or undo failover.
LOG.debug('active_backend_id: %s', active_backend_id)
if active_backend_id:
self.active_backend_id = six.text_type(active_backend_id)
self.failed_over = True
else:
self.active_backend_id = None
self.failed_over = False
self._client.active_backend_id = self.active_backend_id
def _get_qos(self, targetssn):
# Find our QOS.
qosnode = None
for backend in self.backends:
if int(backend['target_device_id']) == targetssn:
qosnode = backend.get('qosnode', 'cinderqos')
return qosnode
def _parse_extraspecs(self, volume):
# Digest our extra specs for replication.
extraspecs = {}
specs = self._get_volume_extra_specs(volume)
if specs.get('replication_type') == '<in> sync':
extraspecs['replicationtype'] = 'Synchronous'
else:
extraspecs['replicationtype'] = 'Asynchronous'
if specs.get('replication:activereplay') == '<is> True':
extraspecs['activereplay'] = True
else:
extraspecs['activereplay'] = False
extraspecs['storage_profile'] = specs.get('storagetype:storageprofile')
extraspecs['replay_profile_string'] = (
specs.get('storagetype:replayprofiles'))
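        # A hypothetical result for a synchronous replication type with
        # active replay enabled and a made-up replay profile name:
        #     {'replicationtype': 'Synchronous', 'activereplay': True,
        #      'storage_profile': None, 'replay_profile_string': 'Daily'}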
return extraspecs
def _wait_for_replication(self, api, items):
# Wait for our replications to resync with their original volumes.
# We wait for completion, errors or timeout.
deadcount = 5
lastremain = 0.0
# The big wait loop.
while True:
# We run until all volumes are synced or in error.
done = True
currentremain = 0.0
# Run the list.
for item in items:
# If we have one cooking.
if item['status'] == 'inprogress':
# Is it done?
synced, remain = api.replication_progress(item['screpl'])
currentremain += remain
if synced:
# It is! Get our volumes.
cvol = api.get_volume(item['cvol'])
nvol = api.get_volume(item['nvol'])
# Flip replication.
if (cvol and nvol and api.flip_replication(
cvol, nvol, item['volume']['id'],
item['specs']['replicationtype'],
item['qosnode'],
item['specs']['activereplay'])):
# rename the original. Doesn't matter if it
# succeeded as we should have the provider_id
# of the new volume.
ovol = api.get_volume(item['ovol'])
if not ovol or not api.rename_volume(
ovol, 'org:' + ovol['name']):
# Not a reason to fail but will possibly
# cause confusion so warn.
LOG.warning(_LW('Unable to locate and rename '
'original volume: %s'),
item['ovol'])
item['status'] = 'synced'
else:
item['status'] = 'error'
elif synced is None:
# Couldn't get info on this one. Call it baked.
item['status'] = 'error'
else:
# Miles to go before we're done.
done = False
# done? then leave.
if done:
break
# Confirm we are or are not still making progress.
if lastremain == currentremain:
# One chance down. Warn user.
deadcount -= 1
LOG.warning(_LW('Waiting for replications to complete. '
'No progress for %(timeout)d seconds. '
'deadcount = %(cnt)d'),
{'timeout': self.failback_timeout,
'cnt': deadcount})
else:
# Reset
lastremain = currentremain
deadcount = 5
            # If we've used up our 5 chances we error and log.
if deadcount == 0:
LOG.error(_LE('Replication progress has stopped: '
'%f remaining.'), currentremain)
for item in items:
if item['status'] == 'inprogress':
LOG.error(_LE('Failback failed for volume: %s. '
'Timeout waiting for replication to '
'sync with original volume.'),
item['volume']['id'])
item['status'] = 'error'
break
# This is part of an async call so we should be good sleeping here.
# Have to balance hammering the backend for no good reason with
# the max timeout for the unit tests. Yeah, silly.
eventlet.sleep(self.failback_timeout)
def _reattach_remaining_replications(self, api, items):
# Wiffle through our backends and reattach any remaining replication
# targets.
for item in items:
if item['status'] == 'synced':
svol = api.get_volume(item['nvol'])
# assume it went well. Will error out if not.
item['status'] = 'reattached'
# wiffle through our backends and kick off replications.
for backend in self.backends:
rssn = int(backend['target_device_id'])
if rssn != api.ssn:
rvol = api.find_repl_volume(item['volume']['id'],
rssn, None)
# if there is an old replication whack it.
api.delete_replication(svol, rssn, False)
if api.start_replication(
svol, rvol,
item['specs']['replicationtype'],
self._get_qos(rssn),
item['specs']['activereplay']):
# Save our replication_driver_data.
item['rdd'] += ','
item['rdd'] += backend['target_device_id']
else:
# No joy. Bail
item['status'] = 'error'
def _fixup_types(self, api, items):
# Update our replay profiles.
for item in items:
if item['status'] == 'reattached':
# Re-apply any appropriate replay profiles.
item['status'] = 'available'
rps = item['specs']['replay_profile_string']
if rps:
svol = api.get_volume(item['nvol'])
if not api.update_replay_profiles(svol, rps):
item['status'] = 'error'
def _volume_updates(self, items):
# Update our volume updates.
volume_updates = []
for item in items:
# Set our status for our replicated volumes
model_update = {'provider_id': item['nvol'],
'replication_driver_data': item['rdd']}
# These are simple. If the volume reaches available then,
# since we were replicating it, replication status must
# be good. Else error/error.
if item['status'] == 'available':
model_update['status'] = 'available'
model_update['replication_status'] = (
fields.ReplicationStatus.ENABLED)
else:
model_update['status'] = 'error'
model_update['replication_status'] = (
fields.ReplicationStatus.ERROR)
volume_updates.append({'volume_id': item['volume']['id'],
'updates': model_update})
return volume_updates
def _failback_replication(self, api, volume, qosnode):
"""Sets up the replication failback.
:param api: Dell SC API.
:param volume: Cinder Volume
:param qosnode: Dell QOS node object.
:return: replitem dict.
"""
LOG.info(_LI('failback_volumes: replicated volume'))
# Get our current volume.
cvol = api.find_volume(volume['id'], volume['provider_id'])
# Original volume on the primary.
ovol = api.find_repl_volume(volume['id'], api.primaryssn,
None, True, False)
# Delete our current mappings.
api.remove_mappings(cvol)
# If there is a replication to delete do so.
api.delete_replication(ovol, api.ssn, False)
# Replicate to a common replay.
screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
# We made it this far. Update our status.
screplid = None
status = ''
if screpl:
screplid = screpl['instanceId']
nvolid = screpl['destinationVolume']['instanceId']
status = 'inprogress'
else:
LOG.error(_LE('Unable to restore %s'), volume['id'])
screplid = None
nvolid = None
status = 'error'
# Save some information for the next step.
# nvol is the new volume created by replicate_to_common.
# We also grab our extra specs here.
replitem = {
'volume': volume,
'specs': self._parse_extraspecs(volume),
'qosnode': qosnode,
'screpl': screplid,
'cvol': cvol['instanceId'],
'ovol': ovol['instanceId'],
'nvol': nvolid,
'rdd': six.text_type(api.ssn),
'status': status}
return replitem
def _failback_live_volume(self, api, id, provider_id):
"""failback the live volume to its original
:param api: Dell SC API
:param id: Volume ID
:param provider_id: Dell Instance ID
:return: model_update dict
"""
model_update = {}
# We do not search by name. Only failback if we have a complete
# LV object.
sclivevolume = api.get_live_volume(provider_id)
# TODO(tswanson): Check swapped state first.
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
model_update = {
'status': 'available',
'replication_status': fields.ReplicationStatus.ENABLED,
'provider_id':
sclivevolume['secondaryVolume']['instanceId']}
else:
LOG.info(_LI('Failure swapping roles %s'), id)
model_update = {'status': 'error'}
return model_update
def _finish_failback(self, api, replitems):
# Wait for replication to complete.
# This will also flip replication.
self._wait_for_replication(api, replitems)
# Replications are done. Attach to any additional replication
# backends.
self._reattach_remaining_replications(api, replitems)
self._fixup_types(api, replitems)
return self._volume_updates(replitems)
def failback_volumes(self, volumes):
"""This is a generic volume failback.
:param volumes: List of volumes that need to be failed back.
:return: volume_updates for the list of volumes.
"""
LOG.info(_LI('failback_volumes'))
with self._client.open_connection() as api:
# Get our qosnode. This is a good way to make sure the backend
# is still setup so that we can do this.
qosnode = self._get_qos(api.ssn)
if not qosnode:
raise exception.VolumeBackendAPIException(
message=_('Unable to failback. Backend is misconfigured.'))
volume_updates = []
replitems = []
# Trundle through the volumes. Update non replicated to alive again
# and reverse the replications for the remaining volumes.
for volume in volumes:
LOG.info(_LI('failback_volumes: starting volume: %s'), volume)
model_update = {}
if volume.get('replication_driver_data'):
rspecs = self._get_replication_specs(
self._get_volume_extra_specs(volume))
if rspecs['live']:
model_update = self._failback_live_volume(
api, volume['id'], volume['provider_id'])
else:
replitem = self._failback_replication(api, volume,
qosnode)
# Save some information for the next step.
# nvol is the new volume created by
# replicate_to_common. We also grab our
# extra specs here.
replitems.append(replitem)
else:
# Not replicated. Just set it to available.
model_update = {'status': 'available'}
# Save our update
if model_update:
volume_updates.append({'volume_id': volume['id'],
'updates': model_update})
# Let's do up to 5 replications at once.
if len(replitems) == 5:
volume_updates += self._finish_failback(api, replitems)
replitems = []
# Finish any leftover items
if replitems:
volume_updates += self._finish_failback(api, replitems)
# Set us back to a happy state.
# The only way this doesn't happen is if the primary is down.
self._update_backend(None)
return volume_updates
def _failover_replication(self, api, id, provider_id, destssn):
rvol = api.break_replication(id, provider_id, destssn)
model_update = {}
if rvol:
LOG.info(_LI('Success failing over volume %s'), id)
model_update = {'replication_status':
fields.ReplicationStatus.FAILED_OVER,
'provider_id': rvol['instanceId']}
else:
LOG.info(_LI('Failed failing over volume %s'), id)
model_update = {'status': 'error'}
return model_update
def _failover_live_volume(self, api, id, provider_id):
model_update = {}
# Search for volume by id if we have to.
sclivevolume = api.get_live_volume(provider_id, id)
if sclivevolume:
swapped = api.is_swapped(provider_id, sclivevolume)
            # If we aren't swapped, try it. If that fails, error out.
if not swapped and not api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Failure swapping roles %s'), id)
model_update = {'status': 'error'}
return model_update
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
sclivevolume = api.get_live_volume(provider_id)
model_update = {
'replication_status':
fields.ReplicationStatus.FAILED_OVER,
'provider_id':
sclivevolume['primaryVolume']['instanceId']}
# Error and leave.
return model_update
def failover_host(self, context, volumes, secondary_id=None):
"""Failover to secondary.
:param context: security context
:param secondary_id: Specifies rep target to fail over to
:param volumes: List of volumes serviced by this backend.
:returns: destssn, volume_updates data structure
Example volume_updates data structure:
.. code-block:: json
[{'volume_id': <cinder-uuid>,
'updates': {'provider_id': 8,
'replication_status': 'failed-over',
'replication_extended_status': 'whatever',...}},]
"""
LOG.debug('failover-host')
LOG.debug(self.failed_over)
LOG.debug(self.active_backend_id)
LOG.debug(self.replication_enabled)
if self.failed_over:
if secondary_id == 'default':
LOG.debug('failing back')
return 'default', self.failback_volumes(volumes)
raise exception.InvalidReplicationTarget(
reason=_('Already failed over'))
LOG.info(_LI('Failing backend to %s'), secondary_id)
# basic check
if self.replication_enabled:
with self._client.open_connection() as api:
# Look for the specified secondary.
destssn = self._parse_secondary(api, secondary_id)
if destssn:
# We roll through trying to break replications.
# Is failing here a complete failure of failover?
volume_updates = []
for volume in volumes:
model_update = {}
if volume.get('replication_driver_data'):
rspecs = self._get_replication_specs(
self._get_volume_extra_specs(volume))
if rspecs['live']:
model_update = self._failover_live_volume(
api, volume['id'],
volume.get('provider_id'))
else:
model_update = self._failover_replication(
api, volume['id'],
volume.get('provider_id'), destssn)
else:
# Not a replicated volume. Try to unmap it.
scvolume = api.find_volume(
volume['id'], volume.get('provider_id'))
api.remove_mappings(scvolume)
model_update = {'status': 'error'}
# Either we are failed over or our status is now error.
volume_updates.append({'volume_id': volume['id'],
'updates': model_update})
# this is it.
self._update_backend(destssn)
LOG.debug('after update backend')
LOG.debug(self.failed_over)
LOG.debug(self.active_backend_id)
LOG.debug(self.replication_enabled)
return destssn, volume_updates
else:
raise exception.InvalidReplicationTarget(reason=(
_('replication_failover failed. %s not found.') %
secondary_id))
# I don't think we should ever get here.
raise exception.VolumeBackendAPIException(message=(
_('replication_failover failed. '
'Backend not configured for failover')))
def _get_unmanaged_replay(self, api, volume_name, provider_id,
existing_ref):
replay_name = None
if existing_ref:
replay_name = existing_ref.get('source-name')
if not replay_name:
msg = _('_get_unmanaged_replay: Must specify source-name.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Find our volume.
scvolume = api.find_volume(volume_name, provider_id)
if not scvolume:
# Didn't find it.
msg = (_('_get_unmanaged_replay: Cannot find volume id %s')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Find our replay.
screplay = api.find_replay(scvolume, replay_name)
if not screplay:
# Didn't find it. Reference must be invalid.
msg = (_('_get_unmanaged_replay: Cannot '
'find snapshot named %s') % replay_name)
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return screplay
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
snapshot structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the
snapshot['name'] which is how drivers traditionally map between a
cinder snapshot and the associated backend storage object.
2. Place some metadata on the snapshot, or somewhere in the backend,
that allows other driver requests (e.g. delete) to locate the
backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
"""
with self._client.open_connection() as api:
# Find our unmanaged snapshot. This will raise on error.
volume_name = snapshot.get('volume_id')
provider_id = snapshot.get('provider_id')
snapshot_id = snapshot.get('id')
screplay = self._get_unmanaged_replay(api, volume_name,
provider_id, existing_ref)
# Manage means update description and update expiration.
if not api.manage_replay(screplay, snapshot_id):
# That didn't work. Error.
msg = (_('manage_existing_snapshot: Error managing '
'existing replay %(ss)s on volume %(vol)s') %
{'ss': screplay.get('description'),
'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Life is good. Let the world know what we've done.
LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on '
'volume %(volume)s has been renamed to %(id)s and is '
'now managed by Cinder.'),
{'exist': screplay.get('description'),
'volume': volume_name,
'id': snapshot_id})
return {'provider_id': screplay['createVolume']['instanceId']}
# NOTE: Can't use abstractmethod before all drivers implement it
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
volume_name = snapshot.get('volume_id')
provider_id = snapshot.get('provider_id')
with self._client.open_connection() as api:
screplay = self._get_unmanaged_replay(api, volume_name,
provider_id, existing_ref)
sz, rem = dell_storagecenter_api.StorageCenterApi.size_to_gb(
screplay['size'])
if rem > 0:
raise exception.VolumeBackendAPIException(
data=_('Volume size must be a multiple of 1 GB.'))
return sz
# NOTE: Can't use abstractmethod before all drivers implement it
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
NOTE: We do set the expire countdown to 1 day. Once a snapshot is
unmanaged it will expire 24 hours later.
"""
with self._client.open_connection() as api:
snapshot_id = snapshot.get('id')
# provider_id is the snapshot's parent volume's instanceId.
provider_id = snapshot.get('provider_id')
volume_name = snapshot.get('volume_id')
# Find our volume.
scvolume = api.find_volume(volume_name, provider_id)
if not scvolume:
# Didn't find it.
msg = (_('unmanage_snapshot: Cannot find volume id %s')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Find our replay.
screplay = api.find_replay(scvolume, snapshot_id)
if not screplay:
# Didn't find it. Reference must be invalid.
msg = (_('unmanage_snapshot: Cannot find snapshot named %s')
% snapshot_id)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Free our snapshot.
api.unmanage_replay(screplay)
# Do not check our result.
def thaw_backend(self, context):
"""Notify the backend that it's unfrozen/thawed.
This is a gate. We do not allow the backend to be thawed if
it is still failed over.
:param context: security context
        :returns: True on success
:raises Invalid: if it cannot be thawed.
"""
# We shouldn't be called if we are not failed over.
if self.failed_over:
msg = _('The Dell SC array does not support thawing a failed over'
' replication. Please migrate volumes to an operational '
'back-end or resolve primary system issues and '
'fail back to reenable full functionality.')
LOG.error(msg)
raise exception.Invalid(reason=msg)
return True
| ge0rgi/cinder | cinder/volume/drivers/dell/dell_storagecenter_common.py | Python | apache-2.0 | 83,512 | 0 |
#
# Code by Alexander Pruss and under the MIT license
#
#
# pysanka.py [filename [height [oval|N]]]
# oval: wrap an oval image onto an egg
# N: wrap a rectangular image onto an egg N times (N is an integer)
#
# Yeah, these arguments are a mess!
from mine import *
import colors
import sys
import os
from PIL import Image
from random import uniform
def egg(block=block.GOLD_BLOCK, h=40, a=2.5, b=1, c=0.1, sphere=False):
def radius(y):
if y < 0 or y >= h:
return 0
if sphere:
return sqrt((h/2.)**2 - (y-h/2.)**2)
l = y / float(h-1)
# Formula from: http://www.jolyon.co.uk/myresearch/image-analysis/egg-shape-modelling/
return h*a*exp((-0.5*l*l+c*l-.5*c*c)/(b*b))*sqrt(1-l)*sqrt(l)/(pi*b)
for y in range(0,h):
r = radius(y)
minimumr = min(r-2,radius(y-1),radius(y+1))
for x in range(-h,h+1):
for z in range(-h,h+1):
myr = sqrt(x*x + z*z)
if myr <= r and minimumr <= myr:
if x==0 and z==0:
theta = 0
else:
theta = atan2(z,x)+pi/2
yield (x,y,z,block,theta % (2*pi))
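# A quick usage sketch (illustrative): each tuple yielded by egg() is
# (x, y, z, block, theta), with theta the angle around the vertical axis in
# [0, 2*pi), so a plain golden egg could be placed with something like:
#     for x, y, z, b, theta in egg(h=20):
#         mc.setBlock(pos.x + x, pos.y + y, pos.z + z, b)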
def getPixel(image, x, y, dither=None):
rgb = image.getpixel(( image.size[0]-1-floor( x * image.size[0] ), image.size[1]-1-floor( y * image.size[1] ) ))
if dither is not None:
tweaked = ( rgb[0] + uniform(-dither,dither), rgb[1] + uniform(-dither,dither), rgb[2] + uniform(-dither,dither) )
return colors.rgbToBlock(tweaked)[0]
return colors.rgbToBlock(rgb)[0]
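# Example (illustrative): getPixel maps fractional image coordinates in
# [0, 1) to the closest Minecraft block colour; sampling the centre pixel
# with a little random jitter to avoid banding on flat colour areas:
#     b = getPixel(image, 0.5, 0.5, dither=20)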
if __name__ == '__main__':
mc = Minecraft()
if len(sys.argv) > 1:
filename = sys.argv[1]
if not os.path.isfile(filename):
filename = os.path.dirname(os.path.realpath(sys.argv[0])) + "/" + filename
else:
filename = os.path.dirname(os.path.realpath(sys.argv[0])) + "/" + "pysanka.jpg"
if len(sys.argv) > 2:
height = int(sys.argv[2])
else:
height = 100
    oval = False
    sphereWrap = False
    # Default the repeat count up front so it stays defined when the third
    # argument selects "oval" or "sphere" rather than a number.
    repeat = 2
    if len(sys.argv) > 3:
        if sys.argv[3] == "oval":
            oval = True
        elif sys.argv[3] == "sphere":
            sphereWrap = True
        else:
            repeat = int(sys.argv[3])
pos = mc.player.getPos()
if oval:
image = Image.open(filename).convert('RGBA')
first = None
last = None
start = [None] * image.size[1]
stop = [None] * image.size[1]
for y in range(image.size[1]):
for x in range(image.size[0]):
_,_,_,alpha = image.getpixel((x,y))
if alpha == 255:
start[y] = x
break
for x in range(image.size[0]-1,-1,-1):
_,_,_,alpha = image.getpixel((x,y))
if alpha == 255:
stop[y] = x
break
if start[y] is not None:
if first is None:
first = y
last = y
assert first is not None
        for (x,y,z,block,theta) in egg(h=height,block=None,sphere=sphereWrap):
imageY = first + int(float(height-1-y)/height*(last-first+1))
if imageY < first:
imageY = first
if imageY > last:
imageY = last
imageX = start[imageY]+ int((0.5 - 0.5 * sin(theta)) * (stop[imageY]-start[imageY]))
if imageX < start[imageY]:
imageX = start[imageY]
if imageX > stop[imageY]:
imageX = stop[imageY]
mc.setBlock(x+pos.x,y+pos.y,z+pos.z, getPixel(image, imageX, imageY))
else:
image = Image.open(filename).convert('RGB')
        for (x,y,z,block,theta) in egg(h=height,block=None,sphere=sphereWrap):
mc.setBlock(x+pos.x,y+pos.y,z+pos.z,getPixel(image, (theta * repeat / (2*pi)) % 1, y / float(height), dither=20))
| arpruss/raspberryjam-pe | p2/scripts3/pysanka.py | Python | mit | 4,144 | 0.017133 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-04 17:41
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('museum', '0032_auto_20161104_1839'),
]
operations = [
migrations.AddField(
model_name='collection',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='museum.TaggedObject', to='museum.Keyword', verbose_name='Tags'),
),
]
| Sound-Colour-Space/sound-colour-space | website/apps/museum/migrations/0033_collection_tags.py | Python | mit | 594 | 0.001684 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape keeps the data contiguous, unlike indexing with
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you're doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predicted values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you're doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
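# Illustrative usage sketch (added comment, not part of the original module).
# It assumes a fitted estimator built from one of the classes defined below;
# ``clf``, ``X`` and ``y`` are hypothetical names:
#
#     clf = DecisionTreeClassifier(random_state=0).fit(X, y)
#     leaf_ids = clf.apply(X)             # leaf index reached by each sample
#     paths = clf.decision_path(X)        # sparse [n_samples, n_nodes] indicator
#     importances = clf.feature_importances_  # normalized impurity reductions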
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you're doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
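# Illustrative sketch of the probability outputs (added comment, not part of
# the original module); ``clf``, ``X`` and ``y`` are hypothetical names:
#
#     clf = DecisionTreeClassifier().fit(X, y)
#     proba = clf.predict_proba(X)         # shape [n_samples, n_classes]
#     log_proba = clf.predict_log_proba(X)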
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
| sonnyhu/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 41,818 | 0.000072 |
from django import forms
from django.conf import settings
from django.forms.util import ErrorDict
import happyforms
from tower import ugettext_lazy as _lazy
import amo
from amo import helpers
from search.utils import floor_version
collection_sort_by = (
('weekly', _lazy(u'Most popular this week')),
('monthly', _lazy(u'Most popular this month')),
('all', _lazy(u'Most popular all time')),
('rating', _lazy(u'Highest Rated')),
('created', _lazy(u'Newest')),
('updated', _lazy(u'Recently Updated')),
('name', _lazy(u'Name')),
)
PER_PAGE = 20
SEARCH_CHOICES = (
('all', _lazy(u'search for add-ons')),
('collections', _lazy(u'search for collections')),
('themes', _lazy(u'search for themes')),
('apps', _lazy(u'search for apps'))
)
class SimpleSearchForm(forms.Form):
"""Powers the search box on every page."""
q = forms.CharField(required=False)
cat = forms.CharField(required=False, widget=forms.HiddenInput)
appver = forms.CharField(required=False, widget=forms.HiddenInput)
platform = forms.CharField(required=False, widget=forms.HiddenInput)
choices = dict(SEARCH_CHOICES)
def clean_cat(self):
return self.data.get('cat', 'all')
def placeholder(self, txt=None):
if settings.APP_PREVIEW:
return self.choices['apps']
return self.choices.get(txt or self.clean_cat(), self.choices['all'])
class SecondarySearchForm(forms.Form):
q = forms.CharField(widget=forms.HiddenInput, required=False)
cat = forms.CharField(widget=forms.HiddenInput)
pp = forms.CharField(widget=forms.HiddenInput, required=False)
sort = forms.ChoiceField(label=_lazy(u'Sort By'), required=False,
choices=collection_sort_by, initial='weekly')
page = forms.IntegerField(widget=forms.HiddenInput, required=False)
def clean_pp(self):
try:
return int(self.cleaned_data.get('pp'))
except TypeError:
return PER_PAGE
def clean(self):
d = self.cleaned_data
if not d.get('pp'):
d['pp'] = PER_PAGE
return d
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
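# Illustrative usage sketch (added comment, not part of the original module).
# The bound data below is a hypothetical example:
#
#     form = SecondarySearchForm({'q': 'fire', 'cat': 'collections'})
#     if form.is_valid():
#         per_page = form.cleaned_data['pp']   # falls back to PER_PAGE
#     else:
#         # unlike stock Django forms, cleaned_data is still populated here
#         partial = form.cleaned_data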
SORT_CHOICES = (
(None, _lazy(u'Relevance')),
('users', _lazy(u'Most Users')),
('rating', _lazy(u'Top Rated')),
('created', _lazy(u'Newest')),
# --
('name', _lazy(u'Name')),
('downloads', _lazy(u'Weekly Downloads')),
#('price', helpers.loc(u'Price')),
('updated', _lazy(u'Recently Updated')),
('hotness', _lazy(u'Up & Coming')),
)
APP_SORT_CHOICES = (
(None, _lazy(u'Relevance')),
('downloads', _lazy(u'Weekly Downloads')),
('rating', _lazy(u'Top Rated')),
('price', helpers.loc(u'Price')),
# --
('name', _lazy(u'Name')),
('created', _lazy(u'Newest')),
)
class ESSearchForm(happyforms.Form):
q = forms.CharField(required=False)
tag = forms.CharField(required=False)
platform = forms.CharField(required=False)
appver = forms.CharField(required=False)
atype = forms.TypedChoiceField(required=False, coerce=int,
choices=amo.ADDON_TYPES.iteritems())
cat = forms.CharField(required=False)
price = forms.CharField(required=False)
sort = forms.CharField(required=False)
def __init__(self, *args, **kw):
self.addon_type = kw.pop('type', None)
super(ESSearchForm, self).__init__(*args, **kw)
self.sort_choices = SORT_CHOICES
def clean_appver(self):
return floor_version(self.cleaned_data.get('appver'))
def clean_sort(self):
sort = self.cleaned_data.get('sort')
return sort if sort in dict(self.sort_choices) else None
def clean_cat(self):
cat = self.cleaned_data.get('cat')
if ',' in cat:
try:
self.cleaned_data['atype'], cat = map(int, cat.split(','))
except ValueError:
return None
else:
try:
return int(cat)
except ValueError:
                return None
        return cat
def full_clean(self):
"""
Cleans self.data and populates self._errors and self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
| clouserw/olympia | apps/search/forms.py | Python | bsd-3-clause | 5,239 | 0.000191 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from nova import context
from nova import objects
from nova.scheduler import caching_scheduler
from nova.scheduler import chance
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import ironic_host_manager
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_server_actions
from nova.tests.unit.scheduler import fakes
class SchedulerManagerInitTestCase(test.NoDBTestCase):
"""Test case for scheduler manager initiation."""
manager_cls = manager.SchedulerManager
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_default_schedulerdriver(self,
mock_init_agg,
mock_init_inst):
driver = self.manager_cls().driver
self.assertIsInstance(driver, filter_scheduler.FilterScheduler)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_chance_schedulerdriver(self,
mock_init_agg,
mock_init_inst):
self.flags(scheduler_driver='chance_scheduler')
driver = self.manager_cls().driver
self.assertIsInstance(driver, chance.ChanceScheduler)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_caching_schedulerdriver(self,
mock_init_agg,
mock_init_inst):
self.flags(scheduler_driver='caching_scheduler')
driver = self.manager_cls().driver
self.assertIsInstance(driver, caching_scheduler.CachingScheduler)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_nonexist_schedulerdriver(self,
mock_init_agg,
mock_init_inst):
self.flags(scheduler_driver='nonexist_scheduler')
self.assertRaises(RuntimeError, self.manager_cls)
# NOTE(Yingxin): Loading full class path is deprecated and should be
# removed in the N release.
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_classpath_to_schedulerdriver(self,
mock_init_agg,
mock_init_inst,
mock_warning):
self.flags(
scheduler_driver=
'nova.scheduler.chance.ChanceScheduler')
driver = self.manager_cls().driver
self.assertIsInstance(driver, chance.ChanceScheduler)
warn_args, kwargs = mock_warning.call_args
self.assertIn("DEPRECATED", warn_args[0])
class SchedulerManagerTestCase(test.NoDBTestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = fakes.FakeScheduler
driver_plugin_name = 'fake_scheduler'
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_plugin_name)
with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
fake_server_actions.stub_out_action_events(self.stubs)
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
def test_select_destination(self):
fake_spec = objects.RequestSpec()
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec)
select_destinations.assert_called_once_with(None, fake_spec)
# TODO(sbauza): Remove that test once the API v4 is removed
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_select_destination_with_old_client(self, from_primitives):
fake_spec = objects.RequestSpec()
from_primitives.return_value = fake_spec
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, request_spec='fake_spec',
filter_properties='fake_props')
select_destinations.assert_called_once_with(None, fake_spec)
def test_update_aggregates(self):
with mock.patch.object(self.manager.driver.host_manager,
'update_aggregates'
) as update_aggregates:
self.manager.update_aggregates(None, aggregates='agg')
update_aggregates.assert_called_once_with('agg')
def test_delete_aggregate(self):
with mock.patch.object(self.manager.driver.host_manager,
'delete_aggregate'
) as delete_aggregate:
self.manager.delete_aggregate(None, aggregate='agg')
delete_aggregate.assert_called_once_with('agg')
def test_update_instance_info(self):
with mock.patch.object(self.manager.driver.host_manager,
'update_instance_info') as mock_update:
self.manager.update_instance_info(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_info)
mock_update.assert_called_once_with(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_info)
def test_delete_instance_info(self):
with mock.patch.object(self.manager.driver.host_manager,
'delete_instance_info') as mock_delete:
self.manager.delete_instance_info(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_uuid)
mock_delete.assert_called_once_with(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_uuid)
def test_sync_instance_info(self):
with mock.patch.object(self.manager.driver.host_manager,
'sync_instance_info') as mock_sync:
self.manager.sync_instance_info(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_uuids)
mock_sync.assert_called_once_with(mock.sentinel.context,
mock.sentinel.host_name,
mock.sentinel.instance_uuids)
class SchedulerInitTestCase(test.NoDBTestCase):
"""Test case for base scheduler driver initiation."""
driver_cls = fakes.FakeScheduler
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_default_hostmanager(self,
mock_init_agg,
mock_init_inst):
manager = self.driver_cls().host_manager
self.assertIsInstance(manager, host_manager.HostManager)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_using_ironic_hostmanager(self,
mock_init_agg,
mock_init_inst):
self.flags(scheduler_host_manager='ironic_host_manager')
manager = self.driver_cls().host_manager
self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_init_nonexist_hostmanager(self,
mock_init_agg,
mock_init_inst):
self.flags(scheduler_host_manager='nonexist_host_manager')
self.assertRaises(RuntimeError, self.driver_cls)
class SchedulerTestCase(test.NoDBTestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = fakes.FakeScheduler
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(SchedulerTestCase, self).setUp()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.servicegroup_api = servicegroup.API()
@mock.patch('nova.objects.ServiceList.get_by_topic')
@mock.patch('nova.servicegroup.API.service_is_up')
def test_hosts_up(self, mock_service_is_up, mock_get_by_topic):
service1 = objects.Service(host='host1')
service2 = objects.Service(host='host2')
services = objects.ServiceList(objects=[service1, service2])
mock_get_by_topic.return_value = services
mock_service_is_up.side_effect = [False, True]
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
mock_get_by_topic.assert_called_once_with(self.context, self.topic)
calls = [mock.call(service1), mock.call(service2)]
self.assertEqual(calls, mock_service_is_up.call_args_list)
| bigswitch/nova | nova/tests/unit/scheduler/test_scheduler.py | Python | apache-2.0 | 11,503 | 0.000435 |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.views import serve
from django.views.decorators.cache import never_cache
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'apps.user.views.index', name='index'),
url(r'^login/$', 'apps.user.views.login', name='login'),
url(r'^logout/$', 'apps.user.views.logout', name='logout'),
url(r'^user/$', 'apps.user.views.user', name='user'),
url(r'^customer/((?P<id>[0-9]+)/)?$', 'apps.customer.views.customer', name='customer'),
url(r'^project/((?P<id>[0-9]+)/((?P<foreign>[a-z]+)/)?)?$', 'apps.project.views.project', name='project'),
url(r'^time/((?P<id>[0-9]+)/)?$', 'apps.project.views.time', name='time'),
url(r'^invoice/((?P<id>[0-9]+)/)?$', 'apps.invoice.views.invoice', name='invoice'),
)
# Serve all media files publically
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Skip cache for development
if settings.DEBUG:
urlpatterns += patterns('', url(r'^static/(?P<path>.*)$', never_cache(serve)))
| danijar/invoicepad | invoicepad/invoicepad/urls.py | Python | gpl-3.0 | 1,168 | 0.011986 |
from task import Task
class SyncTask(Task):
def __init__(self, *remotes):
'''Init this task with all of the remote tasks'''
super(SyncTask, self).__init__()
self.remote_tasks = []
for arg in remotes:
print arg
self.remote_tasks.append(arg)
for task in self.remote_tasks:
print task.name
self.update()
def update_from(self, task):
        '''Copy all of the attributes from another task and assign them to self.'''
self.description = task.description
self.name = task.name
self.lastModifiedDate = task.lastModifiedDate
# todo: fill out rest of attributes
def sync_changes(self):
for remote in self.remote_tasks:
remote.set_attributes(self)
remote.push_changes()
def update(self, fetch_latest=False):
# todo: updating each task from remote (trello) may be more costly then together as a list
if fetch_latest:
for remote in self.remote_tasks:
remote.update()
latest_update = self
for remote_task in self.remote_tasks:
if remote_task.modified_later_than(latest_update):
latest_update = remote_task
if latest_update is not self:
print "New modification from {source}".format(source=type(latest_update._remote_source).__name__)
self.update_from(latest_update)
self.sync_changes()
def reset_updated(self):
'''resets the updated flag on all of the remote tasks'''
for task in self.remote_tasks:
            task.updated = False
def has_remote(self, remote_task):
# todo should compare by uid of card/task/whatever
return remote_task in self.remote_tasks
def has_stale_remotes(self):
'''returns a bool indicating if any of the remotes are stale since the last update'''
for task in self.remote_tasks:
if not task.updated:
return True
self.reset_updated()
return False
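# Illustrative usage sketch (added comment, not part of the original module).
# The remote task classes named below are hypothetical; any objects exposing
# the remote-task interface used above (update(), set_attributes(),
# push_changes(), modified_later_than(), name/description/lastModifiedDate)
# would work:
#
#     trello_task = TrelloTask(card)
#     things_task = ThingsTask(todo)
#     synced = SyncTask(trello_task, things_task)
#     synced.update(fetch_latest=True)  # pulls remotes, propagates newest edit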
| bhylak/trello_things3_sync | tasks/sync_task.py | Python | mit | 2,101 | 0.002856 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#Pytigon - wxpython and django application framework
#author: "Slawomir Cholaj (slawomir.cholaj@gmail.com)"
#copyright: "Copyright (C) ????/2012 Slawomir Cholaj"
#license: "LGPL 3.0"
#version: "0.1a"
import wx
from autocomplete import TextCtrlAutoComplete
from pytigon_lib.schtools import schjson
from pytigon_gui.guictrl.ctrl import SchBaseCtrl
import pytigon_gui.guictrl.ctrl
class DbDict(object):
def __init__(self, href):
self.href = href
self.tab = ['']
def filter(self, parent, f):
http = wx.GetApp().get_http(parent)
response = http.get(self, str(self.href), {'query': f.encode('utf-8')})
s = response.str()
try:
self.tab = schjson.loads(s)
except:
self.tab = []
self.tab2 = []
for pos in self.tab:
self.tab2.append((pos['value'], ))
self.tab = self.tab2
def __iter__(self):
for x in self.tab:
yield x
def __getitem__(self, id):
if id < len(self.tab):
return self.tab[id]
else:
return None
def __len__(self):
return len(self.tab)
def __contains__(self, x):
if x in self.tab:
return True
else:
return False
class Autocomplete(TextCtrlAutoComplete, SchBaseCtrl):
def __init__(self, parent, **kwds):
SchBaseCtrl.__init__(self, parent, kwds)
self.dynamic_choices = DbDict(self.src)
if 'style' in kwds:
style = kwds['style']
style = style | wx.TE_MULTILINE | wx.TE_PROCESS_ENTER
kwds['style'] = style
else:
kwds['style'] = wx.TE_MULTILINE | wx.TE_PROCESS_ENTER
kwds['choices'] = self.dynamic_choices
TextCtrlAutoComplete.__init__(self, parent, colNames=('label', 'value'), **kwds)
self.SetEntryCallback(self.set_dynamic_choices)
self.SetMatchFunction(self.match)
if 'data' in self.param:
self.SetValue(self.param['data'].encode('utf-8'))
def SetValue(self, value):
if value.__class__ == str:
return TextCtrlAutoComplete.SetValue(self, value.decode('utf-8'))
else:
return TextCtrlAutoComplete.SetValue(self, value)
def on_key_down(self, event):
kc = event.GetKeyCode()
if kc in (wx.WXK_LEFT, wx.WXK_RIGHT):
event.Skip()
else:
super(Autocomplete, self).onKeyDown(event)
def match(self, text, choice):
t = text.lower()
c = choice.lower()
if c.startswith(t):
return True
if c.startswith(r'http://'):
c = c[7:]
if c.startswith(t):
return True
if c.startswith('www.'):
c = c[4:]
return c.startswith(t)
def set_dynamic_choices(self):
ctrl = self
text = ctrl.GetValue().lower()
self.dynamic_choices.filter(self.GetParent(), text)
if len(self.dynamic_choices) > 1:
ctrl.SetMultipleChoices(self.dynamic_choices)
else:
if len(self.dynamic_choices) > 0:
ctrl.SetChoices(self.dynamic_choices[0])
def _set_value_from_selected(self):
x = TextCtrlAutoComplete._setValueFromSelected(self)
return x
def _set_value_from_selected2(self):
sel = self.dropdownlistbox.GetFirstSelected()
if sel > -1:
if self._colFetch != -1:
col = self._colFetch
else:
col = self._colSearch
itemtext = self.dropdownlistbox.GetItem(sel, col).GetText()
self.SetValue(itemtext)
def init_plugin(app, mainframe, desktop, mgr, menubar, toolbar, accel):
pytigon_gui.guictrl.ctrl.AUTOCOMPLETE = Autocomplete
| Splawik/pytigon | pytigon/appdata/plugins/standard/autocomplete/__init__.py | Python | lgpl-3.0 | 4,308 | 0.001857 |
import imp

am = imp.load_source('am', 'artifact-manager')
| paulovn/artifact-manager | test/__init__.py | Python | gpl-2.0 | 50 | 0.04 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
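# Expected encodings for the helper above (added comment, not part of the
# original test): values below 0xfd fit in a single byte, larger values get a
# 0xfd/0xfe/0xff prefix followed by a little-endian integer:
#
#     varlenEncode(0x10)    == b'\x10'
#     varlenEncode(0xfd)    == b'\xfd\xfd\x00'
#     varlenEncode(0x1000)  == b'\xfd\x00\x10'
#     varlenEncode(0x10000) == b'\xfe\x00\x00\x01\x00'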
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(AureusTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
node.generate(1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
| hideoussquid/aureus-12-bitcore | qa/rpc-tests/getblocktemplate_proposals.py | Python | mit | 6,330 | 0.005055 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 12:05:24 2015
@author: jean
"""
def hill_climbing(neighborhood, x):
y = neighborhood.randomNeighbor(x)
if y is not None and y.isBetterThan(x):
return y
return x
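# --- Illustrative toy run (added sketch, not part of the original module) ---
# hill_climbing only needs a neighborhood exposing randomNeighbor() and
# candidates exposing isBetterThan(); the _Toy* classes below are hypothetical
# stand-ins.
if __name__ == '__main__':
    import random

    class _ToyCandidate(object):
        def __init__(self, value):
            self.value = value

        def isBetterThan(self, other):
            # maximize -(value - 3)^2, so the optimum sits at value == 3
            return -(self.value - 3) ** 2 > -(other.value - 3) ** 2

    class _ToyNeighborhood(object):
        def randomNeighbor(self, x):
            # step of +/- 1 around the current candidate
            return _ToyCandidate(x.value + random.choice([-1, 1]))

    current = _ToyCandidate(0)
    neighborhood = _ToyNeighborhood()
    for _ in range(200):
        current = hill_climbing(neighborhood, current)
    print(current.value)  # should end up at (or very near) 3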
| jeanpm/pof | methods/hc.py | Python | gpl-2.0 | 234 | 0.008547 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner databases create."""
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import databases
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.spanner import flags
class Create(base.CreateCommand):
"""Cloud Spanner databases create command."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.Instance(positional=False).AddToParser(parser)
flags.Database().AddToParser(parser)
flags.Ddl(help_text='Semi-colon separated DDL (data definition language) '
'statements to run inside the '
'newly created database. If there is an error in any statement, '
'the database is not created. Full DDL specification is at '
'https://cloud.google.com/spanner/docs/data-definition-language'
).AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
op = databases.Create(
args.instance, args.database, flags.FixDdl(args.ddl or []))
if args.async:
return op
return database_operations.Await(op, 'Creating database')
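# Illustrative CLI invocation handled by this command (added comment, not part
# of the original module); the instance, database and DDL values are
# hypothetical:
#
#     gcloud spanner databases create example-db \
#         --instance=test-instance \
#         --ddl='CREATE TABLE Singers (SingerId INT64 NOT NULL) PRIMARY KEY (SingerId)'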
| Sorsly/subtle | google-cloud-sdk/lib/surface/spanner/databases/create.py | Python | mit | 2,377 | 0.002945 |
# -*- coding: utf-8 -*-
from odoo.exceptions import ValidationError
from odoo import models, api, _
class ProductProduct(models.Model):
_inherit = 'product.product'
_rec_name = 'config_name'
"""
    Copied from product_configurator so that the price is shown using the
    price list.
    To fix:
        - Extra price for attribute values.
        - Extra price for custom values.
"""
@api.multi
def _compute_product_price_extra(self):
"""Compute price of configurable products as sum
of products related to attribute values picked"""
products = self.filtered(lambda x: not x.config_ok)
pricelist = self.env.user.partner_id.property_product_pricelist
configurable_products = self - products
if products:
prices = super(ProductProduct, self)._compute_product_price_extra()
conversions = self._get_conversions_dict()
for product in configurable_products:
lst_price = product.product_tmpl_id.lst_price
value_ids = product.attribute_value_ids.ids
# TODO: Merge custom values from products with cfg session
# and use same method to retrieve parsed custom val dict
custom_vals = {}
for val in product.value_custom_ids:
custom_type = val.attribute_id.custom_type
if custom_type in conversions:
try:
custom_vals[val.attribute_id.id] = conversions[
custom_type](val.value)
except:
raise ValidationError(
_("Could not convert custom value '%s' to '%s' on "
"product variant: '%s'" % (val.value,
custom_type,
product.display_name))
)
else:
custom_vals[val.attribute_id.id] = val.value
#
# prices = product.product_tmpl_id.get_cfg_price(
# value_ids, custom_vals)
product_price = pricelist.get_product_price(product, 1, 1)
# product.price_extra = prices['total'] - prices['taxes'] - lst_price
product.price_extra = product_price - lst_price | microcom/odoo-product-configurator | product_configurator_use_default_pricelist/models/product.py | Python | agpl-3.0 | 2,350 | 0.001277 |
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Framework repository database module}.
Entropy repositories (server and client) are implemented as relational
databases. Currently, EntropyRepository class is the object that wraps
sqlite3 database queries and repository logic: there are no more
abstractions between the two because there is only one implementation
available at this time. In future, entropy.db will feature more backends
such as MySQL embedded, SparQL, remote repositories support via TCP socket,
etc. This will require a new layer between the repository interface now
offered by EntropyRepository and the underlying data retrieval logic.
Every repository interface available inherits from EntropyRepository
class and has to reimplement its own Schema subclass and its get_init
method (see EntropyRepository documentation for more information).
I{EntropyRepository} is the sqlite3 implementation of the repository
interface, as written above.
"""
from entropy.db.sqlite import EntropySQLiteRepository as EntropyRepository
from entropy.db.mysql import EntropyMySQLRepository
from entropy.db.cache import EntropyRepositoryCacher
__all__ = ["EntropyRepository", "EntropyMySQLRepository",
"EntropyRepositoryCacher"]
| kidaa/entropy | lib/entropy/db/__init__.py | Python | gpl-2.0 | 1,423 | 0 |
#!/usr/bin/env python
"""API Extension (requires the ORM extension).
This extension provides a base class to create API views.
Setup is as follows:
.. code:: python
from kit import Flask
from kit.ext import API
app = Flask(__name__)
api = API(app)
View = api.View # the base API view
Views can then be created for models as follows:
.. code:: python
# Cat is a subclass of kit.ext.orm.Base
class CatView(View):
__model__ = Cat
This view will create the following hooks:
* ``/cats``
* ``/cats/<id>``
Another slighly more complex example:
.. code:: python
# House is a subclass of kit.ext.orm.Base
class HouseView(View):
__model__ = House
methods = ['GET', 'POST']
subviews = ['cats']
# we register the api views
api.register(app)
This view will create the following hooks:
* ``/houses``
* ``/houses/<id>``
* ``/houses/<id>/cats``
* ``/houses/<id>/cats/<position>``
These are only two simple ways to add a view. Please refer to the documentation
for :class:`kit.ext.api.BaseView` for the list of all available options.
"""
from flask import Blueprint, jsonify, request
from sqlalchemy.orm import class_mapper, Query
from time import time
from werkzeug.exceptions import HTTPException
from .orm import Model
from ..util import make_view, query_to_models, View as _View, _ViewMeta
class APIError(HTTPException):
"""Thrown when an API call is invalid.
The following error codes can occur:
* ``400 Bad Request`` if the request is badly formulated (wrong query
parameters, invalid form data, etc.)
* ``403 Forbidden`` if the request is not authorized by the server
* ``404 Not Found`` if the request refers to a non-existent resource
"""
def __init__(self, code, content):
self.code = code
self.content = content
super(APIError, self).__init__(content)
def __repr__(self):
return '<APIError %r: %r>' % (self.message, self.content)
class API(object):
"""The main API object.
:param project: the project against which the extension will be registered
:type project: kit.project.Project
:param url_prefix: the blueprint URL prefix
:type url_prefix: str
:param index_view: whether or not to create a splash page for the api
:type index_view: bool
:param parser_options: dictionary of options to create the default request
:class:`kit.ext.api.Parser`
:type parser_options: dict
"""
def __init__(self, flask_app, url_prefix='api', parser_options=None):
parser_options = parser_options or {}
self.url_prefix = url_prefix
self.blueprint = Blueprint(
url_prefix,
'%s.%s' % (flask_app.name, url_prefix),
url_prefix='/%s' % url_prefix,
)
self.View = make_view(
self.blueprint,
view_class=View,
parser=Parser(**parser_options)
)
def register(self, flask_app, index_view=True):
if index_view:
@self.blueprint.route('/')
def index():
return jsonify({
'available_endpoints': sorted(
'%s (%s)' % (r.rule, ', '.join(str(meth) for meth in r.methods))
for r in flask_app.url_map.iter_rules()
if r.endpoint.startswith('%s.' % self.url_prefix)
)
})
flask_app.register_blueprint(self.blueprint)
class _ApiViewMeta(_ViewMeta):
"""To register classes with the API on definition.
Automatically creates the ``endpoint``, ``base_url`` and ``rules`` for the
view from the ``__model__`` attribute.
Each route is then registered on the bound application (the current API
blueprint here).
"""
def __new__(cls, name, bases, dct):
model = dct.get('__model__', None)
if model is not None:
if not issubclass(model, Model):
raise ValueError('Api views can only be used with Orm models.')
dct.setdefault('endpoint', model.__tablename__)
base_url = dct.setdefault('base_url', model.__tablename__)
collection_route = '/%s/' % (base_url, )
model_route = '/%s/%s' % (
base_url,
'/'.join('<%s>' % k.name for k in class_mapper(model).primary_key)
)
dct['rules'] = {
collection_route: ['GET', 'POST'],
model_route: ['GET', 'PUT', 'DELETE'],
}
return super(_ApiViewMeta, cls).__new__(cls, name, bases, dct)
class View(_View):
"""Base API view.
To customize, override the ``get``, ``post``, etc. methods.
"""
__metaclass__ = _ApiViewMeta
#: orm.Model class
__model__ = None
#: Base URL (will default to the model's tablename).
base_url = None
#: Allowed methods.
methods = frozenset(['GET'])
#: Request parser.
parser = None
#: Which relationship endpoints to create (these allow GET requests).
#: Can be ``True`` (all relationships) or a list of relationship names.
#: Only relationships with ``lazy`` set to ``'dynamic'``, ``'select'`` or
#: ``True`` can have subroutes. All eagerly loaded relationships are simply
#: available directly on the model.
subviews = []
@classmethod
def register_view(cls):
"""Create the URL routes for the view.
Standard :class:`kit.util.View` implementation plus subview support.
"""
super(View, cls).register_view()
if cls.subviews:
model = cls.__model__
all_keys = set(
model._get_relationships(
lazy=['dynamic', True, 'select'],
uselist=True
).keys() +
model._get_association_proxies().keys()
)
if cls.subviews == True:
keys = all_keys
else:
keys = set(cls.subviews)
if keys - all_keys:
raise ValueError('%s invalid for subviews' % (keys - all_keys, ))
keys = all_keys & keys
for key in keys:
collection_route = '/%s/%s/%s/' % (
cls.base_url,
'/'.join(
'<%s>' % k.name for k in class_mapper(model).primary_key
),
key,
)
model_route = '/%s/%s/%s/<position>' % (
cls.base_url,
'/'.join(
'<%s>' % k.name for k in class_mapper(model).primary_key
),
key
)
make_view(
cls.__app__,
view_class=_RelationshipView,
view_name='%s_%s' % (cls.endpoint, key),
__model__=model,
__assoc_key__=key,
parser=cls.parser,
endpoint='%s_%s' % (cls.endpoint, key),
methods=['GET', ],
rules={
collection_route: ['GET', ],
model_route: ['GET', ],
},
)
def get(self, **kwargs):
"""GET request handler."""
if kwargs:
model = self.__model__.retrieve(from_key=True, **kwargs)
if not model:
raise APIError(404, 'Not found')
return self.parser.jsonify(model)
else:
return self.parser.jsonify(self.__model__.q)
def post(self):
"""POST request handler."""
    if not self.validate(request.json):
raise APIError(400, 'Invalid POST parameters')
model = self.__model__(**request.json)
model.flush()
return self.parser.jsonify(model)
def put(self, **kwargs):
"""PUT request handler."""
model = self.__model__.retrieve(from_key=True, **kwargs)
if not model:
raise APIError(404, 'Not found')
    if not self.validate(request.json, model):
raise APIError(400, 'Invalid PUT parameters')
for k, v in request.json.items():
setattr(model, k, v)
return self.parser.jsonify(model)
def delete(self, **kwargs):
"""DELETE request handler."""
model = self.__model__.retrieve(from_key=True, **kwargs)
if not model:
raise APIError(404, 'Not found')
model.delete()
return self.parser.jsonify(model)
def validate(self, json, model=None):
"""Validation method.
:param json: a dictionary of attributes
:type json: dict
:param model: ``None`` if it is POST request, and the model instance to be
modified if it is a PUT request.
:type model: None or kit.ext.orm.BaseModel
:rtype: bool
This method is called on each POST and PUT request. Override it to
implement your own validation logic: return ``True`` when the input is
valid and ``False`` otherwise. Default implementation accepts everything.
"""
return True
class _RelationshipView(_View):
"""Relationship View."""
__model__ = None
__assoc_key__ = None
def get(self, **kwargs):
"""GET request handler."""
position = kwargs.pop('position', None)
parent = self.__model__.retrieve(from_key=True, **kwargs)
if not parent:
raise APIError(404, 'Parent not found')
collection = getattr(parent, self.__assoc_key__)
if position:
position = int(position) - 1 # model_position is 1 indexed
if isinstance(collection, Query):
model = collection.offset(position).limit(1).first()
else:
collection = collection[position:(position + 1)]
model = collection[0] if collection else None
if not model:
raise APIError(404, 'Not found')
return self.parser.jsonify(model)
else:
return self.parser.jsonify(collection)
class Parser(object):
"""The request parameter parser.
:param default_depth: the default depth models are jsonified to. ``0`` yields
an empty dictionary
:type default_depth: int
:param max_depth: the maximum depth allowed in a query. ``0`` means no limit.
:type max_depth: int
:param default_limit: the default number of results returned per query
:type default_limit: int
:param max_limit: the maximum number of results returned by a query. ``0``
means no limit.
:type max_limit: int
:param sep: the separator used for filters and sort parameters
:type sep: str
This class has a single method :meth:``jsonify`` which is used to parse a
model or collection and return the serialized response.
"""
def __init__(self, default_depth=1, max_depth=0, default_limit=20,
max_limit=0, sep=';'):
self.options = {
'default_depth': default_depth,
'max_depth': max_depth,
'default_limit': default_limit,
'max_limit': max_limit,
'sep': sep,
}
def jsonify(self, data, data_key='data', meta_key='meta',
include_request=True, include_time=True, include_matches=True, **kwargs):
"""Parses the data and returns the serialized response.
:param data: data. At this time, only instances, and lists of instances of
``kit.util.sqlalchemy.Model``, along with instances of
``kit.util.sqlalchemy.Query`` are valid.
:type data: model or collection
:param data_key: key where the serialized data will go
:type data_key: str
:param meta_key: key where the metadata will go
:type meta_key: str
:param include_request: whether or not to include the issued request
information
:type include_request: bool
:param include_time: whether or not to include processing time
:type include_time: bool
:param include_matches: whether or not to include the total number of
results from the data (useful if ``data`` is a collection)
:type include_matches: bool
:rtype: Flask response
Any keyword arguments will be included with the metadata.
"""
depth = request.args.get('depth', self.options['default_depth'], int)
max_depth = self.options['max_depth']
if max_depth:
depth = min(depth, max_depth)
start = time()
if isinstance(data, Model):
data = data.to_json(depth=depth)
match = 1
else:
col, matches = self._get_collection(data)
data = [e.to_json(depth=depth) for e in col if e]
match = {'total': matches, 'returned': len(data)}
rv = {data_key: data, meta_key: kwargs}
if include_matches:
rv[meta_key]['matches'] = match
if include_request:
rv[meta_key]['request'] = {
'base_url': request.base_url,
'method': request.method,
'values': request.values,
}
if include_time:
rv[meta_key]['parsing_time'] = time() - start
return jsonify(rv)
def _get_collection(self, collection):
"""Parse query and return JSON.
:param collection: the query or list to be transformed to JSON
:type collection: kit.ext.orm.Query, list
:rtype: tuple
Returns a tuple ``(collection, match)``:
    * ``collection`` is the filtered, sorted, offset and limited collection.
* ``match`` is the total number of results from the filtered query
"""
model = self._get_model_class(collection)
raw_filters = request.args.getlist('filter')
raw_sorts = request.args.getlist('sort')
offset = request.args.get('offset', 0, int)
limit = request.args.get('limit', self.options['default_limit'], int)
max_limit = self.options['max_limit']
if max_limit:
limit = min(limit, max_limit) if limit else max_limit
if isinstance(collection, Query):
sep = self.options['sep']
for raw_filter in raw_filters:
try:
key, op, value = raw_filter.split(sep, 3)
except ValueError:
raise APIError(400, 'Invalid filter: %s' % raw_filter)
column = getattr(model, key, None)
if not column: # TODO check if is actual column
raise APIError(400, 'Invalid filter column: %s' % key)
if op == 'in':
filt = column.in_(value.split(','))
else:
try:
attr = filter(
lambda e: hasattr(column, e % op),
['%s', '%s_', '__%s__']
)[0] % op
except IndexError:
raise APIError(400, 'Invalid filter operator: %s' % op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
collection = collection.filter(filt)
for raw_sort in raw_sorts:
try:
key, order = raw_sort.split(sep)
except ValueError:
raise APIError(400, 'Invalid sort: %s' % raw_sort)
if not order in ['asc', 'desc']:
raise APIError(400, 'Invalid sort order: %s' % order)
column = getattr(model, key, None)
if column:
collection = collection.order_by(getattr(column, order)())
else:
raise APIError(400, 'Invalid sort column: %s' % key)
if hasattr(collection, 'fast_count'):
matches = collection.fast_count()
else:
matches = collection.count()
if offset:
collection = collection.offset(offset)
if limit:
collection = collection.limit(limit)
else:
if raw_filters or raw_sorts:
raise APIError(400, 'Filter and sorts not implemented for lists')
matches = len(collection)
if limit:
collection = collection[offset:(offset + limit)]
else:
collection = collection[offset:]
return collection, matches
def _get_model_class(self, collection):
"""Return corresponding model class from collection."""
if isinstance(collection, Query):
models = query_to_models(collection)
# only tested for _BaseQueries and associated count queries
assert len(models) < 2, 'Invalid query'
if not len(models):
# this is a count query
return collection._select_from_entity
else:
# this is a Query
return models[0]
else:
return collection[0].__class__
| mtth/kit | kit/ext/api.py | Python | mit | 15,315 | 0.008554 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-16 17:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bookings', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField()),
('qrcode', models.ImageField(blank=True, null=True, upload_to='qrcode')),
('booking', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.Booking')),
],
),
]
| aniruddha-adhikary/bookit | bookit/bookings/migrations/0002_ticket.py | Python | mit | 794 | 0.003778 |
#! /usr/bin/env python3
assert __name__ == '__main__'
'''
To update ANGLE in Gecko, use Windows with git-bash, and setup depot_tools, python2, and
python3. Because depot_tools expects `python` to be `python2` (shame!), python2 must come
before python3 in your path.
Upstream: https://chromium.googlesource.com/angle/angle
Our repo: https://github.com/mozilla/angle
It has branches like 'firefox-60' which is the branch we use for pulling into
Gecko with this script.
This script leaves a record of the merge-base and cherry-picks that we pull into
Gecko. (gfx/angle/cherries.log)
ANGLE<->Chrome version mappings are here: https://omahaproxy.appspot.com/
An easy choice is to grab Chrome's Beta's ANGLE branch.
## Usage
Prepare your env:
~~~
export PATH="$PATH:/path/to/depot_tools"
~~~
If this is a new repo, don't forget:
~~~
# In the angle repo:
./scripts/bootstrap.py
gclient sync
~~~
Update: (in the angle repo)
~~~
# In the angle repo:
/path/to/gecko/gfx/angle/update-angle.py origin/chromium/XXXX
git push moz # Push the firefox-XX branch to github.com/mozilla/angle
~~~
'''
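# Illustrative invocation (the output directory and target names below are
# hypothetical; roots must be GN labels of the form //:name):
#
#     python3 export_targets.py out/Release //:translator //:libEGL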
import json
import os
import pathlib
import re
import shutil
import subprocess
import sys
from typing import * # mypy annotations
REPO_DIR = pathlib.Path.cwd()
GN_ENV = dict(os.environ)
# We need to set DEPOT_TOOLS_WIN_TOOLCHAIN to 0 for non-Googlers, but otherwise
# leave it unset since vs_toolchain.py assumes that the user is a Googler with
# the Visual Studio files in depot_tools if DEPOT_TOOLS_WIN_TOOLCHAIN is not
# explicitly set to 0.
vs_found = False
for directory in os.environ['PATH'].split(os.pathsep):
vs_dir = os.path.join(directory, 'win_toolchain', 'vs_files')
if os.path.exists(vs_dir):
vs_found = True
break
if not vs_found:
GN_ENV['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0'
if len(sys.argv) < 3:
sys.exit('Usage: export_targets.py OUT_DIR ROOTS...')
(OUT_DIR, *ROOTS) = sys.argv[1:]
for x in ROOTS:
assert x.startswith('//:')
# ------------------------------------------------------------------------------
def run_checked(*args, **kwargs):
print(' ', args, file=sys.stderr)
sys.stderr.flush()
return subprocess.run(args, check=True, **kwargs)
def sortedi(x):
return sorted(x, key=str.lower)
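# Depth-first walk over a DAG, visiting each key once. pre_recurse_func(key)
# must return a 1- or 2-tuple: (next_keys,) or (next_keys, post_recurse_func);
# the optional post function is called after the key's children were visited.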
def dag_traverse(root_keys: Sequence[str], pre_recurse_func: Callable[[str], list]):
visited_keys: Set[str] = set()
def recurse(key):
if key in visited_keys:
return
visited_keys.add(key)
t = pre_recurse_func(key)
try:
(next_keys, post_recurse_func) = t
except ValueError:
(next_keys,) = t
post_recurse_func = None
for x in next_keys:
recurse(x)
if post_recurse_func:
post_recurse_func(key)
return
for x in root_keys:
recurse(x)
return
# ------------------------------------------------------------------------------
print('Importing graph', file=sys.stderr)
try:
p = run_checked('gn', 'desc', '--format=json', str(OUT_DIR), '*', stdout=subprocess.PIPE,
env=GN_ENV, shell=(True if sys.platform == 'win32' else False))
except subprocess.CalledProcessError:
sys.stderr.buffer.write(b'"gn desc" failed. Is depot_tools in your PATH?\n')
exit(1)
# -
print('\nProcessing graph', file=sys.stderr)
descs = json.loads(p.stdout.decode())
# Ready to traverse
# ------------------------------------------------------------------------------
LIBRARY_TYPES = ('shared_library', 'static_library')
def flattened_target(target_name: str, descs: dict, stop_at_lib: bool =True) -> dict:
flattened = dict(descs[target_name])
EXPECTED_TYPES = LIBRARY_TYPES + ('source_set', 'group', 'action')
def pre(k):
dep = descs[k]
dep_type = dep['type']
deps = dep['deps']
if stop_at_lib and dep_type in LIBRARY_TYPES:
return ((),)
if dep_type == 'copy':
assert not deps, (target_name, dep['deps'])
else:
assert dep_type in EXPECTED_TYPES, (k, dep_type)
for (k,v) in dep.items():
if type(v) in (list, tuple, set):
# This is a workaround for
# https://bugs.chromium.org/p/gn/issues/detail?id=196, where
# the value of "public" can be a string instead of a list.
existing = flattened.get(k, [])
if isinstance(existing, str):
existing = [existing]
flattened[k] = sortedi(set(existing + v))
else:
#flattened.setdefault(k, v)
pass
return (deps,)
dag_traverse(descs[target_name]['deps'], pre)
return flattened
# ------------------------------------------------------------------------------
# Check that includes are valid. (gn's version of this check doesn't seem to work!)
INCLUDE_REGEX = re.compile(b'(?:^|\\n) *# *include +([<"])([^>"]+)[>"]')
assert INCLUDE_REGEX.match(b'#include "foo"')
assert INCLUDE_REGEX.match(b'\n#include "foo"')
# Most of these are ignored because this script does not currently handle
# #includes in #ifdefs properly, so they will erroneously be marked as being
# included, but not part of the source list.
IGNORED_INCLUDES = {
b'absl/container/flat_hash_map.h',
b'compiler/translator/TranslatorESSL.h',
b'compiler/translator/TranslatorGLSL.h',
b'compiler/translator/TranslatorHLSL.h',
b'compiler/translator/TranslatorMetal.h',
b'compiler/translator/TranslatorVulkan.h',
b'contrib/optimizations/slide_hash_neon.h',
b'dirent_on_windows.h',
b'dlopen_fuchsia.h',
b'kernel/image.h',
b'libANGLE/renderer/d3d/d3d11/winrt/NativeWindow11WinRT.h',
b'libANGLE/renderer/d3d/DeviceD3D.h',
b'libANGLE/renderer/d3d/DisplayD3D.h',
b'libANGLE/renderer/d3d/RenderTargetD3D.h',
b'libANGLE/renderer/gl/apple/DisplayApple_api.h',
b'libANGLE/renderer/gl/cgl/DisplayCGL.h',
b'libANGLE/renderer/gl/eagl/DisplayEAGL.h',
b'libANGLE/renderer/gl/egl/android/DisplayAndroid.h',
b'libANGLE/renderer/gl/egl/DisplayEGL.h',
b'libANGLE/renderer/gl/egl/gbm/DisplayGbm.h',
b'libANGLE/renderer/gl/glx/DisplayGLX.h',
b'libANGLE/renderer/gl/wgl/DisplayWGL.h',
b'libANGLE/renderer/metal/DisplayMtl_api.h',
b'libANGLE/renderer/null/DisplayNULL.h',
b'libANGLE/renderer/vulkan/android/AHBFunctions.h',
b'libANGLE/renderer/vulkan/android/DisplayVkAndroid.h',
b'libANGLE/renderer/vulkan/fuchsia/DisplayVkFuchsia.h',
b'libANGLE/renderer/vulkan/ggp/DisplayVkGGP.h',
b'libANGLE/renderer/vulkan/mac/DisplayVkMac.h',
b'libANGLE/renderer/vulkan/win32/DisplayVkWin32.h',
b'libANGLE/renderer/vulkan/xcb/DisplayVkXcb.h',
b'loader_cmake_config.h',
b'optick.h',
b'spirv-tools/libspirv.h',
b'third_party/volk/volk.h',
b'vk_loader_extensions.c',
b'vk_snippets.h',
b'vulkan_android.h',
b'vulkan_beta.h',
b'vulkan_directfb.h',
b'vulkan_fuchsia.h',
b'vulkan_ggp.h',
b'vulkan_ios.h',
b'vulkan_macos.h',
b'vulkan_metal.h',
b'vulkan_vi.h',
b'vulkan_wayland.h',
b'vulkan_win32.h',
b'vulkan_xcb.h',
b'vulkan_xlib.h',
b'vulkan_xlib_xrandr.h',
# rapidjson adds these include stubs into their documentation
# comments. Since the script doesn't skip comments they are
# erroneously marked as valid includes
b'rapidjson/...',
}
IGNORED_INCLUDE_PREFIXES = {
b'android',
b'Carbon',
b'CoreFoundation',
b'CoreServices',
b'IOSurface',
b'mach',
b'mach-o',
b'OpenGL',
b'pci',
b'sys',
b'wrl',
b'X11',
}
IGNORED_DIRECTORIES = {
'//buildtools/third_party/libc++',
'//third_party/abseil-cpp',
'//third_party/SwiftShader',
}
def has_all_includes(target_name: str, descs: dict) -> bool:
for ignored_directory in IGNORED_DIRECTORIES:
if target_name.startswith(ignored_directory):
return True
flat = flattened_target(target_name, descs, stop_at_lib=False)
acceptable_sources = flat.get('sources', []) + flat.get('outputs', [])
acceptable_sources = {x.rsplit('/', 1)[-1].encode() for x in acceptable_sources}
ret = True
desc = descs[target_name]
for cur_file in desc.get('sources', []):
assert cur_file.startswith('/'), cur_file
if not cur_file.startswith('//'):
continue
cur_file = pathlib.Path(cur_file[2:])
text = cur_file.read_bytes()
for m in INCLUDE_REGEX.finditer(text):
if m.group(1) == b'<':
continue
include = m.group(2)
if include in IGNORED_INCLUDES:
continue
try:
(prefix, _) = include.split(b'/', 1)
if prefix in IGNORED_INCLUDE_PREFIXES:
continue
except ValueError:
pass
include_file = include.rsplit(b'/', 1)[-1]
if include_file not in acceptable_sources:
#print(' acceptable_sources:')
#for x in sorted(acceptable_sources):
# print(' ', x)
print('Warning in {}: {}: Invalid include: {}'.format(target_name, cur_file, include), file=sys.stderr)
ret = False
#print('Looks valid:', m.group())
continue
return ret
# -
# Gather real targets:
def gather_libraries(roots: Sequence[str], descs: dict) -> Set[str]:
libraries = set()
def fn(target_name):
cur = descs[target_name]
print(' ' + cur['type'], target_name, file=sys.stderr)
assert has_all_includes(target_name, descs), target_name
if cur['type'] in ('shared_library', 'static_library'):
libraries.add(target_name)
return (cur['deps'], )
dag_traverse(roots, fn)
return libraries
# -
libraries = gather_libraries(ROOTS, descs)
print(f'\n{len(libraries)} libraries:', file=sys.stderr)
for k in libraries:
print(f' {k}', file=sys.stderr)
print('\nstdout begins:', file=sys.stderr)
sys.stderr.flush()
# ------------------------------------------------------------------------------
# Output
out = {k: flattened_target(k, descs) for k in libraries}
for (k,desc) in out.items():
dep_libs: Set[str] = set()
for dep_name in set(desc['deps']):
dep = descs[dep_name]
if dep['type'] in LIBRARY_TYPES:
dep_libs.add(dep_name[3:])
desc['deps'] = sortedi(dep_libs)
json.dump(out, sys.stdout, indent=' ')
exit(0)
| ppy/angle | scripts/export_targets.py | Python | bsd-3-clause | 10,578 | 0.003781 |
# coding: utf-8
import decimal
from . import settings as price_settings
from .utils import price_amount
from .currency import Currency
from .tax import NO_TAX
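# Illustrative usage sketch (amounts, currency and the ``vat_19`` tax object
# are made up; any object exposing apply()/get_tax() works as the tax):
#
#     price = Price('10.00', currency='EUR', tax=vat_19)  # net + tax -> gross
#     total = price * 3 + Price('5.00', currency='EUR')   # keeps per-tax amounts
#     total.formatted_gross, total.tax, total.applied_taxes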
class Price(object):
def __init__(self, net, currency=None, tax=None, gross=None):
if currency is None:
currency = price_settings.DEFAULT_CURRENCY
if not isinstance(currency, Currency):
currency = Currency(currency)
self.currency = currency
if not isinstance(net, decimal.Decimal):
net = decimal.Decimal(str(net) or 'NaN')
# support tax models
if tax is not None and hasattr(tax, 'get_tax'):
tax = tax.get_tax()
# calculate tax, gross
self._applied_taxes = {}
if not tax is None and not gross is None:
# we need to trust the external calculation here
if not isinstance(gross, decimal.Decimal):
gross = decimal.Decimal(str(gross or '') or 'NaN')
elif not tax is None:
# self.net is still not rounded here, so tax_amount is super-precise ;-)
gross = tax.apply(net)
elif not gross is None:
raise RuntimeError('cannot specify gross amount without tax')
else:
# no tax applied
gross = net
tax = NO_TAX
self._applied_taxes[tax.unique_id] = (tax, net, gross)
self._recalculate_overall()
def _recalculate_overall(self):
# we pass net/gross through price_amount as late as possible, to avoid
# removing decimal_places we might need to calculate the right
# gross or tax. self._applied_taxes always stores the raw values without
# any rounding. This way we do not loose precision on calculation.
net = decimal.Decimal('0')
gross = decimal.Decimal('0')
for tax, tax_net, tax_gross in self._applied_taxes.values():
            # we have to round each net/gross on its own, otherwise we would
            # risk rounding mismatches (e.g. 0.3 + 0.3: rounding each part
            # gives 0 + 0 = 0, while rounding the sum 0.6 gives 1)
net += price_amount(tax_net, self.currency)
gross += price_amount(tax_gross, self.currency)
self.net = net
self.gross = gross
def __str__(self):
from django.utils.encoding import smart_str
return smart_str(unicode(self))
def __unicode__(self):
from django.utils.translation import ugettext
return ugettext('%(currency)s %(amount)s') % {
'amount': self.formatted_gross,
'currency': self.formatted_currency,
}
def copy(self):
from copy import copy
result = copy(self)
result._applied_taxes = self._applied_taxes.copy()
return result
def rounded(self):
applied_taxes = {}
for tax, net, gross in self._applied_taxes.values():
applied_taxes[tax.unique_id] = (
tax,
price_amount(net, self.currency),
price_amount(gross, self.currency),
)
return CalculatedPrice(applied_taxes, self.currency)
@property
def precise_net(self):
return sum([t[1] for t in self._applied_taxes.values()])
@property
def precise_gross(self):
return sum([t[2] for t in self._applied_taxes.values()])
@property
def precise_tax(self):
return sum([t[2] - t[1] for t in self._applied_taxes.values()])
@property
def tax(self):
return self.gross - self.net
@property
def applied_tax(self):
if len(self._applied_taxes) != 1:
            raise RuntimeError('This Price has multiple taxes, use obj.applied_taxes instead')
return self._applied_taxes.values()[0][0]
@property
def applied_taxes(self):
return [
Price(
net = net,
tax = tax,
gross = gross,
currency = self.currency,
)
for tax, net, gross
in self._applied_taxes.values()
]
@property
def formatted_currency(self):
return self.currency.symbol if self.currency.symbol else self.currency.iso_code
def _format_amount(self, value):
from django.utils.formats import number_format
        # workaround for django not treating decimal_places=None the same as 0
        # (0 is implied here, as prices are always rounded to their
        # decimal_places, see utils.price_amount)
# see https://code.djangoproject.com/ticket/13810
return number_format(value, self.currency.decimal_places or 0)
@property
def formatted_net(self):
return self._format_amount(self.net)
@property
def formatted_gross(self):
return self._format_amount(self.gross)
@property
def formatted_tax(self):
return self._format_amount(self.tax)
def __add__(self, other):
# EmptyPrice should work regardless of currency, does not change anything
if isinstance(other, EmptyPrice):
            return self.copy()
if not isinstance(other, Price):
raise TypeError('cannot add %s' % type(other))
if self.currency != other.currency:
raise TypeError('cannot add different currencies')
applied_taxes = self._applied_taxes.copy()
for tax, net, gross in other._applied_taxes.values():
if tax.unique_id in applied_taxes:
applied_taxes[tax.unique_id] = (
applied_taxes[tax.unique_id][0],
applied_taxes[tax.unique_id][1] + net,
applied_taxes[tax.unique_id][2] + gross,
)
else:
applied_taxes[tax.unique_id] = (tax, net, gross)
# filter out NO_TAX, if it is not relevant
if NO_TAX.unique_id in applied_taxes \
and applied_taxes[NO_TAX.unique_id][1] == 0 \
and applied_taxes[NO_TAX.unique_id][2] == 0:
del applied_taxes[NO_TAX.unique_id]
return CalculatedPrice(applied_taxes, self.currency)
def __neg__(self):
applied_taxes = {}
for tax, net, gross in self._applied_taxes.values():
applied_taxes[tax.unique_id] = (tax, -net, -gross)
return CalculatedPrice(applied_taxes, self.currency)
def __mul__(self, factor):
if not isinstance(factor, (int, long, float, decimal.Decimal)):
raise TypeError("Cannot multiply with %s" % type(factor))
if not isinstance(factor, decimal.Decimal):
factor = decimal.Decimal(str(factor))
if factor.is_nan():
raise TypeError("Factor must be a number (!= 'NaN')")
applied_taxes = {}
for tax, net, gross in self._applied_taxes.values():
calc_net = net * factor
calc_gross = gross * factor
applied_taxes[tax.unique_id] = (tax, calc_net, calc_gross)
return CalculatedPrice(applied_taxes, self.currency)
def __div__(self, factor):
if not isinstance(factor, (int, long, float, decimal.Decimal)):
raise TypeError("Cannot multiply with %s" % type(factor))
if not isinstance(factor, decimal.Decimal):
factor = decimal.Decimal(str(factor))
if factor.is_nan():
raise TypeError("Factor must be a number (!= 'NaN')")
applied_taxes = {}
for tax, net, gross in self._applied_taxes.values():
calc_net = net / factor
calc_gross = gross / factor
applied_taxes[tax.unique_id] = (tax, calc_net, calc_gross)
return CalculatedPrice(applied_taxes, self.currency)
__truediv__ = __div__
# django_ajax hook
def ajax_data(self):
return {
'tax': self.formatted_tax,
'net': self.formatted_net,
'gross': self.formatted_gross,
'currency': self.currency.ajax_data(),
}
class CalculatedPrice(Price):
def __init__(self, applied_taxes, currency=None):
if currency is None:
currency = price_settings.DEFAULT_CURRENCY
if not isinstance(currency, Currency):
currency = Currency(currency)
self.currency = currency
self._applied_taxes = applied_taxes
self._recalculate_overall()
class EmptyPrice(Price):
def __init__(self):
self.net = decimal.Decimal('0')
self.currency = Currency(price_settings.DEFAULT_CURRENCY)
self.gross = decimal.Decimal('0')
self._applied_taxes = {
NO_TAX.unique_id: (NO_TAX, self.net, self.gross)
}
def copy(self):
return self
def __add__(self, other):
return other.copy()
def __mul__(self, factor):
return self
def __div__(self, factor):
return self
__truediv__ = __div__
| ddanier/django_price | django_price/price.py | Python | bsd-3-clause | 8,910 | 0.005387 |
"""
Weather component that handles meteorological data for your location.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/weather/
"""
import asyncio
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.const import PRECISION_WHOLE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
DOMAIN = 'weather'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ATTR_CONDITION_CLASS = 'condition_class'
ATTR_FORECAST = 'forecast'
ATTR_FORECAST_CONDITION = 'condition'
ATTR_FORECAST_PRECIPITATION = 'precipitation'
ATTR_FORECAST_TEMP = 'temperature'
ATTR_FORECAST_TEMP_LOW = 'templow'
ATTR_FORECAST_TIME = 'datetime'
ATTR_WEATHER_ATTRIBUTION = 'attribution'
ATTR_WEATHER_HUMIDITY = 'humidity'
ATTR_WEATHER_OZONE = 'ozone'
ATTR_WEATHER_PRESSURE = 'pressure'
ATTR_WEATHER_TEMPERATURE = 'temperature'
ATTR_WEATHER_VISIBILITY = 'visibility'
ATTR_WEATHER_WIND_BEARING = 'wind_bearing'
ATTR_WEATHER_WIND_SPEED = 'wind_speed'
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the weather component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup(config)
return True
class WeatherEntity(Entity):
"""ABC for weather data."""
@property
def temperature(self):
"""Return the platform temperature."""
raise NotImplementedError()
@property
def temperature_unit(self):
"""Return the unit of measurement."""
raise NotImplementedError()
@property
def pressure(self):
"""Return the pressure."""
return None
@property
def humidity(self):
"""Return the humidity."""
raise NotImplementedError()
@property
def wind_speed(self):
"""Return the wind speed."""
return None
@property
def wind_bearing(self):
"""Return the wind bearing."""
return None
@property
def ozone(self):
"""Return the ozone level."""
return None
@property
def attribution(self):
"""Return the attribution."""
return None
@property
def visibility(self):
"""Return the visibility."""
return None
@property
def forecast(self):
"""Return the forecast."""
return None
@property
def precision(self):
"""Return the forecast."""
return PRECISION_TENTHS if self.temperature_unit == TEMP_CELSIUS \
else PRECISION_WHOLE
@property
def state_attributes(self):
"""Return the state attributes."""
data = {
ATTR_WEATHER_TEMPERATURE: show_temp(
self.hass, self.temperature, self.temperature_unit,
self.precision),
}
humidity = self.humidity
if humidity is not None:
data[ATTR_WEATHER_HUMIDITY] = round(humidity)
ozone = self.ozone
if ozone is not None:
data[ATTR_WEATHER_OZONE] = ozone
pressure = self.pressure
if pressure is not None:
data[ATTR_WEATHER_PRESSURE] = pressure
wind_bearing = self.wind_bearing
if wind_bearing is not None:
data[ATTR_WEATHER_WIND_BEARING] = wind_bearing
wind_speed = self.wind_speed
if wind_speed is not None:
data[ATTR_WEATHER_WIND_SPEED] = wind_speed
visibility = self.visibility
if visibility is not None:
data[ATTR_WEATHER_VISIBILITY] = visibility
attribution = self.attribution
if attribution is not None:
data[ATTR_WEATHER_ATTRIBUTION] = attribution
if self.forecast is not None:
forecast = []
for forecast_entry in self.forecast:
forecast_entry = dict(forecast_entry)
forecast_entry[ATTR_FORECAST_TEMP] = show_temp(
self.hass, forecast_entry[ATTR_FORECAST_TEMP],
self.temperature_unit, self.precision)
if ATTR_FORECAST_TEMP_LOW in forecast_entry:
forecast_entry[ATTR_FORECAST_TEMP_LOW] = show_temp(
self.hass, forecast_entry[ATTR_FORECAST_TEMP_LOW],
self.temperature_unit, self.precision)
forecast.append(forecast_entry)
data[ATTR_FORECAST] = forecast
return data
@property
def state(self):
"""Return the current state."""
return self.condition
@property
def condition(self):
"""Return the current condition."""
raise NotImplementedError()
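# A minimal platform entity could look like the following sketch (class name
# and values are illustrative; real platforms also provide a setup function):
#
#     class DemoWeather(WeatherEntity):
#         @property
#         def temperature(self):
#             return 21.6
#
#         @property
#         def temperature_unit(self):
#             return TEMP_CELSIUS
#
#         @property
#         def humidity(self):
#             return 92
#
#         @property
#         def condition(self):
#             return 'sunny'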
| persandstrom/home-assistant | homeassistant/components/weather/__init__.py | Python | apache-2.0 | 4,851 | 0 |
# Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flexlay.gui.editor_map_component import EditorMapComponent
from flexlay.math import Point
from flexlay.tools.tool import Tool
class Zoom2Tool(Tool):
def __init__(self):
super().__init__()
self.active = False
self.click_pos = Point(0, 0)
self.old_zoom = 0.0
def on_mouse_up(self, event):
self.active = False
def on_mouse_down(self, event):
self.active = True
self.click_pos = event.mouse_pos
gc = EditorMapComponent.current.get_gc_state()
self.old_zoom = gc.get_zoom()
def on_mouse_move(self, event):
if self.active:
gc = EditorMapComponent.current.get_gc_state()
zoom_pos = Point(gc.width / 2, gc.height / 2)
factor = (event.mouse_pos.y - self.click_pos.y) / 20.0
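            # every 20 pixels of vertical drag scales the zoom by another
            # factor of 1.25 around the viewport centre: dragging down
            # (growing y) zooms in, dragging up zooms out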
if factor > 0:
gc.set_zoom(self.old_zoom * pow(1.25, factor), zoom_pos)
elif factor < 0:
gc.set_zoom(self.old_zoom / pow(1.25, -factor), zoom_pos)
else:
gc.set_zoom(self.old_zoom, zoom_pos)
# EOF #
| SuperTux/flexlay | flexlay/tools/zoom2_tool.py | Python | gpl-3.0 | 1,822 | 0 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os
import sys
import uuid
import logging
import simplejson as json
import paho.mqtt.client as mqtt
from time import sleep
try:
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../')
from sanji.connection.connection import Connection
except ImportError as e:
print(e)
print("Please check the python PATH for import test module.")
exit(1)
_logger = logging.getLogger("sanji.sdk.connection.mqtt")
class Mqtt(Connection):
"""
Mqtt
"""
def __init__(
self,
broker_host=os.getenv('BROKER_PORT_1883_TCP_ADDR', "localhost"),
broker_port=os.getenv('BROKER_PORT_1883_TCP_PORT', 1883),
broker_keepalive=60
):
        # properties
self.tunnels = {
"internel": (uuid.uuid4().hex, None),
"model": (None, None),
"view": (None, None)
}
self.broker_host = broker_host
self.broker_port = broker_port
self.broker_keepalive = broker_keepalive
self.client = mqtt.Client()
self.connect_delay = 3
# methods
self.subscribe = self.client.subscribe
self.unsubscribe = self.client.unsubscribe
self.message_callback_add = self.client.message_callback_add
self.message_callback_remove = self.client.message_callback_remove
self.client.on_log = self.on_log
def on_log(self, mosq, obj, level, string):
pass
def connect(self):
"""
connect
"""
_logger.debug("Start connecting to broker")
while True:
try:
self.client.connect(self.broker_host, self.broker_port,
self.broker_keepalive)
break
except Exception:
_logger.debug(
"Connect failed. wait %s sec" % self.connect_delay)
sleep(self.connect_delay)
self.client.loop_forever()
def disconnect(self):
"""
disconnect
"""
_logger.debug("Disconnect to broker")
self.client.loop_stop()
def set_tunnel(self, tunnel_type, tunnel, callback=None):
"""
set_tunnel(self, tunnel_type, tunnel, callback=None):
"""
orig_tunnel = self.tunnels.get(tunnel_type, (None, None))[0]
if orig_tunnel is not None:
_logger.debug("Unsubscribe: %s", (orig_tunnel,))
self.client.unsubscribe(str(orig_tunnel))
self.tunnels[tunnel_type] = (tunnel, callback)
if callback is not None:
self.message_callback_add(tunnel, callback)
self.client.subscribe(str(tunnel))
_logger.debug("Subscribe: %s", (tunnel,))
def set_tunnels(self, tunnels):
"""
set_tunnels(self, tunnels):
"""
for tunnel_type, (tunnel, callback) in tunnels.iteritems():
if tunnel is None:
continue
self.set_tunnel(tunnel_type, tunnel, callback)
def set_on_connect(self, func):
"""
set_on_connect
"""
self.client.on_connect = func
def set_on_message(self, func):
"""
set_on_message
"""
self.client.on_message = func
def set_on_publish(self, func):
"""
set_on_publish
"""
self.client.on_publish = func
def publish(self, topic="/controller", qos=0, payload=None):
"""
publish(self, topic, payload=None, qos=0, retain=False)
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
"""
result = self.client.publish(topic,
payload=json.dumps(payload),
qos=qos)
if result[0] == mqtt.MQTT_ERR_NO_CONN:
raise RuntimeError("No connection")
return result[1]
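# Illustrative usage sketch (tunnel name and callbacks are made up):
#
#     conn = Mqtt(broker_host='localhost')
#     conn.set_on_connect(on_connect)
#     conn.set_tunnel('model', '/example/model', callback=on_model_message)
#     conn.connect()   # blocking: runs the paho network loop until disconnect()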
| imZack/sanji | sanji/connection/mqtt.py | Python | mit | 4,240 | 0 |
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
from dateutil.parser import parse as dateparse
import httplib as http
import logging
from flask import request, make_response
from framework.exceptions import HTTPError
from website.addons.base import generic_views
from website.addons.github.api import GitHubClient, ref_to_params
from website.addons.github.exceptions import NotFoundError, GitHubError
from website.addons.github.serializer import GitHubSerializer
from website.addons.github.utils import (
get_refs, check_permissions,
verify_hook_signature, MESSAGES
)
from website.models import NodeLog
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public, must_be_valid_project,
)
from website.util import rubeus
logger = logging.getLogger(__name__)
logging.getLogger('github3').setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
SHORT_NAME = 'github'
FULL_NAME = 'GitHub'
############
# Generics #
############
github_account_list = generic_views.account_list(
SHORT_NAME,
GitHubSerializer
)
github_import_auth = generic_views.import_auth(
SHORT_NAME,
GitHubSerializer
)
def _get_folders(node_addon, folder_id):
pass
github_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
github_get_config = generic_views.get_config(
SHORT_NAME,
GitHubSerializer
)
github_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
github_root_folder = generic_views.root_folder(
SHORT_NAME
)
#################
# Special Cased #
#################
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_set_config(auth, **kwargs):
node_settings = kwargs.get('node_addon', None)
node = kwargs.get('node', None)
user_settings = kwargs.get('user_addon', None)
try:
if not node:
node = node_settings.owner
if not user_settings:
user_settings = node_settings.user_settings
except AttributeError:
raise HTTPError(http.BAD_REQUEST)
# Parse request
github_user_name = request.json.get('github_user', '')
github_repo_name = request.json.get('github_repo', '')
if not github_user_name or not github_repo_name:
raise HTTPError(http.BAD_REQUEST)
# Verify that repo exists and that user can access
connection = GitHubClient(external_account=node_settings.external_account)
repo = connection.repo(github_user_name, github_repo_name)
if repo is None:
if user_settings:
message = (
'Cannot access repo. Either the repo does not exist '
'or your account does not have permission to view it.'
)
else:
message = (
'Cannot access repo.'
)
return {'message': message}, http.BAD_REQUEST
changed = (
github_user_name != node_settings.user or
github_repo_name != node_settings.repo
)
# Update hooks
if changed:
# Delete existing hook, if any
node_settings.delete_hook()
# Update node settings
node_settings.user = github_user_name
node_settings.repo = github_repo_name
# Log repo select
node.add_log(
action='github_repo_linked',
params={
'project': node.parent_id,
'node': node._id,
'github': {
'user': github_user_name,
'repo': github_repo_name,
}
},
auth=auth,
)
# Add new hook
if node_settings.user and node_settings.repo:
node_settings.add_hook(save=False)
node_settings.save()
return {}
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_download_starball(node_addon, **kwargs):
archive = kwargs.get('archive', 'tar')
ref = request.args.get('sha', 'master')
connection = GitHubClient(external_account=node_addon.external_account)
headers, data = connection.starball(
node_addon.user, node_addon.repo, archive, ref
)
resp = make_response(data)
for key, value in headers.iteritems():
resp.headers[key] = value
return resp
#########
# HGrid #
#########
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_root_folder(*args, **kwargs):
"""View function returning the root container for a GitHub repo. In
contrast to other add-ons, this is exposed via the API for GitHub to
accommodate switching between branches and commits.
"""
node_settings = kwargs['node_addon']
auth = kwargs['auth']
data = request.args.to_dict()
return github_hgrid_data(node_settings, auth=auth, **data)
def github_hgrid_data(node_settings, auth, **kwargs):
# Quit if no repo linked
if not node_settings.complete:
return
connection = GitHubClient(external_account=node_settings.external_account)
# Initialize repo here in the event that it is set in the privacy check
# below. This potentially saves an API call in _check_permissions, below.
repo = None
# Quit if privacy mismatch and not contributor
node = node_settings.owner
if node.is_public and not node.is_contributor(auth.user):
try:
repo = connection.repo(node_settings.user, node_settings.repo)
except NotFoundError:
# TODO: Test me @jmcarp
# TODO: Add warning message
logger.error('Could not access GitHub repo')
return None
if repo.private:
return None
try:
branch, sha, branches = get_refs(
node_settings,
branch=kwargs.get('branch'),
sha=kwargs.get('sha'),
connection=connection,
)
except (NotFoundError, GitHubError):
# TODO: Show an alert or change GitHub configuration?
logger.error('GitHub repo not found')
return
if branch is not None:
ref = ref_to_params(branch, sha)
can_edit = check_permissions(
node_settings, auth, connection, branch, sha, repo=repo,
)
else:
ref = None
can_edit = False
name_tpl = '{user}/{repo}'.format(
user=node_settings.user, repo=node_settings.repo
)
permissions = {
'edit': can_edit,
'view': True,
'private': node_settings.is_private
}
urls = {
'upload': node_settings.owner.api_url + 'github/file/' + (ref or ''),
'fetch': node_settings.owner.api_url + 'github/hgrid/' + (ref or ''),
'branch': node_settings.owner.api_url + 'github/hgrid/root/',
'zip': node_settings.owner.api_url + 'github/zipball/' + (ref or ''),
'repo': "https://github.com/{0}/{1}/tree/{2}".format(node_settings.user, node_settings.repo, branch)
}
branch_names = [each.name for each in branches]
if not branch_names:
branch_names = [branch] # if repo un-init-ed then still add default branch to list of branches
return [rubeus.build_addon_root(
node_settings,
name_tpl,
urls=urls,
permissions=permissions,
branches=branch_names,
defaultBranch=branch,
)]
#########
# Repos #
#########
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_create_repo(**kwargs):
repo_name = request.json.get('name')
if not repo_name:
raise HTTPError(http.BAD_REQUEST)
node_settings = kwargs['node_addon']
connection = GitHubClient(external_account=node_settings.external_account)
try:
repo = connection.create_repo(repo_name, auto_init=True)
except GitHubError:
# TODO: Check status code
raise HTTPError(http.BAD_REQUEST)
return {
'user': repo.owner.login,
'repo': repo.name,
}
#########
# Hooks #
#########
# TODO: Refactor using NodeLogger
def add_hook_log(node, github, action, path, date, committer, include_urls=False,
sha=None, save=False):
"""Add log event for commit from webhook payload.
:param node: Node to add logs to
    :param github: GitHub node settings record
    :param action: Name of the log action to add
:param path: Path to file
:param date: Date of commit
:param committer: Committer name
:param include_urls: Include URLs in `params`
:param sha: SHA of updated file
:param save: Save changes
"""
github_data = {
'user': github.user,
'repo': github.repo,
}
urls = {}
if include_urls:
# TODO: Move to helper function
url = node.web_url_for('addon_view_or_download_file', path=path, provider=SHORT_NAME)
urls = {
'view': '{0}?ref={1}'.format(url, sha),
'download': '{0}?action=download&ref={1}'.format(url, sha)
}
node.add_log(
action=action,
params={
'project': node.parent_id,
'node': node._id,
'path': path,
'github': github_data,
'urls': urls,
},
auth=None,
foreign_user=committer,
log_date=date,
save=save,
)
@must_be_valid_project
@must_not_be_registration
@must_have_addon('github', 'node')
def github_hook_callback(node_addon, **kwargs):
"""Add logs for commits from outside OSF.
"""
if request.json is None:
return {}
# Fail if hook signature is invalid
verify_hook_signature(
node_addon,
request.data,
request.headers,
)
node = kwargs['node'] or kwargs['project']
payload = request.json
for commit in payload.get('commits', []):
# TODO: Look up OSF user by commit
# Skip if pushed by OSF
if commit['message'] and commit['message'] in MESSAGES.values():
continue
_id = commit['id']
date = dateparse(commit['timestamp'])
committer = commit['committer']['name']
# Add logs
for path in commit.get('added', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_ADDED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('modified', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_UPDATED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('removed', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_REMOVED,
path, date, committer,
)
node.save()
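# For reference, the hook handler above only reads a small subset of GitHub's
# push payload; an illustrative minimal payload (values made up) looks like:
#
#     {"commits": [{"id": "abc123", "timestamp": "2015-01-01T00:00:00Z",
#                   "message": "Fix typo",
#                   "committer": {"name": "Jane Doe"},
#                   "added": ["README.md"], "modified": [], "removed": []}]}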
| RomanZWang/osf.io | website/addons/github/views.py | Python | apache-2.0 | 10,973 | 0.001185 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Backend.AI Library documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 1 21:26:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Backend.AI API Documentation'
copyright = '2015-2020, Lablup Inc.'
author = 'Lablup Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v5.20191215'
# The full version, including alpha/beta/rc tags.
release = '20.03'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
numfig = True
intersphinx_mapping = {
'client-py':
('https://client-py.docs.backend.ai/en/latest/', None),
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'BackendAIAPIDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'BackendAIDoc.tex', 'Backend.AI API Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'backend.ai', 'Backend.AI API Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Backend.AI', 'Backend.AI API Documentation',
author, 'Backend.AI', 'Backend.AI is a hassle-free backend for AI programming and service.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| lablup/sorna | docs/conf.py | Python | lgpl-3.0 | 6,409 | 0.004681 |
import logging
from sft.common.commands.base import CommandFinished, ProgramFinished, CommandIds, ErrorIds
from sft.common.socket_manager import SocketManager
from .base import ClientCommandBase
from sft.common.utils.packets import (generate_packet, get_error_code)
from sft.common.config import Config
from sft.common.sessions.session_manager import SessionManager, SessionStatus
_socket_manager = SocketManager()
LOG = logging.getLogger(__name__)
_config = Config()
_packet_size = _config.package_size
__all__ = ['CloseCommand']
class CloseCommand(ClientCommandBase):
"""Usage: close"""
@staticmethod
def get_command_id():
return CommandIds.CLOSE_COMMAND_ID
@staticmethod
def get_command_alias():
return 'close'
def _initialize(self, args_line):
super()._initialize(args_line)
self._send_request = True
self._finish = False
session = SessionManager().get_all_not_inactive_sessions()[0]
client_uuid = session.client_uuid
self._request = generate_packet(self.get_command_id(), ErrorIds.SUCCESSFUL, client_uuid)
session.status = SessionStatus.wait_for_close
def receive_data(self, data):
pass
def generate_data(self):
if self._send_request:
self._finish = True
self._send_request = False
return self._request
return None
| AlexeiBuzuma/LocalComputeNetworks | sft/client/commands/close.py | Python | mit | 1,397 | 0.002147 |
##
# Copyright 2020 NVIDIA
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for XALT, implemented as an easyblock
@author: Scott McMillan (NVIDIA)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_XALT(ConfigureMake):
"""Support for building and installing XALT."""
@staticmethod
def extra_options():
extra_vars = {
'config_py': [None, "XALT site filter file", MANDATORY],
'executable_tracking': [True, "Enable executable tracking", CUSTOM],
'gpu_tracking': [None, "Enable GPU tracking", CUSTOM],
'logging_url': [None, "Logging URL for transmission", CUSTOM],
'mysql': [False, "Build with MySQL support", CUSTOM],
'scalar_sampling': [True, "Enable scalar sampling", CUSTOM],
'static_cxx': [False, "Statically link libstdc++ and libgcc_s", CUSTOM],
'syshost': [None, "System name", MANDATORY],
            'transmission': [None, "Data transmission method", MANDATORY],
'file_prefix': [None, "XALT record files prefix", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration step for XALT."""
# By default, XALT automatically appends 'xalt/<version>' to the
# prefix, i.e., --prefix=/opt will actually install in
# /opt/xalt/<version>. To precisely control the install prefix and
# not append anything to the prefix, use the configure option
# '--with-siteControlledPrefix=yes'.
# See https://xalt.readthedocs.io/en/latest/050_install_and_test.html
self.cfg.update('configopts', '--with-siteControlledPrefix=yes')
# XALT site filter config file is mandatory
config_py = self.cfg['config_py']
if config_py:
if os.path.exists(config_py):
self.cfg.update('configopts', '--with-config=%s' % config_py)
else:
raise EasyBuildError("Specified XALT configuration file %s does not exist!", config_py)
else:
error_msg = "Location of XALT configuration file must be specified via 'config_py' easyconfig parameter. "
error_msg += "You can edit the easyconfig file, or use 'eb --try-amend=config_py=<path>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/030_site_filtering.html for more information."
raise EasyBuildError(error_msg)
# XALT system name is mandatory
if self.cfg['syshost']:
self.cfg.update('configopts', '--with-syshostConfig=%s' % self.cfg['syshost'])
else:
error_msg = "The name of the system must be specified via the 'syshost' easyconfig parameter. "
error_msg += "You can edit the easyconfig file, or use 'eb --try-amend=syshost=<string>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/020_site_configuration.html for more information."
raise EasyBuildError(error_msg)
# Transmission method is mandatory
if self.cfg['transmission']:
self.cfg.update('configopts', '--with-transmission=%s' % self.cfg['transmission'])
else:
error_msg = "The XALT transmission method must be specified via the 'transmission' easyconfig parameter. "
error_msg = "You can edit the easyconfig file, or use 'eb --try-amend=transmission=<string>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/020_site_configuration.html for more information."
raise EasyBuildError(error_msg)
# GPU tracking
if self.cfg['gpu_tracking'] is True:
# User enabled
self.cfg.update('configopts', '--with-trackGPU=yes')
elif self.cfg['gpu_tracking'] is None:
# Default value, enable GPU tracking if nvml.h is present
# and the CUDA module is loaded
cuda_root = get_software_root('CUDA')
if cuda_root:
nvml_h = os.path.join(cuda_root, "include", "nvml.h")
if os.path.isfile(nvml_h):
self.cfg.update('configopts', '--with-trackGPU=yes')
self.cfg['gpu_tracking'] = True
else:
# User disabled
self.cfg.update('configopts', '--with-trackGPU=no')
# MySQL
if self.cfg['mysql'] is True:
self.cfg.update('configopts', '--with-MySQL=yes')
else:
self.cfg.update('configopts', '--with-MySQL=no')
# If XALT is built with a more recent compiler than the system
# compiler, then XALT likely will depend on symbol versions not
# available in the system libraries. Link statically as a workaround.
if self.cfg['static_cxx'] is True:
self.cfg.update('configopts', 'LDFLAGS="${LDFLAGS} -static-libstdc++ -static-libgcc"')
# XALT file prefix (optional). The default is $HOME/.xalt.d/ which
# entails that record files are stored separately for each user.
# If this option is specified, XALT will write to the specified
# location for every user. The file prefix can also be modified
# after the install using the XALT_FILE_PREFIX environment variable.
if self.cfg['file_prefix']:
self.cfg.update('configopts', '--with-xaltFilePrefix=%s' % self.cfg['file_prefix'])
# Configure
super(EB_XALT, self).configure_step()
def make_module_extra(self, *args, **kwargs):
txt = super(EB_XALT, self).make_module_extra(*args, **kwargs)
txt += self.module_generator.prepend_paths('LD_PRELOAD', 'lib64/libxalt_init.%s' % get_shared_lib_ext())
txt += self.module_generator.set_environment('XALT_DIR', self.installdir)
txt += self.module_generator.set_environment('XALT_ETC_DIR', '%s' % os.path.join(self.installdir, 'etc'))
txt += self.module_generator.set_environment('XALT_EXECUTABLE_TRACKING',
('no', 'yes')[bool(self.cfg['executable_tracking'])])
txt += self.module_generator.set_environment('XALT_GPU_TRACKING',
('no', 'yes')[bool(self.cfg['gpu_tracking'])])
if self.cfg['transmission'].lower() == 'curl' and self.cfg['logging_url']:
txt += self.module_generator.set_environment('XALT_LOGGING_URL', self.cfg['logging_url'])
txt += self.module_generator.set_environment('XALT_SCALAR_SAMPLING',
('no', 'yes')[bool(self.cfg['scalar_sampling'])])
# In order to track containerized executables, bind mount the XALT
# directory in the Singularity container and preload the XALT library
# https://xalt.readthedocs.io/en/latest/050_install_and_test.html#xalt-modulefile
txt += self.module_generator.prepend_paths('SINGULARITY_BINDPATH', '')
txt += self.module_generator.prepend_paths('SINGULARITYENV_LD_PRELOAD',
'lib64/libxalt_init.%s' % get_shared_lib_ext())
return txt
def make_module_req_guess(self):
"""Custom guesses for environment variables"""
return {'COMPILER_PATH': 'bin',
'PATH': 'bin'}
def sanity_check_step(self):
"""Custom sanity check"""
custom_paths = {
'files': ['bin/ld', 'bin/ld.gold', 'bin/xalt_extract_record',
'lib64/libxalt_init.%s' % get_shared_lib_ext()],
'dirs': ['bin', 'libexec', 'sbin'],
}
custom_commands = ['xalt_configuration_report']
super(EB_XALT, self).sanity_check_step(custom_commands=custom_commands,
custom_paths=custom_paths)
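# --- Illustrative sketch (not part of the original easyblock) ---------------
# A minimal easyconfig fragment showing the three mandatory parameters that
# configure_step() validates. The version, paths and site name below are
# assumptions for demonstration only.
#
#     name = 'XALT'
#     version = '2.10.34'
#
#     config_py = '/apps/easybuild/site/xalt_site_filter.py'  # site filter file
#     syshost = 'examplecluster'                               # system name
#     transmission = 'file'                                    # transmission method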
| boegel/easybuild-easyblocks | easybuild/easyblocks/x/xalt.py | Python | gpl-2.0 | 9,134 | 0.003065 |
import copy
from typing import Any # noqa: F401
from typing import Dict # noqa: F401
from typing import Iterable # noqa: F401
from typing import List # noqa: F401
from typing import Optional # noqa: F401
from typing import Text # noqa: F401
import botocore.session
from botocore.exceptions import ClientError
from .models import Column
from .models import Partition
from .models import STORAGE_DESCRIPTOR_TEMPLATE
from .utils import chunks
from .utils import ensure_trailing_slash
from .utils import remove_trailing_slash
TABLE_INPUT_TEMPLATE = {
'Name': '',
'Owner': 'hadoop',
'StorageDescriptor': STORAGE_DESCRIPTOR_TEMPLATE,
'PartitionKeys': [],
'TableType': 'EXTERNAL_TABLE',
'Parameters': {'EXTERNAL': 'TRUE'},
} # type: Dict[Text, Any]
class Table(object):
__slots__ = ['database_name', 'name', 'columns', 'location', 'partition_keys']
def __init__(self, database_name, name, columns, location, partition_keys):
# type: (Text, Text, List[Column], Text, List[Column]) -> None
self.database_name = database_name
self.name = name
self.columns = columns
self.location = location
self.partition_keys = partition_keys
def list_partitions(self):
# type: () -> Iterable[Partition]
client = botocore.session.get_session().create_client('glue')
opts = {'DatabaseName': self.database_name, 'TableName': self.name}
while True:
result = client.get_partitions(**opts)
if 'Partitions' in result:
for pd in result['Partitions']:
yield Partition.from_input(pd)
if 'NextToken' in result:
opts['NextToken'] = result['NextToken']
else:
break
def get_partitions(self):
# type: () -> List[Partition]
client = botocore.session.get_session().create_client('glue')
opts = {'DatabaseName': self.database_name, 'TableName': self.name}
partitions = [] # type: List[Partition]
while True:
result = client.get_partitions(**opts)
if 'Partitions' in result:
partitions += [Partition.from_input(pd) for pd in result['Partitions']]
if 'NextToken' in result:
opts['NextToken'] = result['NextToken']
else:
break
return partitions
def add_partitions(self, partitions):
# type: (List[Partition]) -> None
client = botocore.session.get_session().create_client('glue')
for partition_chunk in chunks(partitions, 100):
data = {'DatabaseName': self.database_name,
'TableName': self.name,
'PartitionInputList': [partition.to_input() for partition in partition_chunk]}
client.batch_create_partition(**data)
def recreate_partitions(self, partitions):
# type: (List[Partition]) -> None
client = botocore.session.get_session().create_client('glue')
for partition_chunk in chunks(partitions, 25):
data = {'DatabaseName': self.database_name,
'TableName': self.name,
'PartitionsToDelete': [{'Values': partition.values} for partition in partition_chunk]}
client.batch_delete_partition(**data)
data = {'DatabaseName': self.database_name,
'TableName': self.name,
'PartitionInputList': [partition.to_input() for partition in partition_chunk]}
client.batch_create_partition(**data)
@classmethod
def from_input(cls, database_name, data):
# type: (Text, Dict[Text, Any]) -> Table
table = cls(
database_name=database_name,
name=data['Name'],
columns=[Column.from_input(cd) for cd in data['StorageDescriptor']['Columns']],
location=ensure_trailing_slash(data['StorageDescriptor']['Location']),
partition_keys=[Column.from_input(cd) for cd in data['PartitionKeys']],
)
return table
def to_input(self):
# type: () -> Dict[Text, Any]
data = copy.deepcopy(TABLE_INPUT_TEMPLATE)
data['Name'] = self.name
data['StorageDescriptor']['Columns'] = [column.to_input() for column in self.columns]
data['StorageDescriptor']['Location'] = remove_trailing_slash(self.location)
data['PartitionKeys'] = [column.to_input() for column in self.partition_keys]
return data
@classmethod
def get(cls, database_name, name):
# type: (Text, Text) -> Optional[Table]
client = botocore.session.get_session().create_client('glue')
try:
result = client.get_table(DatabaseName=database_name, Name=name)
except ClientError as ex:
if ex.response['Error']['Code'] == 'EntityNotFoundException':
return None
raise ex
return cls.from_input(database_name, result['Table'])
@classmethod
def create(cls, database_name, name, columns, location, partition_keys):
# type: (Text, Text, List[Column], Text, List[Column]) -> Table
client = botocore.session.get_session().create_client('glue')
table = cls(
database_name=database_name,
name=name,
columns=columns,
location=location,
partition_keys=partition_keys,
)
client.create_table(
DatabaseName=database_name,
TableInput=table.to_input(),
)
return table
@classmethod
def update(cls, database_name, name, columns, location, partition_keys):
# type: (Text, Text, List[Column], Text, List[Column]) -> Table
client = botocore.session.get_session().create_client('glue')
table = cls(
database_name=database_name,
name=name,
columns=columns,
location=location,
partition_keys=partition_keys,
)
client.update_table(
DatabaseName=database_name,
TableInput=table.to_input(),
)
return table
@classmethod
def drop(cls, database_name, name):
# type: (Text, Text) -> None
client = botocore.session.get_session().create_client('glue')
client.delete_table(
DatabaseName=database_name,
Name=name,
)
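# --- Illustrative usage sketch (not part of the original module) ------------
# The database and table names below are assumptions for demonstration, and
# running this requires AWS credentials with access to the Glue catalog.
if __name__ == '__main__':
    table = Table.get('example_db', 'example_events')
    if table is None:
        print('table not found')
    else:
        for partition in table.list_partitions():
            print(partition.values)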
| robotblake/pdsm | src/pdsm/glue.py | Python | mit | 6,425 | 0.001712 |
from django.test import TestCase
from .templatetags.staticpages_tags import render_page_with_shortcode
ESCAPED_TEXT = '<div class="magnifier"><a href="x"><img src="x" class="img-responsive" />' + \
'</a></div><b>XSS</b>'
MULTILINE_TEXT = '<div class="magnifier"><a href="xxx"><img src="xxx" class="img-responsive" />' + \
'</a></div>'
BASIC_TEXT = MULTILINE_TEXT
class TempleTagsTestCase(TestCase):
def test_render_page_with_shortcode_for_valid(self):
TEST_CASE = {'[map]xxx[/map]': BASIC_TEXT, # Basic case
"[map]\nxxx\n[/map]": MULTILINE_TEXT, # Multiline case
"[map]x[/map]<b>XSS</b>": ESCAPED_TEXT # Tests of escaped text
}
for value, expected in TEST_CASE.items():
self.assertHTMLEqual(render_page_with_shortcode({}, value, safe=False), expected)
def test_render_page_with_shortcode_for_unchanged(self):
TEST_CASE = ['[/map]xxx[map]', # wrong order
'[map]xxx[/map', # no end of end tag
'[map][/map]', # empty tag
'[map]"[/map]' # with quote - XSS protection
]
for item in TEST_CASE:
self.assertHTMLEqual(render_page_with_shortcode({}, item, safe=True), item)
| watchdogpolska/watchdog-kj-kultura | watchdog_kj_kultura/staticpages/tests.py | Python | mit | 1,351 | 0.003701 |
# Create your views here.
from django.shortcuts import render
from .froms import SampleSearchForm
def index(request):
form = SampleSearchForm(request.GET)
results = form.search()
    return render(request, 'index.html', {'samples': results})
| srkama/haysolr | dataview/testapi/views.py | Python | apache-2.0 | 251 | 0.015936 |
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.six import PY3
from ansible.utils.unsafe_proxy import AnsibleUnsafe, AnsibleUnsafeBytes, AnsibleUnsafeText, wrap_var
def test_wrap_var_text():
assert isinstance(wrap_var(u'foo'), AnsibleUnsafeText)
def test_wrap_var_bytes():
assert isinstance(wrap_var(b'foo'), AnsibleUnsafeBytes)
def test_wrap_var_string():
if PY3:
assert isinstance(wrap_var('foo'), AnsibleUnsafeText)
else:
assert isinstance(wrap_var('foo'), AnsibleUnsafeBytes)
def test_wrap_var_dict():
assert isinstance(wrap_var(dict(foo='bar')), dict)
assert not isinstance(wrap_var(dict(foo='bar')), AnsibleUnsafe)
assert isinstance(wrap_var(dict(foo=u'bar'))['foo'], AnsibleUnsafeText)
def test_wrap_var_dict_None():
assert wrap_var(dict(foo=None))['foo'] is None
assert not isinstance(wrap_var(dict(foo=None))['foo'], AnsibleUnsafe)
def test_wrap_var_list():
assert isinstance(wrap_var(['foo']), list)
assert not isinstance(wrap_var(['foo']), AnsibleUnsafe)
assert isinstance(wrap_var([u'foo'])[0], AnsibleUnsafeText)
def test_wrap_var_list_None():
assert wrap_var([None])[0] is None
assert not isinstance(wrap_var([None])[0], AnsibleUnsafe)
def test_wrap_var_set():
assert isinstance(wrap_var(set(['foo'])), set)
assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe)
for item in wrap_var(set([u'foo'])):
assert isinstance(item, AnsibleUnsafeText)
def test_wrap_var_set_None():
for item in wrap_var(set([None])):
assert item is None
assert not isinstance(item, AnsibleUnsafe)
def test_wrap_var_tuple():
assert isinstance(wrap_var(('foo',)), tuple)
assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe)
assert isinstance(wrap_var(('foo',))[0], AnsibleUnsafe)
def test_wrap_var_tuple_None():
assert wrap_var((None,))[0] is None
assert not isinstance(wrap_var((None,))[0], AnsibleUnsafe)
def test_wrap_var_None():
assert wrap_var(None) is None
assert not isinstance(wrap_var(None), AnsibleUnsafe)
def test_wrap_var_unsafe_text():
assert isinstance(wrap_var(AnsibleUnsafeText(u'foo')), AnsibleUnsafeText)
def test_wrap_var_unsafe_bytes():
assert isinstance(wrap_var(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeBytes)
def test_wrap_var_no_ref():
thing = {
'foo': {
'bar': 'baz'
},
'bar': ['baz', 'qux'],
'baz': ('qux',),
'none': None,
'text': 'text',
}
wrapped_thing = wrap_var(thing)
    assert thing is not wrapped_thing
    assert thing['foo'] is not wrapped_thing['foo']
    assert thing['bar'][0] is not wrapped_thing['bar'][0]
    assert thing['baz'][0] is not wrapped_thing['baz'][0]
    # None is a singleton, so wrapping hands back the very same object
    assert thing['none'] is wrapped_thing['none']
    assert thing['text'] is not wrapped_thing['text']
def test_AnsibleUnsafeText():
assert isinstance(AnsibleUnsafeText(u'foo'), AnsibleUnsafe)
def test_AnsibleUnsafeBytes():
assert isinstance(AnsibleUnsafeBytes(b'foo'), AnsibleUnsafe)
| azaghal/ansible | test/units/utils/test_unsafe_proxy.py | Python | gpl-3.0 | 3,234 | 0.000618 |
import json
from autobahn.twisted.websocket import WebSocketClientProtocol, WebSocketClientFactory
class SocketClientProtocol(WebSocketClientProtocol):
def emit(self, event_name, **kwargs):
payload = self._format_outbound_data(event_name, **kwargs)
self.sendMessage(payload)
def _format_outbound_data(self, event, **kwargs):
""" Format outbound message as JSON """
message = {'event': event}
for key in kwargs.keys():
message[key] = kwargs.get(key)
return json.dumps(message).encode('utf8')
def onMessage(self, payload, isBinary):
self.factory.handle_message(self, payload)
class BaseSocketClientFactory(WebSocketClientFactory):
protocol = SocketClientProtocol
def __init__(self, *args, **kwargs):
WebSocketClientFactory.__init__(self, *args, **kwargs)
self.callbacks = {}
self.register_callbacks()
def register_callbacks(self):
pass
def on(self, event_name, callback):
self.callbacks[event_name] = callback
def fire_callback(self, client, event_name, **kwargs):
if event_name in self.callbacks:
self.callbacks[event_name](client, **kwargs)
def handle_message(self, client, message):
payload = self.parse_message(message)
if payload:
event = payload.pop('event')
self.fire_callback(client, event, **payload)
    def parse_message(self, message):
        payload = json.loads(message)
        if 'event' in payload:
            return payload
        # no 'event' key: signal the caller (handle_message) that there is nothing to dispatch
        return None
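# --- Illustrative sketch (not part of the original module) ------------------
# A minimal factory showing how register_callbacks()/on() and emit() fit
# together; the 'welcome' event name and its 'message' payload key are
# assumptions for demonstration only.
class EchoClientFactory(BaseSocketClientFactory):
    def register_callbacks(self):
        # map the assumed server-side 'welcome' event to a handler
        self.on('welcome', self.handle_welcome)
    def handle_welcome(self, client, message=None, **kwargs):
        # reply through the same JSON envelope the protocol uses for inbound events
        client.emit('echo', message=message)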
| CptLemming/django-socket-server | socket_server/client.py | Python | bsd-3-clause | 1,594 | 0.000627 |
'''
Created on Jan 5, 2014
@author: Rob
'''
import unittest
class ObjectTreeTests(unittest.TestCase):
def setUp(self):
from byond.objtree import ObjectTree
self.tree = ObjectTree()
def test_consumeVariable_basics(self):
test_string = 'var/obj/item/weapon/chainsaw = new'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'chainsaw')
self.assertEqual(data.type, '/obj/item/weapon')
self.assertEqual(data.value, 'new')
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_alternate_array_declaration_01(self):
test_string = 'var/appearance_keylist[0]'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'appearance_keylist')
self.assertEqual(data.type, '/list')
self.assertEqual(data.value, None)
self.assertEqual(data.size, 0)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_alternate_array_declaration_02(self):
test_string = 'var/medical[] = list()'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'medical')
self.assertEqual(data.type, '/list')
self.assertEqual(data.value, 'list()')
self.assertEqual(data.size, -1)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_complex_types(self):
test_string = 'var/datum/gas_mixture/air_temporary'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'air_temporary')
self.assertEqual(data.type, '/datum/gas_mixture')
self.assertEqual(data.value, None)
self.assertEqual(data.size, None)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_file_ref(self):
test_string = 'icon = \'butts.dmi\''
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'icon')
self.assertEqual(data.type, '/icon')
self.assertEqual(str(data.value), 'butts.dmi')
self.assertEqual(data.size, None)
self.assertEqual(data.declaration, False)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| ComicIronic/ByondToolsv3 | tests/ObjectTree.py | Python | mit | 2,925 | 0.004444 |
from questionnaire import *
from django.conf import settings
from django.utils.translation import ugettext as _
from json import dumps
@question_proc('range', 'number')
def question_range_or_number(request, question):
cd = question.getcheckdict()
rmin, rmax = parse_range(cd)
rstep = parse_step(cd)
runit = cd.get('unit', '')
required = cd.get('required', False)
current = request.POST.get('question_%s' % question.number, None)
jsinclude = []
if question.type == 'range':
jsinclude = [settings.STATIC_URL+'range.js']
return {
'required' : required,
'type': question.type,
'rmin' : rmin,
'rmax' : rmax,
'rstep' : rstep,
'runit' : runit,
'current' : current,
'jsinclude' : jsinclude
}
@answer_proc('range', 'number')
def process_range_or_number(question, answer):
cd = question.getcheckdict()
rmin, rmax = parse_range(cd)
rstep = parse_step(cd)
convert = range_type(rmin, rmax, rstep)
required = question.getcheckdict().get('required', 0)
ans = answer['ANSWER']
if not ans:
if required:
raise AnswerException(_(u"Field cannot be blank"))
else:
return []
try:
ans = convert(ans)
except:
raise AnswerException(_(u"Could not convert the number"))
if (rmax is not None and ans > convert(rmax)) or (rmin is not None and ans < convert(rmin)):
raise AnswerException(_(u"Out of range"))
return dumps([ans])
add_type('range', 'Range of numbers [select]')
add_type('number', 'Number [input]')
def parse_range(checkdict):
"Given a checkdict for a range widget return the min and max string values."
rmin, rmax = None, None
range = checkdict.get('range', None)
try:
if range:
rmin, rmax = range.split('-', 1)
rmin, rmax = rmin or None, rmax or None
except ValueError:
pass
return rmin, rmax
def parse_step(checkdict):
"Given a checkdict for a range widget return the step as string value."
return checkdict.get('step', '1')
def range_type(rmin, rmax, step):
"""Given the min, max and step value return float or int depending on
the number of digits after 0.
"""
if any((digits(rmin), digits(rmax), digits(step))):
return float
else:
return int
def digits(number):
"Given a number as string return the number of digits after 0."
if number is not None and ('.' in number or ',' in number):
if '.' in number:
return len(number.split('.')[1])
else:
return len(number.split(',')[1])
else:
return 0
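# --- Illustrative sketch (not part of the original module) ------------------
# Quick self-check of the helpers above with an assumed checkdict; the keys
# mirror what getcheckdict() is expected to return for a range question.
if __name__ == '__main__':
    sample_cd = {'range': '0.5-5', 'step': '0.5', 'unit': 'kg'}
    assert parse_range(sample_cd) == ('0.5', '5')
    assert parse_step(sample_cd) == '0.5'
    assert range_type('0.5', '5', '0.5') is float
    assert digits('0.5') == 1
    print('range helpers behave as expected')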
| daniboy/seantis-questionnaire | questionnaire/qprocessors/range_or_number.py | Python | bsd-3-clause | 2,702 | 0.007772 |
#!/usr/bin/env python
from ZSI import Binding
MESSAGE = "Hello from Python!"
def main():
binding = Binding(url='http://localhost:8080/server.py')
print ' Sending: %s' % MESSAGE
response = binding.echo(MESSAGE)
    print 'Response: %s' % response
if __name__ == '__main__':
main()
| acigna/pywez | zsi/doc/examples/client/send_request/simple/Binding/client.py | Python | mit | 301 | 0.003322 |
import re
import datetime
from pymongo import MongoClient
from bson import ObjectId
from .exception import RecorderException, StructureException
__all__ = ['get_database', 'Recorder', 'Structure']
def get_database(db_name, host, port=27017):
return MongoClient(host, port)[db_name]
class Structure(dict):
__store = {}
def __init__(self, *args, **kwargs):
super(Structure, self).__init__(*args, **kwargs)
self.__dict__ = self
self._validate()
def _validate(self):
pass
def to_dict(self):
return self.__dict__
class Recorder:
struct = None
__store = None
class Meta:
database = None
class DataStore:
def get(self, key):
return self.__dict__.get(key)
def set(self, key, value):
self.__dict__[key] = value
def to_dict(self):
return self.__dict__
def __init__(self, key, data=None):
self._key = key
self.__store = self.DataStore()
self._init_from_dict(data)
def _init_from_dict(self, data):
if not isinstance(self.struct, Structure):
raise RecorderException("{0} struct is not a defined".format(self.__class__.__name__))
if not isinstance(data, dict):
data = dict()
# initialize store data
for k, v in self.struct.to_dict().items():
result = data.get(k)
if not result:
result = v
self.__store.set(k, result)
def key(self):
return self._key
def pk(self):
return ObjectId(self.key())
    def __str__(self):
        return self.__class__.__name__
    def __getattr__(self, key):
        if key in list(self.struct.keys()):
            return self.__store.get(key)
        # plain object has no __getattr__ to fall back to, so signal the miss directly
        raise AttributeError(key)
def __setattr__(self, key, value):
if key in list(self.struct.keys()):
self.__store.set(key, value)
else:
super(Recorder, self).__setattr__(key, value)
@classmethod
def colname(cls):
return re.sub('(?!^)([A-Z]+)', r'_\1', cls.__name__).lower().__str__()
@classmethod
def collection(cls):
return cls.Meta.database[cls.colname()]
@classmethod
def new(cls, data=None):
return cls(None, data)
@classmethod
def create(cls, data):
key = None
if '_id' in data.keys():
key = data['_id']
if isinstance(data['_id'], ObjectId):
key = data['_id'].__str__()
return cls(key, data)
@classmethod
def get(cls, key, *args, **kwargs):
data = cls.collection().find_one({'_id': ObjectId(key)}, *args, **kwargs)
if not data:
return None
return cls(key, data)
@classmethod
def get_by(cls, key, value, *args, **kwargs):
data = cls.collection().find_one({key: value}, *args, **kwargs)
if not data:
return None
return cls.create(data)
@classmethod
def find(cls, *args, **kwargs):
return [cls.create(x) for x in cls.collection().find(*args, **kwargs)]
def save(self):
if not self.key():
return self.insert()
return self.update()
def insert(self):
result = self.collection().insert_one(self.to_mongo())
self._key = result.inserted_id.__str__()
self.__store.set('_id', self.key())
return True
def update(self, upsert=False):
if not self.key():
return self.insert()
self.collection().update_one({'_id': self.pk()}, {'$set': self.to_mongo()}, upsert=upsert)
return True
def delete(self):
if not self.key():
return False
self.collection().delete_one({'_id': self.pk()})
return True
@classmethod
def exists(cls, key, value):
return cls.find(filter={key: value}, limit=1).__len__() > 0
def to_dict(self):
return self.__store.to_dict()
def to_mongo(self):
store = self.to_dict()
now = datetime.datetime.now()
if not 'created_at' in store.keys():
store['created_at'] = now
store['modified_at'] = now
if '_id' in store.keys():
del store['_id']
return store
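# --- Illustrative sketch (not part of the original module) ------------------
# How a concrete recorder is assembled from Structure and Recorder; the field
# names and the MongoDB host/database are placeholder assumptions.
class UserStructure(Structure):
    def _validate(self):
        # default every expected field so the DataStore always has these keys
        self.setdefault('name', '')
        self.setdefault('age', 0)
class User(Recorder):
    struct = UserStructure()
    class Meta:
        # assumed local MongoDB instance; recent pymongo connects lazily
        database = get_database('example_db', 'localhost')
# usage (assumed): user = User.new({'name': 'alice'}); user.save(); User.get(user.key())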
| teitei-tk/ice-pick | icePick/recorder.py | Python | mit | 4,310 | 0.000928 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/conf/locale/hr/formats.py | Python | bsd-3-clause | 1,758 | 0 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='openstacknagios',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.1.0',
description='nagios/icinga plugins to monitor an openstack installation',
long_description=long_description,
# The project's main homepage.
url='https://github.com/cirrax/openstack-nagios-plugins',
# Author details
author='Benedikt Trefzer',
author_email='benedikt.trefzer@cirrax.com',
# Choose your license
license='GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
# Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='openstack icinga nagios check',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[
'gnocchiclient',
'nagiosplugin',
'python-novaclient',
'python-keystoneclient',
'python-neutronclient',
'python-cinderclient',
'python-ceilometerclient',
'python-ironicclient',
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'check_nova-images=openstacknagios.nova.Images:main',
'check_nova-services=openstacknagios.nova.Services:main',
'check_nova-hypervisors=openstacknagios.nova.Hypervisors:main',
'check_cinder-services=openstacknagios.cinder.Services:main',
'check_neutron-agents=openstacknagios.neutron.Agents:main',
'check_neutron-floatingips=openstacknagios.neutron.Floatingips:main',
'check_neutron-networkipavailabilities=openstacknagios.neutron.Networkipavailabilities:main',
'check_neutron-routers=openstacknagios.neutron.Routers:main',
'check_keystone-token=openstacknagios.keystone.Token:main',
'check_keystone-endpoints=openstacknagios.keystone.Endpoints:main',
'check_ceilometer-statistics=openstacknagios.ceilometer.Statistics:main',
'check_gnocchi-metrics=openstacknagios.gnocchi.Metrics:main',
'check_gnocchi-status=openstacknagios.gnocchi.Status:main',
'check_rally-results=openstacknagios.rally.Results:main',
'check_ironic-nodes=openstacknagios.ironic.Nodes:main',
'check_ironic-node-consoles=openstacknagios.ironic.Consoles:main',
],
},
)
| ChameleonCloud/openstack-nagios-plugins | setup.py | Python | gpl-3.0 | 4,527 | 0.001546 |
# chat_client.py
import sys
import socket
import select
def chat_client():
    if len(sys.argv) < 3:
print('Usage : python chat_client.py hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
# connect to remote host
try :
s.connect((host, port))
except :
print('Unable to connect')
sys.exit()
print('Connected to remote host. You can start sending messages')
sys.stdout.write('[Me] '); sys.stdout.flush()
while 1:
socket_list = [sys.stdin, s]
# Get the list sockets which are readable
ready_to_read,ready_to_write,in_error = select.select(socket_list , [], [])
for sock in ready_to_read:
if sock == s:
# incoming message from remote server, s
data = sock.recv(4096)
if not data :
print('\nDisconnected from chat server')
sys.exit()
else :
#print data
sys.stdout.write(data)
sys.stdout.write('[Me] '); sys.stdout.flush()
else :
# user entered a message
msg = sys.stdin.readline()
s.send(msg)
sys.stdout.write('[Me] '); sys.stdout.flush()
if __name__ == "__main__":
sys.exit(chat_client())
| Professor-RED/Erik | TestPrograms/Chat_Client.py | Python | mit | 1,527 | 0.018337 |
class Solution(object):
def __init__(self):
self.l=[]
def helper(self,root,level):
if not root:
return None
else:
if level<len(self.l):
self.l[level].append(root.val)
else:
self.l.append([root.val])
self.helper(root.left,level+1)
self.helper(root.right,level+1)
return self.l
    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        self.l = []  # reset so repeated calls on the same instance do not accumulate levels
        return self.helper(root, 0)
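# --- Illustrative sketch (not part of the original solution) ----------------
# A hypothetical TreeNode (normally supplied by LeetCode) and a tiny tree to
# show the expected output shape.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, val):
            self.val = val
            self.left = None
            self.right = None
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().levelOrder(root))  # [[3], [9, 20], [15, 7]]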
| sadad111/leetcodebox | Binary Tree Level Order Traversal.py | Python | gpl-3.0 | 603 | 0.016584 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
from kubernetes import client
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.sensors.base import BaseSensorOperator
class SparkKubernetesSensor(BaseSensorOperator):
"""
Checks sparkApplication object in kubernetes cluster:
.. seealso::
For more detail about Spark Application Object have a look at the reference:
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication
:param application_name: spark Application resource name
:type application_name: str
:param namespace: the kubernetes namespace where the sparkApplication reside in
:type namespace: str
:param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>`
to Kubernetes cluster.
:type kubernetes_conn_id: str
:param attach_log: determines whether logs for driver pod should be appended to the sensor log
:type attach_log: bool
:param api_group: kubernetes api group of sparkApplication
:type api_group: str
:param api_version: kubernetes api version of sparkApplication
:type api_version: str
"""
template_fields = ("application_name", "namespace")
FAILURE_STATES = ("FAILED", "UNKNOWN")
SUCCESS_STATES = ("COMPLETED",)
def __init__(
self,
*,
application_name: str,
attach_log: bool = False,
namespace: Optional[str] = None,
kubernetes_conn_id: str = "kubernetes_default",
api_group: str = 'sparkoperator.k8s.io',
api_version: str = 'v1beta2',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_name = application_name
self.attach_log = attach_log
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
self.api_group = api_group
self.api_version = api_version
def _log_driver(self, application_state: str, response: dict) -> None:
if not self.attach_log:
return
status_info = response["status"]
if "driverInfo" not in status_info:
return
driver_info = status_info["driverInfo"]
if "podName" not in driver_info:
return
driver_pod_name = driver_info["podName"]
namespace = response["metadata"]["namespace"]
log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
try:
log = ""
for line in self.hook.get_pod_logs(driver_pod_name, namespace=namespace):
log += line.decode()
log_method(log)
except client.rest.ApiException as e:
self.log.warning(
"Could not read logs for pod %s. It may have been disposed.\n"
"Make sure timeToLiveSeconds is set on your SparkApplication spec.\n"
"underlying exception: %s",
driver_pod_name,
e,
)
def poke(self, context: Dict) -> bool:
self.log.info("Poking: %s", self.application_name)
response = self.hook.get_custom_object(
group=self.api_group,
version=self.api_version,
plural="sparkapplications",
name=self.application_name,
namespace=self.namespace,
)
try:
application_state = response["status"]["applicationState"]["state"]
except KeyError:
return False
if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
self._log_driver(application_state, response)
if application_state in self.FAILURE_STATES:
raise AirflowException(f"Spark application failed with state: {application_state}")
elif application_state in self.SUCCESS_STATES:
self.log.info("Spark application ended successfully")
return True
else:
self.log.info("Spark application is still in state: %s", application_state)
return False
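# --- Illustrative usage sketch (not part of the original module) ------------
# How the sensor is typically wired into a DAG after a SparkKubernetesOperator
# submits the application; the DAG object, task id, application name and
# namespace are assumptions for demonstration only.
#
#     monitor_spark_app = SparkKubernetesSensor(
#         task_id='monitor_spark_app',
#         application_name='example-spark-app',
#         namespace='spark-jobs',
#         attach_log=True,
#         kubernetes_conn_id='kubernetes_default',
#         dag=dag,
#     )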
| apache/incubator-airflow | airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py | Python | apache-2.0 | 5,015 | 0.001994 |
from ..base import ShopifyResource
class Location(ShopifyResource):
pass
| asiviero/shopify_python_api | shopify/resources/location.py | Python | mit | 79 | 0 |
from chatterbot import ChatBot
from chatterbot.adapters import Adapter
from .base_case import ChatBotTestCase
class AdapterValidationTests(ChatBotTestCase):
def test_invalid_storage_adapter(self):
kwargs = self.get_kwargs()
kwargs['storage_adapter'] = 'chatterbot.input.TerminalAdapter'
with self.assertRaises(Adapter.InvalidAdapterTypeException):
self.chatbot = ChatBot('Test Bot', **kwargs)
def test_valid_storage_adapter(self):
kwargs = self.get_kwargs()
kwargs['storage_adapter'] = 'chatterbot.storage.JsonFileStorageAdapter'
try:
self.chatbot = ChatBot('Test Bot', **kwargs)
except Adapter.InvalidAdapterTypeException:
self.fail('Test raised InvalidAdapterException unexpectedly!')
def test_invalid_input_adapter(self):
kwargs = self.get_kwargs()
kwargs['input_adapter'] = 'chatterbot.storage.JsonFileStorageAdapter'
with self.assertRaises(Adapter.InvalidAdapterTypeException):
self.chatbot = ChatBot('Test Bot', **kwargs)
def test_valid_input_adapter(self):
kwargs = self.get_kwargs()
kwargs['input_adapter'] = 'chatterbot.input.TerminalAdapter'
try:
self.chatbot = ChatBot('Test Bot', **kwargs)
except Adapter.InvalidAdapterTypeException:
self.fail('Test raised InvalidAdapterException unexpectedly!')
def test_invalid_output_adapter(self):
kwargs = self.get_kwargs()
kwargs['output_adapter'] = 'chatterbot.input.TerminalAdapter'
with self.assertRaises(Adapter.InvalidAdapterTypeException):
self.chatbot = ChatBot('Test Bot', **kwargs)
def test_valid_output_adapter(self):
kwargs = self.get_kwargs()
kwargs['output_adapter'] = 'chatterbot.output.TerminalAdapter'
try:
self.chatbot = ChatBot('Test Bot', **kwargs)
except Adapter.InvalidAdapterTypeException:
self.fail('Test raised InvalidAdapterException unexpectedly!')
def test_invalid_logic_adapter(self):
kwargs = self.get_kwargs()
kwargs['logic_adapters'] = ['chatterbot.input.TerminalAdapter']
with self.assertRaises(Adapter.InvalidAdapterTypeException):
self.chatbot = ChatBot('Test Bot', **kwargs)
def test_valid_logic_adapter(self):
kwargs = self.get_kwargs()
kwargs['logic_adapters'] = ['chatterbot.logic.BestMatch']
try:
self.chatbot = ChatBot('Test Bot', **kwargs)
except Adapter.InvalidAdapterTypeException:
self.fail('Test raised InvalidAdapterException unexpectedly!')
def test_valid_adapter_dictionary(self):
kwargs = self.get_kwargs()
kwargs['storage_adapter'] = {
'import_path': 'chatterbot.storage.JsonFileStorageAdapter'
}
try:
self.chatbot = ChatBot('Test Bot', **kwargs)
except Adapter.InvalidAdapterTypeException:
self.fail('Test raised InvalidAdapterException unexpectedly!')
def test_invalid_adapter_dictionary(self):
kwargs = self.get_kwargs()
kwargs['storage_adapter'] = {
'import_path': 'chatterbot.logic.BestMatch'
}
with self.assertRaises(Adapter.InvalidAdapterTypeException):
self.chatbot = ChatBot('Test Bot', **kwargs)
class MultiAdapterTests(ChatBotTestCase):
def test_add_logic_adapter(self):
count_before = len(self.chatbot.logic.adapters)
self.chatbot.logic.add_adapter(
'chatterbot.logic.BestMatch'
)
self.assertIsLength(self.chatbot.logic.adapters, count_before + 1)
def test_insert_logic_adapter(self):
self.chatbot.logic.add_adapter('chatterbot.logic.TimeLogicAdapter')
self.chatbot.logic.add_adapter('chatterbot.logic.BestMatch')
self.chatbot.logic.insert_logic_adapter('chatterbot.logic.MathematicalEvaluation', 1)
self.assertEqual(
type(self.chatbot.logic.adapters[1]).__name__,
'MathematicalEvaluation'
)
def test_remove_logic_adapter(self):
self.chatbot.logic.add_adapter('chatterbot.logic.TimeLogicAdapter')
self.chatbot.logic.add_adapter('chatterbot.logic.MathematicalEvaluation')
adapter_count = len(self.chatbot.logic.adapters)
removed = self.chatbot.logic.remove_logic_adapter('MathematicalEvaluation')
self.assertTrue(removed)
self.assertIsLength(self.chatbot.logic.adapters, adapter_count - 1)
def test_remove_logic_adapter_not_found(self):
self.chatbot.logic.add_adapter('chatterbot.logic.TimeLogicAdapter')
adapter_count = len(self.chatbot.logic.adapters)
removed = self.chatbot.logic.remove_logic_adapter('MathematicalEvaluation')
self.assertFalse(removed)
self.assertIsLength(self.chatbot.logic.adapters, adapter_count)
| maclogan/VirtualPenPal | tests/test_adapter_validation.py | Python | bsd-3-clause | 4,912 | 0.000814 |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
class BasicMatchString(object):
""" Creates a string containing fields in match for the given rule. """
def __init__(self, rule, match):
self.rule = rule
self.match = match
def _ensure_new_line(self):
while self.text[-2:] != '\n\n':
self.text += '\n'
def _add_custom_alert_text(self):
missing = '<MISSING VALUE>'
alert_text = unicode(self.rule.get('alert_text', ''))
if 'alert_text_args' in self.rule:
alert_text_args = self.rule.get('alert_text_args')
alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_text_values)):
if alert_text_values[i] is None:
alert_value = self.rule.get(alert_text_args[i])
if alert_value:
alert_text_values[i] = alert_value
alert_text_values = [missing if val is None else val for val in alert_text_values]
alert_text = alert_text.format(*alert_text_values)
elif 'alert_text_kw' in self.rule:
kw = {}
for name, kw_name in self.rule.get('alert_text_kw').items():
val = lookup_es_key(self.match, name)
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
if val is None:
val = self.rule.get(name)
kw[kw_name] = missing if val is None else val
alert_text = alert_text.format(**kw)
self.text += alert_text
def _add_rule_text(self):
self.text += self.rule['type'].get_match_str(self.match)
def _add_top_counts(self):
for key, counts in self.match.items():
if key.startswith('top_events_'):
self.text += '%s:\n' % (key[11:])
top_events = counts.items()
if not top_events:
self.text += 'No events found.\n'
else:
top_events.sort(key=lambda x: x[1], reverse=True)
for term, count in top_events:
self.text += '%s: %s\n' % (term, count)
self.text += '\n'
def _add_match_items(self):
match_items = self.match.items()
match_items.sort(key=lambda x: x[0])
for key, value in match_items:
if key.startswith('top_events_'):
continue
            value_str = unicode(value)
            value_str = value_str.replace('\\n', '\n')
if type(value) in [list, dict]:
try:
value_str = self._pretty_print_as_json(value)
except TypeError:
# Non serializable object, fallback to str
pass
self.text += '%s: %s\n' % (key, value_str)
def _pretty_print_as_json(self, blob):
try:
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
except UnicodeDecodeError:
# This blob contains non-unicode, so lets pretend it's Latin-1 to show something
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)
def __str__(self):
self.text = ''
if 'alert_text' not in self.rule:
self.text += self.rule['name'] + '\n\n'
self._add_custom_alert_text()
self._ensure_new_line()
if self.rule.get('alert_text_type') != 'alert_text_only':
self._add_rule_text()
self._ensure_new_line()
if self.rule.get('top_count_keys'):
self._add_top_counts()
if self.rule.get('alert_text_type') != 'exclude_fields':
self._add_match_items()
return self.text
class JiraFormattedMatchString(BasicMatchString):
def _add_match_items(self):
match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])
json_blob = self._pretty_print_as_json(match_items)
preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)
self.text += preformatted_text
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
def __init__(self, rule):
self.rule = rule
# pipeline object is created by ElastAlerter.send_alert()
        # and attached to each alerter used by a rule before calling alert()
self.pipeline = None
self.resolve_rule_references(self.rule)
def resolve_rule_references(self, root):
# Support referencing other top-level rule properties to avoid redundant copy/paste
if type(root) == list:
# Make a copy since we may be modifying the contents of the structure we're walking
for i, item in enumerate(copy.copy(root)):
if type(item) == dict or type(item) == list:
self.resolve_rule_references(root[i])
else:
root[i] = self.resolve_rule_reference(item)
elif type(root) == dict:
# Make a copy since we may be modifying the contents of the structure we're walking
for key, value in root.copy().iteritems():
if type(value) == dict or type(value) == list:
self.resolve_rule_references(root[key])
else:
root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = unicode(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
def alert(self, match):
""" Send an alert. Match is a dictionary of information about the alert.
:param match: A dictionary of relevant information to the alert.
"""
raise NotImplementedError()
def get_info(self):
""" Returns a dictionary of data related to this alert. At minimum, this should contain
a field type corresponding to the type of Alerter. """
return {'type': 'Unknown'}
def create_title(self, matches):
""" Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.
:param matches: A list of dictionaries of relevant information to the alert.
"""
if 'alert_subject' in self.rule:
return self.create_custom_title(matches)
return self.create_default_title(matches)
def create_custom_title(self, matches):
alert_subject = unicode(self.rule['alert_subject'])
if 'alert_subject_args' in self.rule:
alert_subject_args = self.rule['alert_subject_args']
alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_subject_values)):
if alert_subject_values[i] is None:
alert_value = self.rule.get(alert_subject_args[i])
if alert_value:
alert_subject_values[i] = alert_value
alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values]
return alert_subject.format(*alert_subject_values)
return alert_subject
def create_alert_body(self, matches):
body = self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
summary_table_fields_with_count
)
text_table = Texttable()
text_table.header(summary_table_fields_with_count)
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
for keys, count in match_aggregation.iteritems():
text_table.add_row([key for key in keys] + [count])
text += text_table.draw() + '\n\n'
return unicode(text)
def create_default_title(self, matches):
return self.rule['name']
def get_account(self, account_file):
""" Gets the username and password from an account file.
:param account_file: Name of the file which contains user and password information.
"""
account_conf = yaml_loader(account_file)
if 'user' not in account_conf or 'password' not in account_conf:
raise EAException('Account file must have user and password fields')
self.user = account_conf['user']
self.password = account_conf['password']
class StompAlerter(Alerter):
""" The stomp alerter publishes alerts via stomp to a broker. """
required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])
def alert(self, matches):
alerts = []
qk = self.rule.get('query_key', None)
fullmessage = {}
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = match[qk]
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
fullmessage['alerts'] = alerts
fullmessage['rule'] = self.rule['name']
fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))
fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fullmessage['body'] = self.create_alert_body(matches)
self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
self.stomp_login = self.rule.get('stomp_login', 'admin')
self.stomp_password = self.rule.get('stomp_password', 'admin')
self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')
conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])
conn.start()
conn.connect(self.stomp_login, self.stomp_password)
conn.send(self.stomp_destination, json.dumps(fullmessage))
conn.disconnect()
def get_info(self):
return {'type': 'stomp'}
class DebugAlerter(Alerter):
""" The debug alerter uses a Python logger (by default, alerting to terminal). """
def alert(self, matches):
qk = self.rule.get('query_key', None)
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
def get_info(self):
return {'type': 'debug'}
class EmailAlerter(Alerter):
""" Sends an email alert """
required_options = frozenset(['email'])
def __init__(self, *args):
super(EmailAlerter, self).__init__(*args)
self.smtp_host = self.rule.get('smtp_host', 'localhost')
self.smtp_ssl = self.rule.get('smtp_ssl', False)
self.from_addr = self.rule.get('from_addr', 'ElastAlert')
self.smtp_port = self.rule.get('smtp_port')
if self.rule.get('smtp_auth_file'):
self.get_account(self.rule['smtp_auth_file'])
self.smtp_key_file = self.rule.get('smtp_key_file')
self.smtp_cert_file = self.rule.get('smtp_cert_file')
# Convert email to a list if it isn't already
if isinstance(self.rule['email'], basestring):
self.rule['email'] = [self.rule['email']]
        # If there is a cc then also convert it to a list if it isn't
cc = self.rule.get('cc')
if cc and isinstance(cc, basestring):
self.rule['cc'] = [self.rule['cc']]
# If there is a bcc then also convert it to a list if it isn't
bcc = self.rule.get('bcc')
if bcc and isinstance(bcc, basestring):
self.rule['bcc'] = [self.rule['bcc']]
add_suffix = self.rule.get('email_add_domain')
if add_suffix and not add_suffix.startswith('@'):
self.rule['email_add_domain'] = '@' + add_suffix
def alert(self, matches):
body = self.create_alert_body(matches)
# Add JIRA ticket if it exists
if self.pipeline is not None and 'jira_ticket' in self.pipeline:
url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
body += '\nJIRA ticket: %s' % (url)
to_addr = self.rule['email']
if 'email_from_field' in self.rule:
recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
if isinstance(recipient, basestring):
if '@' in recipient:
to_addr = [recipient]
elif 'email_add_domain' in self.rule:
to_addr = [recipient + self.rule['email_add_domain']]
elif isinstance(recipient, list):
to_addr = recipient
if 'email_add_domain' in self.rule:
to_addr = [name + self.rule['email_add_domain'] for name in to_addr]
email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
email_msg['Subject'] = self.create_title(matches)
email_msg['To'] = ', '.join(to_addr)
email_msg['From'] = self.from_addr
email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
email_msg['Date'] = formatdate()
if self.rule.get('cc'):
email_msg['CC'] = ','.join(self.rule['cc'])
to_addr = to_addr + self.rule['cc']
if self.rule.get('bcc'):
to_addr = to_addr + self.rule['bcc']
try:
if self.smtp_ssl:
if self.smtp_port:
self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
if self.smtp_port:
self.smtp = SMTP(self.smtp_host, self.smtp_port)
else:
self.smtp = SMTP(self.smtp_host)
self.smtp.ehlo()
if self.smtp.has_extn('STARTTLS'):
self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
if 'smtp_auth_file' in self.rule:
self.smtp.login(self.user, self.password)
        except SMTPAuthenticationError as e:
            # Catch the more specific authentication error first; it is a subclass of SMTPException
            raise EAException("SMTP username/password rejected: %s" % (e))
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
self.smtp.close()
elastalert_logger.info("Sent email to %s" % (to_addr))
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
# If the rule has a query_key, add that value plus timestamp to subject
if 'query_key' in self.rule:
qk = matches[0].get(self.rule['query_key'])
if qk:
subject += ' - %s' % (qk)
return subject
def get_info(self):
return {'type': 'email',
'recipients': self.rule['email']}
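# Illustrative rule snippet (not part of ElastAlert) showing the options the EmailAlerter
# above reads from self.rule; all values are hypothetical:
#   alert: email
#   email: ["oncall@example.com"]
#   smtp_host: smtp.example.com
#   smtp_ssl: true
#   smtp_auth_file: /etc/elastalert/smtp_auth.yaml   # YAML file with 'user' and 'password'
#   from_addr: elastalert@example.com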
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
'jira_account_file',
'jira_assignee',
'jira_bump_after_inactivity',
'jira_bump_in_statuses',
'jira_bump_not_in_statuses',
'jira_bump_tickets',
'jira_component',
'jira_components',
'jira_description',
'jira_ignore_in_title',
'jira_issuetype',
'jira_label',
'jira_labels',
'jira_max_age',
'jira_priority',
'jira_project',
'jira_server',
'jira_watchers',
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
]
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age)
self.watchers = self.rule.get('jira_watchers')
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
            intersection = list(set(self.bump_in_statuses) & set(self.bump_not_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
logging.warning(msg)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
try:
if self.priority is not None:
self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
except KeyError:
logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys()))
def get_arbitrary_fields(self):
# This API returns metadata about all the fields defined on the jira server (built-ins and custom ones)
fields = self.client.fields()
for jira_field, value in self.rule.iteritems():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list:
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
                if 'schema' not in field or 'type' not in field['schema']:
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says that these are strings, but
                        # in reality, they are required to be provided as an object).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says that these are strings, but
                        # in reality, they are required to be provided as an object).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
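    # Illustrative example (not part of ElastAlert); the option name is hypothetical:
    # a rule option such as  jira_customer_impact: High  is not in known_field_list, so it
    # is normalized to "customer impact", matched against the server's field metadata by
    # name or id, and then set on the issue according to the schema handling above.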
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
        for x, priority in enumerate(priorities):
            self.priority_ids[x] = priority.id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if 'jira_ignore_in_title' in self.rule:
title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
title = title.replace('\\', '\\\\')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
if self.bump_in_statuses:
jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
if self.bump_not_in_statuses:
jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = unicode(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)
if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
if self.pipeline is not None:
self.pipeline['jira_ticket'] = None
self.pipeline['jira_server'] = self.server
return None
elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = ticket
self.pipeline['jira_server'] = self.server
return None
self.jira_args['summary'] = title
self.jira_args['description'] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You can not add watchers on initial creation. Only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception(
"Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(
watcher,
ex
)), None, sys.exc_info()[2]
except JIRAError as e:
raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e))
elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = u'{{noformat}}{0}{{noformat}}'.format(text)
return text
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(CommandAlerter, self).__init__(*args)
self.last_command = []
self.shell = False
if isinstance(self.rule['command'], basestring):
self.shell = True
if '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
self.new_style_string_format = False
if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
self.new_style_string_format = True
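    # Illustrative rule snippets (not part of ElastAlert) for the two formatting modes
    # handled in alert() below; the 'username' field is hypothetical:
    #   old style:  command: ["/bin/send_alert", "--user", "%(username)s"]
    #   new style:  command: ["/bin/send_alert", "--user", "{match[username]}"]
    #               new_style_string_format: true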
def alert(self, matches):
# Format the command and arguments
try:
if self.new_style_string_format:
command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]
else:
command = [command_arg % matches[0] for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
if self.rule.get('pipe_match_json'):
match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
stdout, stderr = subp.communicate(input=match_json)
if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
""" Send alert using AWS SNS service """
required_options = frozenset(['sns_topic_arn'])
def __init__(self, *args):
super(SnsAlerter, self).__init__(*args)
self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
self.aws_access_key_id = self.rule.get('aws_access_key_id')
self.aws_secret_access_key = self.rule.get('aws_secret_access_key')
self.aws_region = self.rule.get('aws_region', 'us-east-1')
self.profile = self.rule.get('boto_profile', None) # Deprecated
self.profile = self.rule.get('aws_profile', None)
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
session = boto3.Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
profile_name=self.profile
)
sns_client = session.client('sns')
sns_client.publish(
TopicArn=self.sns_topic_arn,
Message=body,
Subject=self.create_title(matches)
)
elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class HipChatAlerter(Alerter):
""" Creates a HipChat room notification for each alert """
required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])
def __init__(self, rule):
super(HipChatAlerter, self).__init__(rule)
self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')
self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')
self.hipchat_auth_token = self.rule['hipchat_auth_token']
self.hipchat_room_id = self.rule['hipchat_room_id']
self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')
self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)
self.hipchat_notify = self.rule.get('hipchat_notify', True)
self.hipchat_from = self.rule.get('hipchat_from', '')
self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (
self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)
self.hipchat_proxy = self.rule.get('hipchat_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# HipChat sends 400 bad request on messages longer than 10000 characters
if (len(body) > 9999):
body = body[:9980] + '..(truncated)'
# Use appropriate line ending for text/html
if self.hipchat_message_format == 'html':
body = body.replace('\n', '<br />')
# Post to HipChat
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
payload = {
'color': self.hipchat_msg_color,
'message': body,
'message_format': self.hipchat_message_format,
'notify': self.hipchat_notify,
'from': self.hipchat_from
}
try:
if self.hipchat_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,
verify=not self.hipchat_ignore_ssl_errors,
proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to HipChat: %s" % e)
elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
def get_info(self):
return {'type': 'hipchat',
'hipchat_room_id': self.hipchat_room_id}
class MsTeamsAlerter(Alerter):
""" Creates a Microsoft Teams Conversation Message for each alert """
required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])
def __init__(self, rule):
super(MsTeamsAlerter, self).__init__(rule)
self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url']
if isinstance(self.ms_teams_webhook_url, basestring):
self.ms_teams_webhook_url = [self.ms_teams_webhook_url]
self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)
self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')
self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)
self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')
def format_body(self, body):
body = body.encode('UTF-8')
if self.ms_teams_alert_fixed_width:
body = body.replace('`', "'")
body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '')
return body
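    # Illustrative example (not part of ElastAlert) of the fixed-width transform above:
    #   input:  "field: value\ncount: 3"
    #   output: "```field: value```\n\n```count: 3```"
    # i.e. each line ends up in its own triple-backtick span so Teams renders it monospaced.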
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to Teams
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None
payload = {
'@type': 'MessageCard',
'@context': 'http://schema.org/extensions',
'summary': self.ms_teams_alert_summary,
'title': self.create_title(matches),
'text': body
}
if self.ms_teams_theme_color != '':
payload['themeColor'] = self.ms_teams_theme_color
for url in self.ms_teams_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ms teams: %s" % e)
elastalert_logger.info("Alert sent to MS Teams")
def get_info(self):
return {'type': 'ms_teams',
'ms_teams_webhook_url': self.ms_teams_webhook_url}
class SlackAlerter(Alerter):
""" Creates a Slack room message for each alert """
required_options = frozenset(['slack_webhook_url'])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule['slack_webhook_url']
if isinstance(self.slack_webhook_url, basestring):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get('slack_proxy', None)
self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
self.slack_channel_override = self.rule.get('slack_channel_override', '')
self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
self.slack_text_string = self.rule.get('slack_text_string', '')
def format_body(self, body):
# https://api.slack.com/docs/formatting
body = body.encode('UTF-8')
body = body.replace('&', '&')
body = body.replace('<', '<')
body = body.replace('>', '>')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to slack
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.slack_proxy} if self.slack_proxy else None
payload = {
'username': self.slack_username_override,
'channel': self.slack_channel_override,
'parse': self.slack_parse_override,
'text': self.slack_text_string,
'attachments': [
{
'color': self.slack_msg_color,
'title': self.create_title(matches),
'text': body,
'mrkdwn_in': ['text', 'pretext'],
'fields': []
}
]
}
if self.slack_icon_url_override != '':
payload['icon_url'] = self.slack_icon_url_override
else:
payload['icon_emoji'] = self.slack_emoji_override
for url in self.slack_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
elastalert_logger.info("Alert sent to Slack")
def get_info(self):
return {'type': 'slack',
'slack_username_override': self.slack_username_override,
'slack_webhook_url': self.slack_webhook_url}
class PagerDutyAlerter(Alerter):
""" Create an incident on PagerDuty for each alert """
required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
def __init__(self, rule):
super(PagerDutyAlerter, self).__init__(rule)
self.pagerduty_service_key = self.rule['pagerduty_service_key']
self.pagerduty_client_name = self.rule['pagerduty_client_name']
self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)
self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
def alert(self, matches):
body = self.create_alert_body(matches)
# post to pagerduty
headers = {'content-type': 'application/json'}
payload = {
'service_key': self.pagerduty_service_key,
'description': self.create_title(matches),
'event_type': 'trigger',
'incident_key': self.get_incident_key(matches),
'client': self.pagerduty_client_name,
'details': {
"information": body.encode('UTF-8'),
},
}
# set https proxy, if it was provided
proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
try:
response = requests.post(
self.url,
data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),
headers=headers,
proxies=proxies
)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to pagerduty: %s" % e)
elastalert_logger.info("Trigger sent to PagerDuty")
def get_incident_key(self, matches):
if self.pagerduty_incident_key_args:
incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args]
# Populate values with rule level properties too
for i in range(len(incident_key_values)):
if incident_key_values[i] is None:
key_value = self.rule.get(self.pagerduty_incident_key_args[i])
if key_value:
incident_key_values[i] = key_value
incident_key_values = ['<MISSING VALUE>' if val is None else val for val in incident_key_values]
return self.pagerduty_incident_key.format(*incident_key_values)
else:
return self.pagerduty_incident_key
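    # Illustrative example (not part of ElastAlert); the field names are hypothetical:
    #   pagerduty_incident_key: "elastalert {0} {1}"
    #   pagerduty_incident_key_args: ["hostname", "service"]
    # yields e.g. "elastalert web-1 nginx", falling back to rule-level values or
    # "<MISSING VALUE>" when a field is absent from the match.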
def get_info(self):
return {'type': 'pagerduty',
'pagerduty_client_name': self.pagerduty_client_name}
class ExotelAlerter(Alerter):
required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])
def __init__(self, rule):
super(ExotelAlerter, self).__init__(rule)
self.exotel_account_sid = self.rule['exotel_account_sid']
self.exotel_auth_token = self.rule['exotel_auth_token']
self.exotel_to_number = self.rule['exotel_to_number']
self.exotel_from_number = self.rule['exotel_from_number']
self.sms_body = self.rule.get('exotel_message_body', '')
def alert(self, matches):
client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
try:
message_body = self.rule['name'] + self.sms_body
response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body)
if response != 200:
raise EAException("Error posting to Exotel, response code is %s" % response)
except:
raise EAException("Error posting to Exotel"), None, sys.exc_info()[2]
elastalert_logger.info("Trigger sent to Exotel")
def get_info(self):
return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])
def __init__(self, rule):
super(TwilioAlerter, self).__init__(rule)
self.twilio_account_sid = self.rule['twilio_account_sid']
self.twilio_auth_token = self.rule['twilio_auth_token']
self.twilio_to_number = self.rule['twilio_to_number']
self.twilio_from_number = self.rule['twilio_from_number']
def alert(self, matches):
client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)
try:
client.messages.create(body=self.rule['name'],
to=self.twilio_to_number,
from_=self.twilio_from_number)
except TwilioRestException as e:
raise EAException("Error posting to twilio: %s" % e)
elastalert_logger.info("Trigger sent to Twilio")
def get_info(self):
return {'type': 'twilio',
'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
""" Creates a VictorOps Incident for each alert """
required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])
def __init__(self, rule):
super(VictorOpsAlerter, self).__init__(rule)
self.victorops_api_key = self.rule['victorops_api_key']
self.victorops_routing_key = self.rule['victorops_routing_key']
self.victorops_message_type = self.rule['victorops_message_type']
self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
self.victorops_api_key, self.victorops_routing_key)
self.victorops_proxy = self.rule.get('victorops_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# post to victorops
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
payload = {
"message_type": self.victorops_message_type,
"entity_display_name": self.victorops_entity_display_name,
"monitoring_tool": "ElastAlert",
"state_message": body
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to VictorOps: %s" % e)
elastalert_logger.info("Trigger sent to VictorOps")
def get_info(self):
return {'type': 'victorops',
'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
""" Send a Telegram message via bot api for each alert """
required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])
def __init__(self, rule):
super(TelegramAlerter, self).__init__(rule)
self.telegram_bot_token = self.rule['telegram_bot_token']
self.telegram_room_id = self.rule['telegram_room_id']
self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
self.telegram_proxy = self.rule.get('telegram_proxy', None)
def alert(self, matches):
body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
body += u' ```'
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
payload = {
'chat_id': self.telegram_room_id,
'text': body,
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Telegram: %s" % e)
elastalert_logger.info(
"Alert sent to Telegram room %s" % self.telegram_room_id)
def get_info(self):
return {'type': 'telegram',
'telegram_room_id': self.telegram_room_id}
class GitterAlerter(Alerter):
""" Creates a Gitter activity message for each alert """
required_options = frozenset(['gitter_webhook_url'])
def __init__(self, rule):
super(GitterAlerter, self).__init__(rule)
self.gitter_webhook_url = self.rule['gitter_webhook_url']
self.gitter_proxy = self.rule.get('gitter_proxy', None)
self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')
def alert(self, matches):
body = self.create_alert_body(matches)
# post to Gitter
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
payload = {
'message': body,
'level': self.gitter_msg_level
}
try:
response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Gitter: %s" % e)
elastalert_logger.info("Alert sent to Gitter")
def get_info(self):
return {'type': 'gitter',
'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
""" Creates a ServiceNow alert """
required_options = set([
'username',
'password',
'servicenow_rest_url',
'short_description',
'comments',
'assignment_group',
'category',
'subcategory',
'cmdb_ci',
'caller_id'
])
def __init__(self, rule):
super(ServiceNowAlerter, self).__init__(rule)
self.servicenow_rest_url = self.rule['servicenow_rest_url']
self.servicenow_proxy = self.rule.get('servicenow_proxy', None)
def alert(self, matches):
for match in matches:
# Parse everything into description.
description = str(BasicMatchString(self.rule, match))
# Set proper headers
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
payload = {
"description": description,
"short_description": self.rule['short_description'],
"comments": self.rule['comments'],
"assignment_group": self.rule['assignment_group'],
"category": self.rule['category'],
"subcategory": self.rule['subcategory'],
"cmdb_ci": self.rule['cmdb_ci'],
"caller_id": self.rule["caller_id"]
}
try:
response = requests.post(
self.servicenow_rest_url,
auth=(self.rule['username'], self.rule['password']),
headers=headers,
data=json.dumps(payload, cls=DateTimeEncoder),
proxies=proxies
)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ServiceNow: %s" % e)
elastalert_logger.info("Alert sent to ServiceNow")
def get_info(self):
return {'type': 'ServiceNow',
'self.servicenow_rest_url': self.servicenow_rest_url}
class HTTPPostAlerter(Alerter):
""" Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """
def __init__(self, rule):
super(HTTPPostAlerter, self).__init__(rule)
post_url = self.rule.get('http_post_url')
if isinstance(post_url, basestring):
post_url = [post_url]
self.post_url = post_url
self.post_proxy = self.rule.get('http_post_proxy')
self.post_payload = self.rule.get('http_post_payload', {})
self.post_static_payload = self.rule.get('http_post_static_payload', {})
self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)
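    # Illustrative rule snippet (not part of ElastAlert); names and URL are hypothetical:
    #   http_post_url: "https://example.com/elastalert-hook"
    #   http_post_payload:
    #     src_ip: clientip            # POST body key <- field looked up in each match
    #   http_post_static_payload:
    #     source: elastalert
    # When http_post_payload is set, only those keys plus the static payload are sent
    # unless http_post_all_values is explicitly true.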
def alert(self, matches):
""" Each match will trigger a POST to the specified endpoint(s). """
for match in matches:
payload = match if self.post_all_values else {}
payload.update(self.post_static_payload)
for post_key, es_key in self.post_payload.items():
payload[post_key] = lookup_es_key(match, es_key)
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.post_proxy} if self.post_proxy else None
for url in self.post_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting HTTP Post alert: %s" % e)
elastalert_logger.info("HTTP Post alert sent.")
def get_info(self):
return {'type': 'http_post',
'http_post_webhook_url': self.post_url}
| rprabhat/elastalert | elastalert/alerts.py | Python | apache-2.0 | 60,947 | 0.00274 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from unittest import TestCase
import mock
from preggy import expect
import thumbor.server
from tests.fixtures.custom_error_handler import ErrorHandler as CustomErrorHandler
from thumbor.app import ThumborServiceApp
from thumbor.config import Config
from thumbor.server import (
configure_log,
get_application,
get_as_integer,
get_config,
get_context,
get_importer,
main,
run_server,
validate_config,
)
class ServerTestCase(TestCase):
def test_can_get_value_as_integer(self):
expect(get_as_integer("1")).to_equal(1)
expect(get_as_integer("a")).to_be_null()
expect(get_as_integer("")).to_be_null()
expect(get_as_integer(None)).to_be_null()
def test_can_get_config_from_path(self):
config = get_config("./tests/fixtures/thumbor_config_server_test.conf")
with mock.patch.dict("os.environ", {"ENGINE": "test"}):
expect(config).not_to_be_null()
expect(config.ALLOWED_SOURCES).to_be_like(["mydomain.com"])
expect(config.ENGINE).to_be_like("thumbor.engines.pil")
def test_can_get_config_with_env_enabled(self):
config = get_config("./tests/fixtures/thumbor_config_server_test.conf", True)
with mock.patch.dict("os.environ", {"ENGINE": "test"}):
expect(config).not_to_be_null()
expect(config.ALLOWED_SOURCES).to_be_like(["mydomain.com"])
expect(config.ENGINE).to_be_like("test")
@mock.patch("logging.basicConfig")
def test_can_configure_log_from_config(self, basic_config_mock):
conf = Config()
configure_log(conf, "DEBUG")
params = dict(
datefmt="%Y-%m-%d %H:%M:%S",
level=10,
format="%(asctime)s %(name)s:%(levelname)s %(message)s",
)
basic_config_mock.assert_called_with(**params)
@mock.patch("logging.config.dictConfig")
def test_can_configure_log_from_dict_config(self, dict_config_mock):
conf = Config(THUMBOR_LOG_CONFIG={"level": "INFO"})
configure_log(conf, "DEBUG")
params = dict(level="INFO",)
dict_config_mock.assert_called_with(params)
def test_can_import_default_modules(self):
conf = Config()
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.filters).not_to_be_empty()
def test_can_import_with_custom_error_handler_class(self):
conf = Config(
USE_CUSTOM_ERROR_HANDLING=True,
ERROR_HANDLER_MODULE="tests.fixtures.custom_error_handler",
)
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.error_handler_class).not_to_be_null()
expect(importer.error_handler_class).to_be_instance_of(CustomErrorHandler)
def test_validate_config_security_key(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY=None)
with expect.error_to_happen(
RuntimeError,
message="No security key was found for this instance of thumbor. "
"Please provide one using the conf file or a security key file.",
):
validate_config(conf, server_parameters)
def test_validate_config_security_key_from_config(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="something")
validate_config(conf, server_parameters)
expect(server_parameters.security_key).to_equal("something")
@mock.patch.object(thumbor.server, "which")
def test_validate_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="test", USE_GIFSICLE_ENGINE=True)
which_mock.return_value = "/usr/bin/gifsicle"
validate_config(conf, server_parameters)
expect(server_parameters.gifsicle_path).to_equal("/usr/bin/gifsicle")
@mock.patch.object(thumbor.server, "which")
def test_validate_null_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="test", USE_GIFSICLE_ENGINE=True)
which_mock.return_value = None
with expect.error_to_happen(
RuntimeError,
message="If using USE_GIFSICLE_ENGINE configuration to True, "
"the `gifsicle` binary must be in the PATH and must be an executable.",
):
validate_config(conf, server_parameters)
def test_get_context(self):
server_parameters = mock.Mock(
security_key=None, app_class="thumbor.app.ThumborServiceApp"
)
conf = Config(SECURITY_KEY="test")
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
expect(context).not_to_be_null()
def test_get_application(self):
server_parameters = mock.Mock(
security_key=None, app_class="thumbor.app.ThumborServiceApp"
)
conf = Config(SECURITY_KEY="test")
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
app = get_application(context)
expect(app).not_to_be_null()
expect(app).to_be_instance_of(ThumborServiceApp)
@mock.patch.object(thumbor.server, "HTTPServer")
def test_can_run_server_with_default_params(self, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=None, port=1234, ip="0.0.0.0", processes=1)
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
run_server(application, context)
server_instance_mock.bind.assert_called_with(1234, "0.0.0.0")
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, "HTTPServer")
def test_can_run_server_with_multiple_processes(self, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=None, port=1234, ip="0.0.0.0", processes=5)
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
run_server(application, context)
server_instance_mock.start.assert_called_with(5)
@mock.patch.object(thumbor.server, "HTTPServer")
@mock.patch.object(thumbor.server, "socket_from_fd")
def test_can_run_server_with_fd(self, socket_from_fd_mock, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=11, port=1234, ip="0.0.0.0", processes=1)
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
socket_from_fd_mock.return_value = "socket mock"
run_server(application, context)
server_instance_mock.add_socket.assert_called_with("socket mock")
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, "HTTPServer")
@mock.patch.object(thumbor.server, "bind_unix_socket")
def test_can_run_server_with_unix_socket(self, bind_unix_socket, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd="/path/bin", port=1234, ip="0.0.0.0", processes=1)
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
bind_unix_socket.return_value = "socket mock"
run_server(application, context)
bind_unix_socket.assert_called_with("/path/bin")
server_instance_mock.add_socket.assert_called_with("socket mock")
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, "HTTPServer")
def test_run_server_returns_server(self, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=None, port=1234, ip="0.0.0.0")
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
server = run_server(application, context)
self.assertEqual(server, server_instance_mock)
@mock.patch.object(thumbor.server, "setup_signal_handler")
@mock.patch.object(thumbor.server, "HTTPServer")
@mock.patch.object(thumbor.server, "get_server_parameters")
@mock.patch("tornado.ioloop.IOLoop.instance", create=True)
def test_can_run_main(
self,
ioloop_mock,
get_server_parameters_mock,
server_mock,
setup_signal_handler_mock,
):
server_parameters = mock.Mock(
config_path="./tests/fixtures/thumbor_config_server_test.conf",
log_level="DEBUG",
debug=False,
security_key="sec",
app_class="thumbor.app.ThumborServiceApp",
fd=None,
ip="0.0.0.0",
port=1234,
)
get_server_parameters_mock.return_value = server_parameters
ioloop_instance_mock = mock.Mock()
ioloop_mock.return_value = ioloop_instance_mock
main()
ioloop_instance_mock.start.assert_any_call()
self.assertTrue(setup_signal_handler_mock.called)
self.assertTrue(server_mock.called)
def cleanup(self):
ServerTestCase.cleanup_called = True
| gi11es/thumbor | tests/test_server.py | Python | mit | 9,543 | 0.000838 |
# -*- coding: utf-8 -*-
import webview
"""
This example demonstrates how to localize GUI strings used by pywebview.
"""
if __name__ == '__main__':
localization = {
'global.saveFile': u'Сохранить файл',
'cocoa.menu.about': u'О программе',
'cocoa.menu.services': u'Cлужбы',
'cocoa.menu.view': u'Вид',
'cocoa.menu.hide': u'Скрыть',
'cocoa.menu.hideOthers': u'Скрыть остальные',
'cocoa.menu.showAll': u'Показать все',
'cocoa.menu.quit': u'Завершить',
'cocoa.menu.fullscreen': u'Перейти ',
'windows.fileFilter.allFiles': u'Все файлы',
        'windows.fileFilter.otherFiles': u'Остальные файлы',
'linux.openFile': u'Открыть файл',
'linux.openFiles': u'Открыть файлы',
'linux.openFolder': u'Открыть папку',
}
window_localization_override = {
'global.saveFile': u'Save file',
}
webview.create_window(
'Localization Example',
'https://pywebview.flowrl.com/hello',
localization=window_localization_override,
)
webview.start(localization=localization)
| r0x0r/pywebview | examples/localization.py | Python | bsd-3-clause | 1,251 | 0 |
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
from PyQt5.QtWidgets import QDialog
from urh.signalprocessing.Filter import Filter, FilterType
from urh.ui.ui_filter_dialog import Ui_FilterDialog
class FilterDialog(QDialog):
filter_accepted = pyqtSignal(Filter)
def __init__(self, dsp_filter: Filter, parent=None):
super().__init__(parent)
self.ui = Ui_FilterDialog()
self.ui.setupUi(self)
self.setWindowFlags(Qt.Window)
self.error_message = ""
self.set_dsp_filter_status(dsp_filter.filter_type)
self.create_connects()
def set_dsp_filter_status(self, dsp_filter_type: FilterType):
if dsp_filter_type == FilterType.moving_average:
self.ui.radioButtonMovingAverage.setChecked(True)
self.ui.lineEditCustomTaps.setEnabled(False)
self.ui.spinBoxNumTaps.setEnabled(True)
elif dsp_filter_type == FilterType.dc_correction:
self.ui.radioButtonDCcorrection.setChecked(True)
self.ui.lineEditCustomTaps.setEnabled(False)
self.ui.spinBoxNumTaps.setEnabled(False)
else:
self.ui.radioButtonCustomTaps.setChecked(True)
self.ui.spinBoxNumTaps.setEnabled(True)
self.ui.lineEditCustomTaps.setEnabled(True)
def create_connects(self):
self.ui.radioButtonMovingAverage.clicked.connect(self.on_radio_button_moving_average_clicked)
self.ui.radioButtonCustomTaps.clicked.connect(self.on_radio_button_custom_taps_clicked)
self.ui.radioButtonDCcorrection.clicked.connect(self.on_radio_button_dc_correction_clicked)
self.ui.spinBoxNumTaps.valueChanged.connect(self.set_error_status)
self.ui.lineEditCustomTaps.textEdited.connect(self.set_error_status)
self.ui.buttonBox.accepted.connect(self.on_accept_clicked)
self.ui.buttonBox.rejected.connect(self.reject)
def build_filter(self) -> Filter:
if self.ui.radioButtonMovingAverage.isChecked():
n = self.ui.spinBoxNumTaps.value()
return Filter([1/n for _ in range(n)], filter_type=FilterType.moving_average)
elif self.ui.radioButtonDCcorrection.isChecked():
return Filter([], filter_type=FilterType.dc_correction)
else:
# custom filter
try:
taps = eval(self.ui.lineEditCustomTaps.text())
try:
taps = list(map(float, taps))
self.error_message = ""
return Filter(taps)
except (ValueError, TypeError) as e:
self.error_message = "Error casting taps:\n" + str(e)
return None
except SyntaxError as e:
self.error_message = "Error parsing taps:\n" + str(e)
return None
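    # Illustrative input (not part of URH): entering "[0.25, 0.25, 0.25, 0.25]" in the
    # custom-taps line edit is parsed by eval() above into a 4-tap moving average; any
    # Python expression that map(float, ...) can consume works the same way.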
def set_error_status(self):
dsp_filter = self.build_filter()
if dsp_filter is None:
self.ui.lineEditCustomTaps.setStyleSheet("background: red")
self.ui.lineEditCustomTaps.setToolTip(self.error_message)
elif len(dsp_filter.taps) != self.ui.spinBoxNumTaps.value():
self.ui.lineEditCustomTaps.setStyleSheet("background: yellow")
self.ui.lineEditCustomTaps.setToolTip("The number of the filter taps does not match the configured number of taps. I will use your configured filter taps.")
else:
self.ui.lineEditCustomTaps.setStyleSheet("")
self.ui.lineEditCustomTaps.setToolTip("")
@pyqtSlot(bool)
def on_radio_button_moving_average_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.moving_average)
@pyqtSlot(bool)
def on_radio_button_custom_taps_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.custom)
self.set_error_status()
@pyqtSlot(bool)
def on_radio_button_dc_correction_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.dc_correction)
@pyqtSlot()
def on_accept_clicked(self):
dsp_filter = self.build_filter()
self.filter_accepted.emit(dsp_filter)
self.accept()
| jopohl/urh | src/urh/controller/dialogs/FilterDialog.py | Python | gpl-3.0 | 4,227 | 0.001183 |
"""
This file should only work on Python 3.6 and newer.
Its purpose is to test a correct installation of Python 3.
"""
from random import randint
print("Generating one thousand random numbers...")
for i in range(1000):
random_number = randint(0, 100000)
print(f"Number {i} was: {random_number}")
| PhantomAppDevelopment/python-getting-started | step-1/myscript.py | Python | mit | 308 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Creates a regular polygon (triangles, pentagrams, ...)
as a special case of a :class:`~psychopy.visual.ShapeStim`'''
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, print_function
from builtins import range
import psychopy # so we can get the __path__
from psychopy.visual.shape import BaseShapeStim
from psychopy.tools.attributetools import attributeSetter, setAttribute
import numpy
class Polygon(BaseShapeStim):
"""Creates a regular polygon (triangles, pentagrams, ...).
A special case of a :class:`~psychopy.visual.ShapeStim`.
(New in version 1.72.00)
"""
def __init__(self, win, edges=3, radius=.5, **kwargs):
"""Polygon accepts all input parameters that
:class:`~psychopy.visual.ShapeStim` accepts, except for
vertices and closeShape.
"""
# what local vars are defined (these are the init params) for use by
# __repr__
self._initParams = dir()
self._initParams.remove('self')
# kwargs isn't a parameter, but a list of params
self._initParams.remove('kwargs')
self._initParams.extend(kwargs)
self.autoLog = False # but will be changed if needed at end of init
self.__dict__['edges'] = edges
self.radius = numpy.asarray(radius)
self._calcVertices()
kwargs['closeShape'] = True # Make sure nobody messes around here
kwargs['vertices'] = self.vertices
super(Polygon, self).__init__(win, **kwargs)
def _calcVertices(self):
d = numpy.pi * 2 / self.edges
self.vertices = numpy.asarray(
[numpy.asarray((numpy.sin(e * d), numpy.cos(e * d))) * self.radius
for e in range(int(round(self.edges)))])
@attributeSetter
def edges(self, edges):
"""Number of edges of the polygon. Floats are rounded to int.
:ref:`Operations <attrib-operations>` supported.
"""
self.__dict__['edges'] = edges
self._calcVertices()
self.setVertices(self.vertices, log=False)
def setEdges(self, edges, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message"""
setAttribute(self, 'edges', edges, log, operation)
@attributeSetter
def radius(self, radius):
"""float, int, tuple, list or 2x1 array
Radius of the Polygon (distance from the center to the corners).
        May be a 2-tuple or list to stretch the polygon asymmetrically.
:ref:`Operations <attrib-operations>` supported.
Usually there's a setAttribute(value, log=False) method for each
attribute. Use this if you want to disable logging.
"""
self.__dict__['radius'] = numpy.array(radius)
self._calcVertices()
self.setVertices(self.vertices, log=False)
def setRadius(self, radius, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
"""
setAttribute(self, 'radius', radius, log, operation)
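# Illustrative usage sketch (not part of the PsychoPy sources). It assumes a working
# PsychoPy installation with a display available; window size and colors are arbitrary.
if __name__ == "__main__":
    from psychopy import visual, core
    demo_win = visual.Window(size=(400, 400), units='height')
    hexagon = Polygon(demo_win, edges=6, radius=0.3, fillColor='red', lineColor='white')
    hexagon.draw()
    demo_win.flip()
    core.wait(1.0)
    demo_win.close()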
| psychopy/versions | psychopy/visual/polygon.py | Python | gpl-3.0 | 3,361 | 0 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual adversarial text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
# Dependency imports
import tensorflow as tf
import adversarial_losses as adv_lib
import inputs as inputs_lib
import layers as layers_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags governing adversarial training are defined in adversarial_losses.py.
# Classifier
flags.DEFINE_integer('num_classes', 2, 'Number of classes for classification')
# Data path
flags.DEFINE_string('data_dir', '/tmp/IMDB',
'Directory path to preprocessed text dataset.')
flags.DEFINE_string('vocab_freq_path', None,
'Path to pre-calculated vocab frequency data. If '
'None, use FLAGS.data_dir/vocab_freq.txt.')
flags.DEFINE_integer('batch_size', 64, 'Size of the batch.')
flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT')
# Model architecture
flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.')
flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single '
'label, for optimization.')
flags.DEFINE_integer('rnn_num_layers', 1, 'Number of LSTM layers.')
flags.DEFINE_integer('rnn_cell_size', 512,
'Number of hidden units in the LSTM.')
flags.DEFINE_integer('cl_num_layers', 1,
'Number of hidden layers of classification model.')
flags.DEFINE_integer('cl_hidden_size', 30,
'Number of hidden units in classification layer.')
flags.DEFINE_integer('num_candidate_samples', -1,
'Num samples used in the sampled output layer.')
flags.DEFINE_bool('use_seq2seq_autoencoder', False,
'If True, seq2seq auto-encoder is used to pretrain. '
'If False, standard language model is used.')
# Vocabulary and embeddings
flags.DEFINE_integer('embedding_dims', 256, 'Dimensions of embedded vector.')
flags.DEFINE_integer('vocab_size', 86934,
                     'The size of the vocabulary. This value '
                     'should exactly match the number of vocabulary '
                     'entries in the dataset. The last index of the '
                     'vocabulary produced by the preprocessing code is '
                     'always <eos>, so this value also tells the model '
                     'which index represents <eos>.')
flags.DEFINE_bool('normalize_embeddings', True,
'Normalize word embeddings by vocab frequency')
# Optimization
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate while fine-tuning.')
flags.DEFINE_float('learning_rate_decay_factor', 1.0,
'Learning rate decay factor')
flags.DEFINE_boolean('sync_replicas', False, 'sync_replica or not')
flags.DEFINE_integer('replicas_to_aggregate', 1,
'The number of replicas to aggregate')
# Regularization
flags.DEFINE_float('max_grad_norm', 1.0,
'Clip the global gradient norm to this value.')
flags.DEFINE_float('keep_prob_emb', 1.0, 'keep probability on embedding layer. '
'0.5 is optimal on IMDB with virtual adversarial training.')
flags.DEFINE_float('keep_prob_lstm_out', 1.0,
'keep probability on lstm output.')
flags.DEFINE_float('keep_prob_cl_hidden', 1.0,
'keep probability on classification hidden layer')
def get_model():
if FLAGS.bidir_lstm:
return VatxtBidirModel()
else:
return VatxtModel()
class VatxtModel(object):
"""Constructs training and evaluation graphs.
Main methods: `classifier_training()`, `language_model_training()`,
and `eval_graph()`.
Variable reuse is a critical part of the model, both for sharing variables
between the language model and the classifier, and for reusing variables for
the adversarial loss calculation. To ensure correct variable reuse, all
variables are created in Keras-style layers, wherein stateful layers (i.e.
layers with variables) are represented as callable instances of the Layer
  class. Each time the Layer instance is called, it reuses the same variables.
All Layers are constructed in the __init__ method and reused in the various
graph-building functions.
"""
def __init__(self, cl_logits_input_dim=None):
self.global_step = tf.train.get_or_create_global_step()
self.vocab_freqs = _get_vocab_freqs()
# Cache VatxtInput objects
self.cl_inputs = None
self.lm_inputs = None
# Cache intermediate Tensors that are reused
self.tensors = {}
# Construct layers which are reused in constructing the LM and
# Classification graphs. Instantiating them all once here ensures that
# variable reuse works correctly.
self.layers = {}
self.layers['embedding'] = layers_lib.Embedding(
FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
self.vocab_freqs, FLAGS.keep_prob_emb)
self.layers['lstm'] = layers_lib.LSTM(
FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss')
cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
[FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)
@property
def pretrained_variables(self):
return (self.layers['embedding'].trainable_weights +
self.layers['lstm'].trainable_weights)
def classifier_training(self):
loss = self.classifier_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def language_model_training(self):
loss = self.language_model_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput object in `self.cl_inputs`
* Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False)
self.cl_inputs = inputs
embedded = self.layers['embedding'](inputs.tokens)
self.tensors['cl_embedded'] = embedded
_, next_state, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
acc = layers_lib.accuracy(logits, labels, weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
with tf.control_dependencies([inputs.save_state(next_state)]):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs LM graph from inputs to LM loss.
* Caches the VatxtInput object in `self.lm_inputs`
* Caches tensors: `lm_embedded`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=True)
self.lm_inputs = inputs
return self._lm_loss(inputs, compute_loss=compute_loss)
def _lm_loss(self,
inputs,
emb_key='lm_embedded',
lstm_layer='lstm',
lm_loss_layer='lm_loss',
loss_name='lm_loss',
compute_loss=True):
embedded = self.layers['embedding'](inputs.tokens)
self.tensors[emb_key] = embedded
lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state,
inputs.length)
if compute_loss:
loss = self.layers[lm_loss_layer](
[lstm_out, inputs.labels, inputs.weights])
with tf.control_dependencies([inputs.save_state(next_state)]):
loss = tf.identity(loss)
tf.summary.scalar(loss_name, loss)
return loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False)
embedded = self.layers['embedding'](inputs.tokens)
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), labels, weights)
}
with tf.control_dependencies([inputs.save_state(next_state)]):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
inputs: VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_state, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
inputs.length)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
logits = self.layers['cl_logits'](lstm_out)
loss = layers_lib.classification_loss(logits, labels, weights)
if return_intermediates:
return lstm_out, next_state, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'],
self.cl_inputs.length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_state, logits
else:
return logits
next_state, lm_cl_logits = logits_from_embedding(
self.tensors['lm_embedded'], return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss(
lm_cl_logits, self.tensors['lm_embedded'], self.lm_inputs,
logits_from_embedding)
with tf.control_dependencies([self.lm_inputs.save_state(next_state)]):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
class VatxtBidirModel(VatxtModel):
"""Extension of VatxtModel that supports bidirectional input."""
def __init__(self):
super(VatxtBidirModel,
self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)
# Reverse LSTM and LM loss for bidirectional models
self.layers['lstm_reverse'] = layers_lib.LSTM(
FLAGS.rnn_cell_size,
FLAGS.rnn_num_layers,
FLAGS.keep_prob_lstm_out,
name='LSTM_Reverse')
self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss_reverse')
@property
def pretrained_variables(self):
variables = super(VatxtBidirModel, self).pretrained_variables
variables.extend(self.layers['lstm_reverse'].trainable_weights)
return variables
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput objects in `self.cl_inputs`
* Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`,
`cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False, bidir=True)
self.cl_inputs = inputs
f_inputs, _ = inputs
# Embed both forward and reverse with a shared embedding
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
self.tensors['cl_embedded'] = embedded
_, next_states, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
acc = layers_lib.accuracy(logits, f_inputs.labels, f_inputs.weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs forward and reverse LM graphs from inputs to LM losses.
* Caches the VatxtInput objects in `self.lm_inputs`
* Caches tensors: `lm_embedded`, `lm_embedded_reverse`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float, sum of forward and reverse losses.
"""
inputs = _inputs('train', pretrain=True, bidir=True)
self.lm_inputs = inputs
f_inputs, r_inputs = inputs
f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss)
r_loss = self._lm_loss(
r_inputs,
emb_key='lm_embedded_reverse',
lstm_layer='lstm_reverse',
lm_loss_layer='lm_loss_reverse',
loss_name='lm_loss_reverse',
compute_loss=compute_loss)
if compute_loss:
return f_loss + r_loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False, bidir=True)
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
f_inputs, _ = inputs
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), f_inputs.labels,
f_inputs.weights)
}
# Save states on accuracy update
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: Length 2 tuple of 3-D float Tensor
[batch_size, num_timesteps, embedding_dim].
inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_states, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
out = []
for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
inputs):
out.append(self.layers[layer_name](emb, inp.state, inp.length))
lstm_outs, next_states = zip(*out)
# Concatenate output of forward and reverse LSTMs
lstm_out = tf.concat(lstm_outs, 1)
logits = self.layers['cl_logits'](lstm_out)
f_inputs, _ = inputs # pylint: disable=unpacking-non-sequence
loss = layers_lib.classification_loss(logits, f_inputs.labels,
f_inputs.weights)
if return_intermediates:
return lstm_out, next_states, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'],
self.cl_inputs[0].length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_states, logits
else:
return logits
lm_embedded = (self.tensors['lm_embedded'],
self.tensors['lm_embedded_reverse'])
next_states, lm_cl_logits = logits_from_embedding(
lm_embedded, return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss_bidir(
lm_cl_logits, lm_embedded, self.lm_inputs, logits_from_embedding)
saves = [
inp.save_state(state)
for (inp, state) in zip(self.lm_inputs, next_states)
]
with tf.control_dependencies(saves):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
def _inputs(dataset='train', pretrain=False, bidir=False):
return inputs_lib.inputs(
data_dir=FLAGS.data_dir,
phase=dataset,
bidir=bidir,
pretrain=pretrain,
use_seq2seq=pretrain and FLAGS.use_seq2seq_autoencoder,
state_size=FLAGS.rnn_cell_size,
num_layers=FLAGS.rnn_num_layers,
batch_size=FLAGS.batch_size,
unroll_steps=FLAGS.num_timesteps,
eos_id=FLAGS.vocab_size - 1)
def _get_vocab_freqs():
"""Returns vocab frequencies.
Returns:
List of integers, length=FLAGS.vocab_size.
Raises:
ValueError: if the length of the frequency file is not equal to the vocab
size, or if the file is not found.
"""
path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir, 'vocab_freq.txt')
if tf.gfile.Exists(path):
with tf.gfile.Open(path) as f:
# Get pre-calculated frequencies of words.
reader = csv.reader(f, quoting=csv.QUOTE_NONE)
freqs = [int(row[-1]) for row in reader]
if len(freqs) != FLAGS.vocab_size:
raise ValueError('Frequency file length %d != vocab size %d' %
(len(freqs), FLAGS.vocab_size))
else:
if FLAGS.vocab_freq_path:
raise ValueError('vocab_freq_path not found')
freqs = [1] * FLAGS.vocab_size
return freqs
def make_restore_average_vars_dict():
"""Returns dict mapping moving average names to variables."""
var_restore_dict = {}
variable_averages = tf.train.ExponentialMovingAverage(0.999)
for v in tf.global_variables():
if v in tf.trainable_variables():
name = variable_averages.average_name(v)
else:
name = v.op.name
var_restore_dict[name] = v
return var_restore_dict
def optimize(loss, global_step):
return layers_lib.optimize(
loss, global_step, FLAGS.max_grad_norm, FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor, FLAGS.sync_replicas,
FLAGS.replicas_to_aggregate, FLAGS.task)
| jiaphuan/models | research/adversarial_text/graphs.py | Python | apache-2.0 | 24,710 | 0.004816 |
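A hedged sketch of how the graph builders above are typically driven from a training script (this project runs them from separate train/evaluate scripts); it assumes FLAGS have been parsed and the preprocessed data exists under FLAGS.data_dir.
model = get_model()  # VatxtModel, or VatxtBidirModel when FLAGS.bidir_lstm is set
train_op, loss, global_step = model.classifier_training()
with tf.train.MonitoredTrainingSession() as sess:
    while not sess.should_stop():
        _, loss_value, step = sess.run([train_op, loss, global_step])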
from common_fixtures import * # NOQA
from gdapi import ApiError
_USER_LIST = [
"Owner",
"Member",
"Stranger",
"OutThereUser"
]
PROJECTS = set([])
@pytest.fixture(autouse=True, scope="module")
def clean_up_projects(super_client, request):
# This randomly times out, don't know why, disabling it
# on = super_client.create_setting(name='api.projects.use.rancher_id',
# value='true')
# wait_setting_active(super_client, on)
def fin():
for project in PROJECTS:
try:
super_client.delete(super_client.by_id('project', project))
except ApiError as e:
assert e.error.status == 404
assert len(get_ids(super_client.list_project()) & PROJECTS) == 0
# super_client.delete(on)
request.addfinalizer(fin)
pass
@pytest.fixture()
def project(user_clients, admin_user_client, request):
project = _create_project(admin_user_client, user_clients, 'Owner')
def fin():
try:
admin_user_client.delete(admin_user_client.by_id('project',
project))
except ApiError as e:
assert e.error.status == 404
request.addfinalizer(fin)
return project
@pytest.fixture(scope='session')
def user_clients(admin_user_client):
clients = {}
for user in _USER_LIST:
clients[user] = create_context(admin_user_client,
kind='user').user_client
clients['admin'] = admin_user_client
return clients
@pytest.fixture()
def members(user_clients):
members = ['Owner', 'Member']
return _create_members(user_clients, members)
def get_plain_members(members):
plain_members = []
for member in members.data:
plain_members.append({
'role': member.role,
'externalId': member.externalId,
'externalIdType': member.externalIdType
})
return plain_members
def get_ids(items):
ids = []
for item in items:
ids.append(item.id)
return set(ids)
def diff_members(members, got_members):
assert len(members) == len(got_members)
members_a = set([])
members_b = set([])
for member in members:
members_a.add(member['externalId'] + ' ' + member['externalIdType']
+ ' ' + member['role'])
for member in got_members:
members_b.add(member['externalId'] + ' ' + member['externalIdType']
+ ' ' + member['role'])
assert members_a == members_b
def all_owners(members):
for member in members:
member['role'] = 'owner'
return members
def test_update_project(user_clients, project):
user_clients['Owner'].update(
project, name='Project Name', description='Some description')
assert user_clients['Owner'].by_id(
'project', project.id).name == 'Project Name'
assert user_clients['Owner'].by_id(
'project', project.id).description == 'Some description'
with pytest.raises(ApiError) as e:
user_clients['Member'].update(
project, name='Project Name from Member', description='Loop hole?')
assert e.value.error.status == 404
with pytest.raises(ApiError) as e:
user_clients['Stranger'].update(
project, name='Project Name from Stranger', description='Changed')
assert e.value.error.status == 404
def test_set_members(admin_user_client, user_clients, project):
members = get_plain_members(project.projectMembers())
members.append({
'role': 'member',
'externalId': acc_id(user_clients['Member']),
'externalIdType': 'rancher_id'
})
_set_members(admin_user_client, user_clients['Owner'], project.id, None,
422)
_set_members(admin_user_client, user_clients['Owner'], project.id, [],
422)
_set_members(admin_user_client, user_clients['Owner'], project.id,
members, None)
_set_members(admin_user_client, user_clients['Member'], project.id,
None, 'Attribute')
_set_members(admin_user_client, user_clients['Member'], project.id, [],
'Attribute')
_set_members(admin_user_client, user_clients['Member'], project.id,
members, 'Attribute')
with pytest.raises(ApiError) as e:
_set_members(admin_user_client, user_clients['Stranger'],
project.id, None, 422)
assert e.value.error.status == 404
with pytest.raises(ApiError) as e:
_set_members(admin_user_client, user_clients['Stranger'],
project.id, [], 422)
assert e.value.error.status == 404
with pytest.raises(ApiError) as e:
_set_members(admin_user_client, user_clients['Stranger'],
project.id, members, 403)
assert e.value.error.status == 404
def test_get_members(admin_user_client, user_clients, members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'], members)
members = project.projectMembers()
_get_members(user_clients['Owner'], project.id, members)
_get_members(user_clients['Member'], project.id, members)
_get_members(user_clients['admin'], project.id, members)
with pytest.raises(ApiError) as e:
_get_members(user_clients['Stranger'], project.id, members)
assert e.value.error.status == 404
def test_list_all_projects(admin_user_client):
projects = admin_user_client.list_project()
projectAccounts = admin_user_client.list_account(kind='project',
limit=4000)
ids = []
ids_2 = []
for project in projects:
ids.append(project.id)
for project in projectAccounts:
ids_2.append(project.id)
assert len(list(set(ids) - set(ids_2))) == 0
def check_state(client, project_id, states, excludes):
for type in client.schema.types:
if type not in excludes:
try:
for resource in client.list(type, accountId=project_id):
assert resource.state in states
assert resource.removed is not None
except AttributeError:
pass
def client_for_project(project, admin_user_client):
project_key = admin_user_client.create_api_key(accountId=project.id)
admin_user_client.wait_success(project_key)
return api_client(project_key.publicValue, project_key.secretValue)
def test_delete_project(admin_user_client, new_context,
super_client):
project = new_context.user_client.reload(new_context.project)
proj_id = new_context.project.id
_create_resources(new_context.client)
assert len(new_context.client.list_projectMember()) == 1
project = super_client.wait_success(project.deactivate())
project = super_client.wait_success(project.remove())
check_state(new_context.client, proj_id,
['removed'], ['account', 'project', 'host', 'subscribe'])
super_client.wait_success(project.purge())
project = new_context.client.by_id('project', id=proj_id)
assert project.state == 'purged'
check_state(new_context.client, proj_id,
['purged', 'removed'], ['account', 'project', 'subscribe'])
project_members = admin_user_client\
.list('projectMember')
for member in project_members:
assert member.projectId != proj_id
def test_delete_members(admin_user_client, user_clients, members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'], members)
members = [members[0]]
assert len(user_clients['Member']
.by_id('project', project.id).projectMembers()) == 2
project.setmembers(members=members)
project = user_clients['Owner'].by_id('project', project.id)
assert len(project.projectMembers()) == 1
with pytest.raises(ApiError) as e:
user_clients['Member'].by_id('project', project.id)
assert e.value.error.status == 404
def test_change_roles(admin_user_client, user_clients, members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'], members)
assert len(project.projectMembers()) == 2
new_members = all_owners(get_plain_members(project.projectMembers()))
project_from_member = user_clients['Member'].by_id('project',
project.id)
with pytest.raises(AttributeError) as e:
project_from_member.setmembers(members=new_members)
assert 'setmembers' in e.value.message
project.setmembers(members=new_members)
project_from_member = user_clients['Member'].reload(project_from_member)
project_from_member.setmembers(members=new_members)
project_members_after = get_plain_members(project.projectMembers())
project_from_member_members_after = get_plain_members(
project_from_member.projectMembers())
for member in project_members_after:
assert member['role'] == 'owner'
for member in project_from_member_members_after:
assert member['role'] == 'owner'
def test_delete_other_owners(admin_user_client, user_clients, members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'],
all_owners(members))
project.setmembers(members=members)
project = user_clients['Member'].by_id('project', project.id)
    # Keep only the 'Member' user among the owners; building a filtered list
    # avoids mutating new_members while iterating over it.
    member_id = acc_id(user_clients['Member'])
    new_members = [
        member for member in get_plain_members(project.projectMembers())
        if not (member['role'] == 'owner' and member['externalId'] != member_id)
    ]
project.setmembers(members=new_members)
assert len(project.projectMembers()) == 1
with pytest.raises(ApiError) as e:
user_clients['Owner'].by_id('project', project.id)
assert e.value.error.status == 404
project = client_for_project(project, admin_user_client).list_project()[0]
got_members = project.projectMembers()
assert len(got_members) == 1
def test_multiple_owners_add_members(admin_user_client, user_clients,
members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'],
all_owners(members))
current_members = get_plain_members(project.projectMembers())
current_members.append({
'role': 'member',
'externalId': acc_id(user_clients['Stranger']),
'externalIdType': 'rancher_id'
})
_set_members(admin_user_client, user_clients['Owner'], project.id,
current_members, None)
_set_members(admin_user_client, user_clients['Stranger'], project.id,
current_members,
'Attribute')
project = user_clients['Stranger'].by_id('project', project.id)
assert len(project.projectMembers()) == 3
_set_members(admin_user_client, user_clients['Member'], project.id,
members, None)
with pytest.raises(ApiError) as e:
project.projectMembers()
assert e.value.error.status == 404
_set_members(admin_user_client, user_clients['Member'], project.id,
current_members, None)
assert len(project.projectMembers()) == len(current_members)
def test_members_cant_delete(admin_user_client, user_clients, members):
project = _create_project_with_members(admin_user_client,
user_clients['Owner'], members)
project = user_clients['Member'].by_id("project", project.id)
got_members = get_plain_members(project.projectMembers())
id = acc_id(user_clients['Member'])
for member in got_members:
if member['externalId'] == id:
assert member['role'] == 'member'
with pytest.raises(ApiError) as e:
user_clients['Member'].delete(project)
assert e.value.error.status == 403
_set_members(admin_user_client, user_clients['Member'], project.id, [{
'externalId': acc_id(user_clients['Member']),
'externalIdType': 'rancher_id',
'role': 'owner'
}], 'Attribute')
def test_project_cant_create_project(user_clients, members, project,
admin_user_client):
uuid = project.uuid
client = client_for_project(project, admin_user_client)
assert 'POST' not in client.schema.types['project'].collectionMethods
got_project = client.list_project()[0]
assert got_project.uuid == uuid
assert len(project.projectMembers()) == len(got_project.projectMembers())
pass
def test_list_projects_flag(admin_user_client, user_clients):
projects = admin_user_client.list('project')
ids = set([])
for project in projects:
ids.add(project.id)
projects_with_flag = admin_user_client.list('project', all='true')
admin_id = acc_id(admin_user_client)
assert len(projects) != len(projects_with_flag)
for project in projects:
include = False
for member in get_plain_members(project.projectMembers()):
if (member['externalIdType'] == 'rancher_id'):
if (member['externalId'] == admin_id):
include = True
if (include):
assert project.id in ids
else:
assert project.id not in ids
def test_get_project_not_mine(user_clients, project):
with pytest.raises(ApiError) as e:
user_clients['Member'].by_id('project', project.id)
assert e.value.error.status == 404
def test_project_deactivate(user_clients, project, members):
project.setmembers(members=members)
diff_members(members, get_plain_members(project.projectMembers()))
project = user_clients['Member'].reload(project)
with pytest.raises(AttributeError) as e:
project.deactivate()
assert 'deactivate' in e.value.message
project = user_clients['Owner'].reload(project)
project.deactivate()
project = user_clients['Owner'].wait_success(project)
assert project.state == 'inactive'
project.activate()
project = user_clients['Owner'].wait_success(project)
project.deactivate()
project = user_clients['Owner'].wait_success(project)
assert project.state == 'inactive'
def _create_resources(client):
for x in range(0, 4):
uuid = "sim:{}".format(random_num())
client.wait_success(client.create_container(imageUuid=uuid))
registry = client.create_registry(serverAddress='quay.io',
name='Quay')
registry = client.wait_success(registry)
reg_cred = client.create_registry_credential(
registryId=registry.id,
email='test@rancher.com',
publicValue='rancher',
secretValue='rancher')
client.wait_success(reg_cred)
def _set_members(admin_user_client, client, id, members, status):
project = client.by_id('project', id)
if status is None:
got_members = project.setmembers(members=members)
for member in got_members.data:
admin_user_client.wait_success(member)
got_members = get_plain_members(project.projectMembers())
assert len(got_members) == len(members)
diff_members(members, got_members)
elif (status == 'Attribute'):
with pytest.raises(AttributeError) as e:
project.setmembers(members=members)
assert 'setmembers' in e.value.message
else:
with pytest.raises(ApiError) as e:
project.setmembers(members=members)
assert e.value.error.status == status
def _get_plain_members(client, project):
members = client.list_project_member(projectId=project.id)
return get_plain_members(members)
def _create_project(admin_user_client, user_clients, user):
client = user_clients[user]
members = _create_members(user_clients, [user])
project = client.create_project(members=members)
project = admin_user_client.wait_success(project)
project = client.by_id('project', project.id)
got_members = get_plain_members(project.projectMembers())
diff_members(members, got_members)
PROJECTS.add(project.id)
assert project.id == project.id
return project
def _get_members(client, id, actual_members):
project = client.by_id('project', id)
assert len(project.projectMembers()) == len(actual_members)
def _create_project_with_members(admin_user_client, client, members):
project = client.create_project(members=members)
project = admin_user_client.wait_success(project)
project = client.by_id('project', project.id)
PROJECTS.add(project.id)
got_members = get_plain_members(project.projectMembers())
assert len(members) == len(got_members)
diff_members(members, got_members)
return project
def _create_members(user_clients, members):
newMembers = []
for member in members:
newMembers.append({
'role': 'owner' if member == 'Owner' else 'member',
'externalId': acc_id(user_clients[member]),
'externalIdType': 'rancher_id'
})
return newMembers
| gpndata/cattle | tests/integration/cattletest/core/test_projects.py | Python | apache-2.0 | 17,330 | 0 |
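For reference, the plain-member dictionaries produced by _create_members above and passed to project.setmembers(...) look like this; the externalId values are made up.
example_members = [
    {'role': 'owner', 'externalId': '1a1', 'externalIdType': 'rancher_id'},
    {'role': 'member', 'externalId': '1a2', 'externalIdType': 'rancher_id'},
]
# project.setmembers(members=example_members)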
from ceph_deploy.lib import remoto
from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa
from ceph_deploy.hosts.util import install_yum_priorities
def install(distro, version_kind, version, adjust_repos, **kw):
# note: when split packages for ceph land for Fedora,
# `kw['components']` will have those. Unused for now.
logger = distro.conn.logger
release = distro.release
machine = distro.machine_type
if version_kind in ['stable', 'testing']:
key = 'release'
else:
key = 'autobuild'
if adjust_repos:
install_yum_priorities(distro)
distro.conn.remote_module.enable_yum_priority_obsoletes()
logger.warning('check_obsoletes has been enabled for Yum priorities plugin')
if version_kind != 'dev':
remoto.process.run(
distro.conn,
[
'rpm',
'--import',
"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key)
]
)
if version_kind == 'stable':
url = 'http://ceph.com/rpm-{version}/fc{release}/'.format(
version=version,
release=release,
)
elif version_kind == 'testing':
url = 'http://ceph.com/rpm-testing/fc{release}'.format(
release=release,
)
remoto.process.run(
distro.conn,
[
'rpm',
'-Uvh',
'--replacepkgs',
'--force',
'--quiet',
'{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format(
url=url,
release=release,
),
]
)
if version_kind == 'dev':
logger.info('skipping install of ceph-release package')
logger.info('repo file will be created manually')
mirror_install(
distro,
'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/ref/{version}/'.format(
release=release.split(".", 1)[0],
machine=machine,
version=version),
"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key),
adjust_repos=True,
extra_installs=False
)
# set the right priority
logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority')
distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
logger.warning('altered ceph.repo priorities to contain: priority=1')
remoto.process.run(
distro.conn,
[
'yum',
'-y',
'-q',
'install',
'ceph',
'ceph-radosgw',
],
)
| alfredodeza/ceph-deploy | ceph_deploy/hosts/fedora/install.py | Python | mit | 3,044 | 0.0023 |
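A heavily hedged usage sketch: install() expects the connected host/distro object that ceph-deploy constructs internally before dispatching to this module; the helper name below is a placeholder, not a real ceph-deploy API, and the version strings are examples.
distro = get_connected_fedora_host('node1')  # hypothetical stand-in for ceph-deploy's host connection setup
install(distro, version_kind='stable', version='0.94', adjust_repos=True)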
# -*- coding: utf-8 -*-
#
# Copyright(C) 2013 Binson Zhang.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
__author__ = 'Binson Zhang <bin183cs@gmail.com>'
__date__ = '2013-8-25'
import os
import console
BUILD_ROOT_FILE = 'BUILD_ROOT'
BUILD_FILE = 'BUILD'
PATH_SEPARATOR = '/'
ROOT_PREFIX = '/'
WORKSPACE_PREFIX = '//'
EXTERNAL_PREFIX = '#'
def find_root_dir(working_dir):
""" Find the first directory which has the TROWEL_ROOT file.
Search from the bottom to up.
"""
root_dir = os.path.normpath(working_dir)
while root_dir != PATH_SEPARATOR:
root_file = os.path.join(root_dir, BUILD_ROOT_FILE)
if os.path.isfile(root_file):
return root_dir
root_dir = os.path.dirname(root_dir)
console.abort('Failed to find the root directory for working directory %s, '
'which has the file %s. ' % (working_dir, BUILD_ROOT_FILE))
def get_root_file(root_dir):
return os.path.join(root_dir, BUILD_ROOT_FILE)
def get_build_file(abs_work_dir):
return os.path.join(abs_work_dir, BUILD_FILE)
def get_work_dir(root_dir, abs_work_dir):
"""get work_dir from root_dir and abs_work_dir
e.g. '/home/ws/', '/home/ws/example/util' -> 'example/util'
"""
root_dir = os.path.normpath(root_dir)
abs_work_dir = os.path.normpath(abs_work_dir)
assert abs_work_dir.startswith(root_dir)
return abs_work_dir[len(root_dir)+1:]
def get_work_dir_from_path(path):
return os.path.dirname(path)
def get_sconstruct_file(root_dir):
return os.path.join(root_dir, 'SConstruct')
def norm_path(path):
"""normalize path from user input
e.g. '/util/adder' -> 'util/adder'
"""
if path.startswith(WORKSPACE_PREFIX):
# TODO(bin3): impl
console.abort('Not implemented')
elif path.startswith(ROOT_PREFIX):
return path[len(ROOT_PREFIX):]
return path
def is_external_path(path):
return path.startswith(EXTERNAL_PREFIX)
def get_exteranl_name(path):
assert is_external_path(path)
    return path[len(EXTERNAL_PREFIX):]
| bin3/bobo | bobo/fileutil.py | Python | apache-2.0 | 2554 | 0.006265
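A small usage sketch of the helpers above; the paths are illustrative and assume a BUILD_ROOT file exists at /home/ws.
root_dir = find_root_dir('/home/ws/example/util')             # -> '/home/ws'
build_file = get_build_file('/home/ws/example/util')          # -> '/home/ws/example/util/BUILD'
work_dir = get_work_dir('/home/ws', '/home/ws/example/util')  # -> 'example/util'
assert norm_path('/util/adder') == 'util/adder'
assert is_external_path('#gtest') and get_exteranl_name('#gtest') == 'gtest'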
from typing import Optional, Iterable, Iterator, List
import itertools
import os.path
from pydev_docker import models
from pydev_docker import utils
class ContainerOptions:
"""
Options for running a docker container
"""
DEFAULT_PYPATH_DIR = "/pypath"
DEFAULT_SRC_DIR = "/src"
def __init__(self,
image: str,
source_directory: str,
*, # force kwargs only for optional
command: Optional[str]=None,
container_source_directory: str=DEFAULT_SRC_DIR,
environment_variables: Optional[Iterable[models.Environment]]=None,
ext_volumes: Optional[Iterable[models.Volume]]=None,
network: Optional[str]=None,
py_volumes: Optional[Iterable[str]]=None,
ports: Optional[Iterable[models.Port]]=None,
pypath_directory: str=DEFAULT_PYPATH_DIR,
remove_container: bool=True
) -> None:
"""
Args:
image: A valid docker image
source_directory: The absolute path of the development directory that will be
mounted as the main python "source"
command: The command that will be ran once the container is created
pypath_directory: The directory that will contain all of the mounted
extra python packages, defaults to `ContainerOptions.DEFAULT_PYPATH_DIR`
container_source_directory: Specifies the directory that will be mounted
on the docker container that contains the main source code
py_volumes: The additional python packages
ext_volumes: Additional volumes to mount that are not related to python packages
environment_variables: Additional environment variables
network: The network to connect the container to
remove_container: Remove the container after the container is finished running
"""
self._image = image
self._source_directory = source_directory
self._command = command
self._pypath_directory = pypath_directory
self._container_source_directory = container_source_directory
self._py_volumes = utils.set_default(py_volumes, []) # type: Iterable[str]
self._ext_volumes = utils.set_default(ext_volumes, []) # type: Iterable[models.Volume]
self._environment_variables = utils.set_default(
environment_variables, []
) # type: Iterable[models.Environment]
self._ports = utils.set_default(ports, []) # type: Iterable[models.Port]
self._network = network
self._remove_container = remove_container
@property
def image(self) -> str:
return self._image
@property
def command(self) -> Optional[str]:
return self._command
@property
def network(self) -> Optional[str]:
return self._network
@property
def remove_container(self) -> bool:
return self._remove_container
def get_source_volume(self) -> models.Volume:
return models.Volume(
host_location=self._source_directory,
container_location=self._container_source_directory,
)
def get_pythonpath_environment(self) -> models.Environment:
return models.Environment("PYTHONPATH", self._pypath_directory)
def iter_pypath_volumes(self) -> Iterator[models.Volume]:
for v in self._py_volumes:
pypath_dir = "{}/{}".format(self._pypath_directory, os.path.basename(v))
yield models.Volume(v, pypath_dir, mode=models.VolumeMode.RO)
def iter_ext_volumes(self) -> Iterator[models.Volume]:
return iter(self._ext_volumes)
def iter_environment_variables(self) -> Iterator[models.Environment]:
return iter(self._environment_variables)
def get_ports(self) -> List[models.Port]:
return list(self._ports)
def get_volume_collection(self) -> List[models.Volume]:
"""
Returns a list of `models.Volume` objects that contains all of the volumes to
mount, which includes the source volume and all external volumes
"""
volume_collection = [self.get_source_volume()]
volume_collection.extend(
itertools.chain(self.iter_pypath_volumes(), self.iter_ext_volumes())
)
return volume_collection
def get_environment_collection(self) -> List[models.Environment]:
"""
Returns a list of `models.Environment` objects that contains all of the
environment variables for the docker container including the PYTHONPATH variable
"""
environment_collection = [self.get_pythonpath_environment()]
environment_collection.extend(self.iter_environment_variables())
return environment_collection
| Rastii/pydev_docker | pydev_docker/options.py | Python | mit | 4,867 | 0.006575 |
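A hedged construction sketch for ContainerOptions; the image name and host paths are assumptions, and models.Environment is used positionally exactly as in get_pythonpath_environment above.
opts = ContainerOptions(
    image='python:3.6',
    source_directory='/home/me/project',
    command='pytest',
    py_volumes=['/home/me/libs/utils'],
    environment_variables=[models.Environment('DEBUG', '1')],
)
volumes = opts.get_volume_collection()        # /src mount plus /pypath/utils mounted read-only
env_vars = opts.get_environment_collection()  # PYTHONPATH=/pypath first, then DEBUG=1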
import json
import logging
from tornado.web import RequestHandler
from tornado.gen import coroutine
from db import RealDb
class RealDataHandler(RequestHandler):
def initialize(self):
self.db = RealDb()
self.db.get_all()
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "Content-Type")
self.set_header('Access-Control-Allow-Methods', 'POST')
@coroutine
def post(self):
params = json.loads(self.request.body.decode('utf-8'))
logging.info('putting new point (%s, %s) to line %s', params['lat'], params['lon'], params['line'])
self.db.insert_point(params['id'], params['lat'], params['lon'], params['line'], params['ts'])
self.write("OK")
@coroutine
def get(self):
mes_id = self.get_argument('id')
lat = self.get_argument('lat')
lon = self.get_argument('lon')
line = self.get_argument('line')
timestamp = self.get_argument('ts')
logging.info('putting new point (%s, %s) to line %s', lat, lon, line)
self.db.insert_point(mes_id, lat, lon, line, timestamp)
self.write("OK")
| evemorgen/GdzieJestTenCholernyTramwajProject | backend/schedule_worker/handlers/real_data_handler.py | Python | mit | 1,218 | 0.001642 |
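A minimal wiring sketch showing how this handler could be mounted in a Tornado application; the URL path and port are assumptions, and the import is commented out because the real module path depends on the project layout.
import tornado.ioloop
import tornado.web
# from handlers.real_data_handler import RealDataHandler  # import path is an assumption
app = tornado.web.Application([(r"/real", RealDataHandler)])
app.listen(8888)
tornado.ioloop.IOLoop.current().start()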
"""
File: tonal_permutation.py
Purpose: Class defining a function whose cycles are composed of tone strings (no None).
"""
from function.permutation import Permutation
from tonalmodel.diatonic_tone_cache import DiatonicToneCache
from tonalmodel.diatonic_tone import DiatonicTone
class TonalPermutation(Permutation):
"""
    Class implementation of a permutation on a set of tones, given in string format.
"""
def __init__(self, cycles, domain_tones=None):
"""
        Constructor.
:param cycles: The cycles of a permutation. List of lists. Strings or DiatonicTones.
        :param domain_tones: Tone domain; if empty or None, the tones appearing in the cycles
                             are used. Strings or DiatonicTones.
"""
self._tone_domain = TonalPermutation.check_domain(domain_tones)
self._tone_cycles = TonalPermutation.convert_cycles_to_tones(self.tone_domain, cycles)
# if the tone_domain is not specified, we use the tones in the cycles as the domain.
if len(self._tone_domain) == 0:
for cycle in self._tone_cycles:
for tone in cycle:
self._tone_domain.add(tone)
Permutation.__init__(self, self.tone_domain, self.tone_cycles)
@property
def tone_domain(self):
return self._tone_domain
@property
def tone_cycles(self):
return self._tone_cycles
@staticmethod
def check_domain(tone_domain):
tones = set()
if tone_domain is not None:
if not isinstance(tone_domain, list) and not isinstance(tone_domain, set):
raise Exception('Tone domain must be a list or set.')
for tone_rep in tone_domain:
if isinstance(tone_rep, str):
tone = DiatonicToneCache.get_tone(tone_rep)
if tone is None:
raise Exception('Tone domain item \'{0}\' illegal syntax.'.format(tone_rep))
elif isinstance(tone_rep, DiatonicTone):
tone = tone_rep
else:
raise Exception('Tone domain item \'{0}\' must be string.'.format(tone_rep))
tones.add(tone)
return tones
@staticmethod
def convert_cycles_to_tones(tone_domain, cycles):
if cycles is None:
return []
if not isinstance(cycles, list):
            raise Exception('Cycles parameter is not a list.')
new_cycles = list()
for cycle in cycles:
            if not isinstance(cycle, list):
raise Exception('Cycle \'{0}\' must be a list.'.format(cycle))
new_cycle = list()
for tone_rep in cycle:
if isinstance(tone_rep, str):
tone = DiatonicToneCache.get_tone(tone_rep)
if tone is None:
                        raise Exception('Cycle item \'{0}\' illegal syntax.'.format(tone_rep))
elif isinstance(tone_rep, DiatonicTone):
tone = tone_rep
else:
                    raise Exception('Cycle item \'{0}\' must be string.'.format(tone_rep))
if len(tone_domain) != 0:
if tone not in tone_domain:
raise Exception('Tone \'{0}\' not in explicit tone domain.'.format(tone))
new_cycle.append(tone)
new_cycles.append(new_cycle)
return new_cycles
def __getitem__(self, key):
if isinstance(key, str):
key = DiatonicToneCache.get_tone(key)
if key is None:
raise Exception('Illegal tone syntax \'{0}\'.'.format(key))
if not isinstance(key, DiatonicTone):
raise Exception('Key \'{0}\' must be instance of DiatonticTone'.format(key))
return super(TonalPermutation, self).__getitem__(key)
| dpazel/music_rep | transformation/functions/tonalfunctions/tonal_permutation.py | Python | mit | 3,907 | 0.003583 |
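A short usage sketch, assuming the base Permutation class follows the usual cycle convention (each element maps to the next one, so ['C', 'E', 'G'] sends C to E, E to G, and G back to C).
perm = TonalPermutation([['C', 'E', 'G']])
c_tone = DiatonicToneCache.get_tone('C')
print(perm[c_tone])   # expected: the tone E under the convention above
print(perm['E'])      # string keys are converted to DiatonicTones first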
import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
import reversion
from users.models import Lageruser
from devicetypes.models import Type
from devicegroups.models import Devicegroup
from locations.models import Section
from Lagerregal import utils
from users.models import Department
@reversion.register()
class Building(models.Model):
name = models.CharField(_('Name'), max_length=200, unique=True)
street = models.CharField(_('Street'), max_length=100, blank=True)
number = models.CharField(_('Number'), max_length=30, blank=True)
zipcode = models.CharField(_('ZIP code'), max_length=5, blank=True)
city = models.CharField(_('City'), max_length=100, blank=True)
state = models.CharField(_('State'), max_length=100, blank=True)
country = models.CharField(_('Country'), max_length=100, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Building')
verbose_name_plural = _('Buildings')
permissions = (
("read_building", _("Can read Building")),
)
def get_absolute_url(self):
return reverse('building-detail', kwargs={'pk': self.pk})
def get_edit_url(self):
return reverse('building-edit', kwargs={'pk': self.pk})
@reversion.register()
class Room(models.Model):
name = models.CharField(_('Name'), max_length=200)
building = models.ForeignKey(Building, null=True, on_delete=models.SET_NULL)
section = models.ForeignKey(Section, null=True, on_delete=models.SET_NULL, related_name="rooms", blank=True)
def __str__(self):
if self.building:
return self.name + " (" + str(self.building) + ")"
else:
return self.name
class Meta:
verbose_name = _('Room')
verbose_name_plural = _('Rooms')
permissions = (
("read_room", _("Can read Room")),
)
def get_absolute_url(self):
return reverse('room-detail', kwargs={'pk': self.pk})
def get_edit_url(self):
return reverse('room-edit', kwargs={'pk': self.pk})
@reversion.register()
class Manufacturer(models.Model):
name = models.CharField(_('Manufacturer'), max_length=200, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Manufacturer')
verbose_name_plural = _('Manufacturers')
permissions = (
("read_manufacturer", _("Can read Manufacturer")),
)
def get_absolute_url(self):
return reverse('manufacturer-detail', kwargs={'pk': self.pk})
def get_edit_url(self):
return reverse('manufacturer-edit', kwargs={'pk': self.pk})
class Bookmark(models.Model):
device = models.ForeignKey("Device", on_delete=models.CASCADE)
user = models.ForeignKey(Lageruser, on_delete=models.CASCADE)
@reversion.register(follow=["typeattributevalue_set", ], exclude=[
"archived", "currentlending", "inventoried", "bookmarks", "trashed",
], ignore_duplicates=True)
class Device(models.Model):
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
creator = models.ForeignKey(Lageruser, on_delete=models.SET_NULL, null=True)
name = models.CharField(_('Name'), max_length=200)
inventorynumber = models.CharField(_('Inventorynumber'), max_length=50, blank=True)
serialnumber = models.CharField(_('Serialnumber'), max_length=50, blank=True)
manufacturer = models.ForeignKey(Manufacturer, blank=True, null=True, on_delete=models.SET_NULL)
hostname = models.CharField(_('Hostname'), max_length=40, blank=True)
description = models.CharField(_('Description'), max_length=10000, blank=True)
devicetype = models.ForeignKey(Type, blank=True, null=True, on_delete=models.SET_NULL)
room = models.ForeignKey(Room, blank=True, null=True, on_delete=models.SET_NULL)
group = models.ForeignKey(Devicegroup, blank=True, null=True, related_name="devices", on_delete=models.SET_NULL)
webinterface = models.CharField(_('Webinterface'), max_length=60, blank=True)
templending = models.BooleanField(default=False, verbose_name=_("For short term lending"))
currentlending = models.ForeignKey("Lending", related_name="currentdevice", null=True, blank=True,
on_delete=models.SET_NULL)
manual = models.FileField(upload_to=utils.get_file_location, null=True, blank=True)
contact = models.ForeignKey(Lageruser, related_name="as_contact",
help_text=_("Person to contact about using this device"), blank=True,
null=True, on_delete=models.SET_NULL)
archived = models.DateTimeField(null=True, blank=True)
trashed = models.DateTimeField(null=True, blank=True)
inventoried = models.DateTimeField(null=True, blank=True)
bookmarkers = models.ManyToManyField(Lageruser, through=Bookmark, related_name="bookmarks", blank=True)
department = models.ForeignKey(Department, null=True, blank=True, related_name="devices", on_delete=models.SET_NULL)
is_private = models.BooleanField(default=False)
used_in = models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL,)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Device')
verbose_name_plural = _('Devices')
permissions = (
("boss_mails", _("Emails for bosses")),
("managment_mails", _("Emails for managment")),
("support_mails", _("Emails for support")),
("read_device", _("Can read Device")),
("lend_device", _("Can lend Device")),
("read_puppetdetails", _("Read Puppet Details"))
)
def get_absolute_url(self):
return reverse('device-detail', kwargs={'pk': self.pk})
def get_edit_url(self):
return reverse('device-edit', kwargs={'pk': self.pk})
def get_as_dict(self):
dict = {}
dict["name"] = self.name
dict["description"] = self.description
dict["manufacturer"] = self.manufacturer
dict["devicetype"] = self.devicetype
dict["room"] = self.room
return dict
def is_overdue(self):
if self.currentlending is None:
return False
if self.currentlending.duedate < datetime.date.today():
return True
return False
@staticmethod
def active():
return Device.objects.filter(archived=None, trashed=None)
@staticmethod
def devices_for_departments(departments=[]):
return Device.objects.filter(department__in=departments).exclude(
~Q(department__in=departments), is_private=True)
class DeviceInformationType(models.Model):
keyname = models.CharField(_('Name'), max_length=200)
humanname = models.CharField(_('Human readable name'), max_length=200)
def __str__(self):
return self.humanname
class Meta:
verbose_name = _('Information Type')
verbose_name_plural = _('Information Type')
class DeviceInformation(models.Model):
information = models.CharField(_('Information'), max_length=200)
device = models.ForeignKey(Device, related_name="information", on_delete=models.CASCADE)
infotype = models.ForeignKey(DeviceInformationType, on_delete=models.CASCADE)
def __str__(self):
return str(self.infotype) + ": " + self.information
class Meta:
verbose_name = _('Information')
verbose_name_plural = _('Information')
@reversion.register(ignore_duplicates=True)
class Lending(models.Model):
owner = models.ForeignKey(Lageruser, verbose_name=_("Lent to"), on_delete=models.SET_NULL, null=True)
lenddate = models.DateField(auto_now_add=True)
duedate = models.DateField(blank=True, null=True)
duedate_email = models.DateField(blank=True, null=True)
returndate = models.DateField(blank=True, null=True)
device = models.ForeignKey(Device, null=True, blank=True, on_delete=models.CASCADE)
smalldevice = models.CharField(_("Small Device"), max_length=200, null=True, blank=True)
class Meta:
verbose_name = _('Lending')
verbose_name_plural = _('Lendings')
class Template(models.Model):
templatename = models.CharField(_('Templatename'), max_length=200)
name = models.CharField(_('Name'), max_length=200)
manufacturer = models.ForeignKey(Manufacturer, blank=True, null=True, on_delete=models.CASCADE)
description = models.CharField(_('Description'), max_length=1000, blank=True)
devicetype = models.ForeignKey(Type, blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return self.templatename
class Meta:
ordering = ['name']
verbose_name = _('Template')
verbose_name_plural = _('Templates')
permissions = (
("read_template", _("Can read Template")),
)
def get_absolute_url(self):
return reverse('device-list')
def get_as_dict(self):
dict = {}
dict["name"] = self.name
dict["description"] = self.description
dict["manufacturer"] = self.manufacturer
dict["devicetype"] = self.devicetype
return dict
class Note(models.Model):
device = models.ForeignKey(Device, related_name="notes", on_delete=models.CASCADE)
note = models.CharField(max_length=5000)
creator = models.ForeignKey(Lageruser, on_delete=models.SET_NULL, null=True)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = _("Note")
verbose_name_plural = _("Notes")
def get_absolute_url(self):
return reverse("device-detail", kwargs={'pk': self.device.pk})
class Picture(models.Model):
device = models.ForeignKey(Device, related_name="pictures", on_delete=models.CASCADE)
image = models.ImageField(upload_to=utils.get_file_location)
caption = models.CharField(max_length=200, null=True, blank=True)
class Meta:
verbose_name = _("Picture")
verbose_name_plural = _("Pictures")
def get_absolute_url(self):
return reverse("device-detail", kwargs={'pk': self.device.pk})
| vIiRuS/Lagerregal | devices/models.py | Python | bsd-3-clause | 10,243 | 0.002831 |
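A hedged Django-shell style sketch of the query helpers defined above; the department lookup is a placeholder.
active_devices = Device.active()              # neither archived nor trashed
some_department = Department.objects.first()  # placeholder department
visible = Device.devices_for_departments([some_department])
overdue = [device for device in active_devices if device.is_overdue()]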
## $id: webstatadmin.py,v 1.28 2007/04/01 23:46:46 tibor exp $
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import sys, webstat
from invenio.dbquery import run_sql
from invenio.bibtask import task_init, task_get_option, task_set_option, \
task_has_option, task_update_progress, write_message
from invenio.webstat_config import CFG_WEBSTAT_CONFIG_PATH
from invenio.config import CFG_SITE_RECORD
def main():
"""Main dealing with all the BibTask magic."""
task_init(authorization_action="runwebstatadmin",
authorization_msg="Webstat Administrator",
description="Description: %s Creates/deletes custom events. Can be set\n"
" to cache key events and previously defined custom events.\n" % sys.argv[0],
              help_specific_usage=" -n, --new-event=ID          create a new custom event with the human-readable ID\n"
                        " -r, --remove-event=ID       remove the custom event with id ID and all its data\n"
" -S, --show-events show all currently available custom events\n"
" -c, --cache-events=CLASS|[ID] caches the events defined by the class or IDs, e.g.:\n"
" -c ALL\n"
" -c KEYEVENTS\n"
" -c CUSTOMEVENTS\n"
" -c 'event id1',id2,'testevent'\n"
" -d,--dump-config dump default config file\n"
" -e,--load-config create the custom events described in config_file\n"
"\nWhen creating events (-n) the following parameters are also applicable:\n"
" -l, --event-label=NAME set a descriptive label to the custom event\n"
" -a, --args=[NAME] set column headers for additional custom event arguments\n"
" (e.g. -a country,person,car)\n",
version=__revision__,
specific_params=("n:r:Sl:a:c:de", ["new-event=", "remove-event=", "show-events",
"event-label=", "args=", "cache-events=", "dump-config",
"load-config"]),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_submit_check_options_fnc=task_submit_check_options,
task_run_fnc=task_run_core)
def task_submit_elaborate_specific_parameter(key, value, opts, args):
"""
Given the string key it checks it's meaning, eventually using the value.
Usually it fills some key in the options dict. It must return True if
it has elaborated the key, False, if it doesn't know that key. eg:
"""
if key in ("-n", "--new-event"):
task_set_option("create_event_with_id", value)
elif key in ("-r", "--remove-event"):
task_set_option("destroy_event_with_id", value)
elif key in ("-S", "--show-events"):
task_set_option("list_events", True)
elif key in ("-l", "--event-label"):
task_set_option("event_name", value)
elif key in ("-a", "--args"):
task_set_option("column_headers", value.split(','))
elif key in ("-c", "--cache-events"):
task_set_option("cache_events", value.split(','))
elif key in ("-d", "--dump-config"):
task_set_option("dump_config", True)
elif key in ("-e", "--load-config"):
task_set_option("load_config", True)
else:
return False
return True
def task_submit_check_options():
"""
NOTE: Depending on the parameters, either "BibSched mode" or plain
    straightforward execution mode is entered.
"""
if task_has_option("create_event_with_id"):
print webstat.create_customevent(task_get_option("create_event_with_id"),
task_get_option("event_name", None),
task_get_option("column_headers", []))
sys.exit(0)
elif task_has_option("destroy_event_with_id"):
print webstat.destroy_customevent(task_get_option("destroy_event_with_id"))
sys.exit(0)
elif task_has_option("list_events"):
events = webstat._get_customevents()
if len(events) == 0:
print "There are no custom events available."
else:
print "Available custom events are:\n"
print '\n'.join([x[0] + ": " + ((x[1] == None) and "No descriptive name" or str(x[1])) for x in events])
sys.exit(0)
elif task_has_option("cache_events"):
events = task_get_option("cache_events")
write_message(str(events), verbose=9)
if events[0] == 'ALL':
keyevents_to_cache = webstat.KEYEVENT_REPOSITORY.keys()
customevents_to_cache = [x[0] for x in webstat._get_customevents()]
elif events[0] == 'KEYEVENTS':
keyevents_to_cache = webstat.KEYEVENT_REPOSITORY.keys()
customevents_to_cache = []
elif events[0] == 'CUSTOMEVENTS':
keyevents_to_cache = []
customevents_to_cache = [x[0] for x in webstat._get_customevents()]
elif events[0] != '':
keyevents_to_cache = [x for x in webstat.KEYEVENT_REPOSITORY.keys() if x in events]
            customevents_to_cache = [x[0] for x in webstat._get_customevents() if x[0] in events]
# Control so that we have valid event names
if len(keyevents_to_cache + customevents_to_cache) == 0:
# Oops, no events. Abort and display help.
return False
else:
task_set_option("keyevents", keyevents_to_cache)
task_set_option("customevents", customevents_to_cache)
return True
elif task_has_option("dump_config"):
print """\
[general]
visitors_box = True
search_box = True
record_box = True
bibsched_box = True
basket_box = True
apache_box = True
uptime_box = True
[webstat_custom_event_1]
name = baskets
param1 = action
param2 = basket
param3 = user
[apache_log_analyzer]
profile = nil
nb-histogram-items-to-print = 20
exclude-ip-list = ("137.138.249.162")
home-collection = "Atlantis Institute of Fictive Science"
search-interface-url = "/?"
detailed-record-url = "/%s/"
search-engine-url = "/search?"
search-engine-url-old-style = "/search.py?"
basket-url = "/yourbaskets/"
add-to-basket-url = "/yourbaskets/add"
display-basket-url = "/yourbaskets/display"
display-public-basket-url = "/yourbaskets/display_public"
alert-url = "/youralerts/"
display-your-alerts-url = "/youralerts/list"
display-your-searches-url = "/youralerts/display"
""" % CFG_SITE_RECORD
sys.exit(0)
elif task_has_option("load_config"):
from ConfigParser import ConfigParser
conf = ConfigParser()
conf.read(CFG_WEBSTAT_CONFIG_PATH)
for section in conf.sections():
if section[:21] == "webstat_custom_event_":
cols = []
name = ""
for option, value in conf.items(section):
if option == "name":
name = value
if option[:5] == "param":
                        # add the column name in its position
index = int(option[-1]) - 1
while len(cols) <= index:
cols.append("")
cols[index] = value
if name:
res = run_sql("SELECT COUNT(id) FROM staEVENT WHERE id = %s", (name, ))
if res[0][0] == 0:
# name does not exist, create customevent
webstat.create_customevent(name, name, cols)
else:
# name already exists, update customevent
webstat.modify_customevent(name, cols=cols)
sys.exit(0)
else:
# False means that the --help should be displayed
return False
def task_run_core():
"""
When this function is called, the tool has entered BibSched mode, which means
that we're going to cache events according to the parameters.
"""
write_message("Initiating rawdata caching")
task_update_progress("Initating rawdata caching")
# Cache key events
keyevents = task_get_option("keyevents")
if keyevents and len(keyevents) > 0:
for i in range(len(keyevents)):
write_message("Caching key event 1: %s" % keyevents[i])
webstat.cache_keyevent_trend(keyevents)
task_update_progress("Part 1/2: done %d/%d" % (i + 1, len(keyevents)))
# Cache custom events
customevents = task_get_option("customevents")
    if customevents and len(customevents) > 0:
        for i in range(len(customevents)):
            write_message("Caching custom event %d: %s" % (i + 1, customevents[i]))
            webstat.cache_customevent_trend([customevents[i]])
task_update_progress("Part 2/2: done %d/%d" % (i + 1, len(customevents)))
write_message("Finished rawdata caching succesfully")
task_update_progress("Finished rawdata caching succesfully")
return True
| NikolaYolov/invenio_backup | modules/webstat/lib/webstatadmin.py | Python | gpl-2.0 | 10,293 | 0.004372 |
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2511
try:
import unittest2 as unittest
except ImportError:
import unittest
class CertificateReqTestCase(unittest.TestCase):
pem_text = """\
MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
"""
def setUp(self):
self.asn1Spec = rfc2511.CertReqMessages()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| catapult-project/catapult | third_party/gsutil/third_party/pyasn1-modules/tests/test_rfc2511.py | Python | bsd-3-clause | 1,591 | 0.000629 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft import formatting_utils, project
from snapcraft.internal import common, elf, pluginhandler
from typing import Dict, List
def env_for_classic(base: str, arch_triplet: str) -> List[str]:
"""Set the required environment variables for a classic confined build."""
env = []
core_path = common.get_core_path(base)
paths = common.get_library_paths(core_path, arch_triplet, existing_only=False)
env.append(
formatting_utils.format_path_variable(
"LD_LIBRARY_PATH", paths, prepend="", separator=":"
)
)
return env
def runtime_env(root: str, arch_triplet: str) -> List[str]:
"""Set the environment variables required for running binaries."""
env = []
env.append(
'PATH="'
+ ":".join(
["{0}/usr/sbin", "{0}/usr/bin", "{0}/sbin", "{0}/bin", "$PATH"]
).format(root)
+ '"'
)
# Add the default LD_LIBRARY_PATH
paths = common.get_library_paths(root, arch_triplet)
# Add more specific LD_LIBRARY_PATH from staged packages if necessary
paths += elf.determine_ld_library_path(root)
if paths:
env.append(
formatting_utils.format_path_variable(
"LD_LIBRARY_PATH", paths, prepend="", separator=":"
)
)
return env
def build_env(root: str, snap_name: str, arch_triplet: str) -> List[str]:
"""Set the environment variables required for building.
    This is required for the current part's installdir due to stage-packages
    and also to set up the stagedir.
"""
env = []
paths = common.get_include_paths(root, arch_triplet)
if paths:
for envvar in ["CPPFLAGS", "CFLAGS", "CXXFLAGS"]:
env.append(
formatting_utils.format_path_variable(
envvar, paths, prepend="-I", separator=" "
)
)
paths = common.get_library_paths(root, arch_triplet)
if paths:
env.append(
formatting_utils.format_path_variable(
"LDFLAGS", paths, prepend="-L", separator=" "
)
)
paths = common.get_pkg_config_paths(root, arch_triplet)
if paths:
env.append(
formatting_utils.format_path_variable(
"PKG_CONFIG_PATH", paths, prepend="", separator=":"
)
)
return env
def build_env_for_stage(stagedir: str, snap_name: str, arch_triplet: str) -> List[str]:
env = build_env(stagedir, snap_name, arch_triplet)
env.append('PERL5LIB="{0}/usr/share/perl5/"'.format(stagedir))
return env
def snapcraft_global_environment(project: project.Project) -> Dict[str, str]:
if project.info.name:
name = project.info.name
else:
name = ""
if project.info.version:
version = project.info.version
else:
version = ""
if project.info.grade:
grade = project.info.grade
else:
grade = ""
return {
"SNAPCRAFT_ARCH_TRIPLET": project.arch_triplet,
"SNAPCRAFT_PARALLEL_BUILD_COUNT": project.parallel_build_count,
"SNAPCRAFT_PROJECT_NAME": name,
"SNAPCRAFT_PROJECT_VERSION": version,
"SNAPCRAFT_PROJECT_GRADE": grade,
"SNAPCRAFT_STAGE": project.stage_dir,
"SNAPCRAFT_PRIME": project.prime_dir,
}
def snapcraft_part_environment(part: pluginhandler.PluginHandler) -> Dict[str, str]:
return {
"SNAPCRAFT_PART_SRC": part.plugin.sourcedir,
"SNAPCRAFT_PART_BUILD": part.plugin.builddir,
"SNAPCRAFT_PART_INSTALL": part.plugin.installdir,
}
def environment_to_replacements(environment: Dict[str, str]) -> Dict[str, str]:
replacements = {} # type: Dict[str, str]
for variable, value in environment.items():
# Support both $VAR and ${VAR} syntax
replacements["${}".format(variable)] = value
replacements["${{{}}}".format(variable)] = value
return replacements
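# Illustrative use of environment_to_replacements (not part of the original
# module; names and paths below are hypothetical): the returned dict can be
# used for simple string substitution of both $VAR and ${VAR} forms.
#
#   replacements = environment_to_replacements({"SNAPCRAFT_STAGE": "/stage"})
#   command = "cp app $SNAPCRAFT_STAGE/bin && ls ${SNAPCRAFT_STAGE}"
#   for old, new in replacements.items():
#       command = command.replace(old, new)
#   # command == "cp app /stage/bin && ls /stage"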
| cprov/snapcraft | snapcraft/internal/project_loader/_env.py | Python | gpl-3.0 | 4,619 | 0.000649 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
import copy
from util import print_msg, format_satoshis, print_stderr
from bitcoin import is_valid, hash_160_to_bc_address, hash_160
from decimal import Decimal
import bitcoin
from transaction import Transaction
class Command:
def __init__(self, name, min_args, max_args, requires_network, requires_wallet, requires_password, description, syntax = '', options_syntax = ''):
self.name = name
self.min_args=min_args
self.max_args = max_args
self.requires_network = requires_network
self.requires_wallet = requires_wallet
self.requires_password = requires_password
self.description = description
self.syntax = syntax
self.options = options_syntax
known_commands = {}
def register_command(*args):
global known_commands
name = args[0]
known_commands[name] = Command(*args)
payto_options = ' --fee, -f: set transaction fee\n --fromaddr, -F: send from address -\n --changeaddr, -c: send change to address'
listaddr_options = " -a: show all addresses, including change addresses\n -l: include labels in results"
restore_options = " accepts a seed or master public key."
mksendmany_syntax = 'mksendmanytx <recipient> <amount> [<recipient> <amount> ...]'
payto_syntax = "payto <recipient> <amount> [label]\n<recipient> can be a bitcoin address or a label"
paytomany_syntax = "paytomany <recipient> <amount> [<recipient> <amount> ...]\n<recipient> can be a bitcoin address or a label"
signmessage_syntax = 'signmessage <address> <message>\nIf you want to lead or end a message with spaces, or want double spaces inside the message make sure you quote the string. I.e. " Hello This is a weird String "'
verifymessage_syntax = 'verifymessage <address> <signature> <message>\nIf you want to lead or end a message with spaces, or want double spaces inside the message make sure you quote the string. I.e. " Hello This is a weird String "'
# command
# requires_network
# requires_wallet
# requires_password
register_command('contacts', 0, 0, False, True, False, 'Show your list of contacts')
register_command('create', 0, 0, False, True, False, 'Create a new wallet')
register_command('createmultisig', 2, 2, False, True, False, 'similar to bitcoind\'s command')
register_command('createrawtransaction', 2, 2, False, True, False, 'Create an unsigned transaction. The syntax is similar to bitcoind.')
register_command('deseed', 0, 0, False, True, False, 'Remove seed from wallet, creating a seedless, watching-only wallet.')
register_command('decoderawtransaction', 1, 1, False, False, False, 'similar to bitcoind\'s command')
register_command('getprivatekeys', 1, 1, False, True, True, 'Get the private keys of a given address', 'getprivatekeys <bitcoin address>')
register_command('dumpprivkeys', 0, 0, False, True, True, 'Dump all private keys in your wallet')
register_command('freeze', 1, 1, False, True, True, 'Freeze the funds at one of your wallet\'s addresses', 'freeze <address>')
register_command('getbalance', 0, 1, True, True, False, 'Return the balance of your wallet, or of one account in your wallet', 'getbalance [<account>]')
register_command('getservers', 0, 0, True, False, False, 'Return the list of available servers')
register_command('getversion', 0, 0, False, False, False, 'Return the version of your client', 'getversion')
register_command('getaddressbalance', 1, 1, True, False, False, 'Return the balance of an address', 'getaddressbalance <address>')
register_command('getaddresshistory', 1, 1, True, False, False, 'Return the transaction history of a wallet address', 'getaddresshistory <address>')
register_command('getconfig', 1, 1, False, False, False, 'Return a configuration variable', 'getconfig <name>')
register_command('getpubkeys', 1, 1, False, True, False, 'Return the public keys for a wallet address', 'getpubkeys <bitcoin address>')
register_command('getrawtransaction', 1, 1, True, False, False, 'Retrieve a transaction', 'getrawtransaction <txhash>')
register_command('getseed', 0, 0, False, True, True, 'Print the generation seed of your wallet.')
register_command('getmpk', 0, 0, False, True, False, 'Return your wallet\'s master public key', 'getmpk')
register_command('help', 0, 1, False, False, False, 'Prints this help')
register_command('history', 0, 0, True, True, False, 'Returns the transaction history of your wallet')
register_command('importprivkey', 1, 1, False, True, True, 'Import a private key', 'importprivkey <privatekey>')
register_command('ismine', 1, 1, False, True, False, 'Return true if and only if address is in wallet', 'ismine <address>')
register_command('listaddresses', 2, 2, False, True, False, 'Returns your list of addresses.', '', listaddr_options)
register_command('listunspent', 0, 0, True, True, False, 'Returns the list of unspent inputs in your wallet.')
register_command('getaddressunspent', 1, 1, True, False, False, 'Returns the list of unspent inputs for an address.')
register_command('mktx', 5, 5, False, True, True, 'Create a signed transaction', 'mktx <recipient> <amount> [label]', payto_options)
register_command('mksendmanytx', 4, 4, False, True, True, 'Create a signed transaction', mksendmany_syntax, payto_options)
register_command('payto', 5, 5, True, True, True, 'Create and broadcast a transaction.', payto_syntax, payto_options)
register_command('paytomany', 4, 4, True, True, True, 'Create and broadcast a transaction.', paytomany_syntax, payto_options)
register_command('password', 0, 0, False, True, True, 'Change your password')
register_command('restore', 0, 0, True, True, False, 'Restore a wallet', '', restore_options)
register_command('searchcontacts', 1, 1, False, True, False, 'Search through contacts, return matching entries', 'searchcontacts <query>')
register_command('setconfig', 2, 2, False, False, False, 'Set a configuration variable', 'setconfig <name> <value>')
register_command('setlabel', 2,-1, False, True, False, 'Assign a label to an item', 'setlabel <tx_hash> <label>')
register_command('sendrawtransaction', 1, 1, True, False, False, 'Broadcasts a transaction to the network.', 'sendrawtransaction <tx in hexadecimal>')
register_command('signtxwithkey', 1, 3, False, False, False, 'Sign a serialized transaction with a key','signtxwithkey <tx> <key>')
register_command('signtxwithwallet', 1, 3, False, True, True, 'Sign a serialized transaction with a wallet','signtxwithwallet <tx>')
register_command('signmessage', 2,-1, False, True, True, 'Sign a message with a key', signmessage_syntax)
register_command('unfreeze', 1, 1, False, True, False, 'Unfreeze the funds at one of your wallet\'s address', 'unfreeze <address>')
register_command('validateaddress', 1, 1, False, False, False, 'Check that the address is valid', 'validateaddress <address>')
register_command('verifymessage', 3,-1, False, False, False, 'Verifies a signature', verifymessage_syntax)
register_command('encrypt', 2,-1, False, False, False, 'encrypt a message with pubkey','encrypt <pubkey> <message>')
register_command('decrypt', 2,-1, False, True, True, 'decrypt a message encrypted with pubkey','decrypt <pubkey> <message>')
register_command('getproof', 1, 1, True, False, False, 'get merkle proof', 'getproof <address>')
register_command('getutxoaddress', 2, 2, True, False, False, 'get the address of an unspent transaction output','getutxoaddress <txid> <pos>')
register_command('sweep', 2, 3, True, False, False, 'Sweep a private key.', 'sweep privkey addr [fee]')
register_command('make_seed', 3, 3, False, False, False, 'Create a seed.','options: --nbits --entropy --lang')
register_command('check_seed', 1,-1, False, False, False, 'Check that a seed was generated with external entropy. Option: --entropy --lang')
class Commands:
def __init__(self, wallet, network, callback = None):
self.wallet = wallet
self.network = network
self._callback = callback
self.password = None
def _run(self, method, args, password_getter):
cmd = known_commands[method]
if cmd.requires_password and self.wallet.use_encryption:
self.password = apply(password_getter,())
f = getattr(self, method)
result = f(*args)
self.password = None
if self._callback:
apply(self._callback, ())
return result
def make_seed(self, nbits, custom_entropy, language):
from mnemonic import Mnemonic
s = Mnemonic(language).make_seed(nbits, custom_entropy=custom_entropy)
return s.encode('utf8')
def check_seed(self, seed, custom_entropy, language):
from mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, custom_entropy)
def getaddresshistory(self, addr):
return self.network.synchronous_get([ ('blockchain.address.get_history',[addr]) ])[0]
def listunspent(self):
l = copy.deepcopy(self.wallet.get_unspent_coins())
for i in l: i["value"] = str(Decimal(i["value"])/100000000)
return l
def getaddressunspent(self, addr):
return self.network.synchronous_get([ ('blockchain.address.listunspent',[addr]) ])[0]
def getutxoaddress(self, txid, num):
r = self.network.synchronous_get([ ('blockchain.utxo.get_address',[txid, num]) ])
if r:
return {'address':r[0] }
def createrawtransaction(self, inputs, outputs):
coins = self.wallet.get_unspent_coins(None)
tx_inputs = []
for i in inputs:
prevout_hash = i['txid']
prevout_n = i['vout']
for c in coins:
if c['prevout_hash'] == prevout_hash and c['prevout_n'] == prevout_n:
self.wallet.add_input_info(c)
tx_inputs.append(c)
break
else:
raise BaseException('Transaction output not in wallet', prevout_hash+":%d"%prevout_n)
outputs = map(lambda x: ('address', x[0], int(1e8*x[1])), outputs.items())
tx = Transaction(tx_inputs, outputs)
return tx
def signtxwithkey(self, raw_tx, sec):
tx = Transaction.deserialize(raw_tx)
pubkey = bitcoin.public_key_from_private_key(sec)
tx.sign({ pubkey:sec })
return tx
def signtxwithwallet(self, raw_tx):
tx = Transaction.deserialize(raw_tx)
self.wallet.sign_transaction(tx, self.password)
return tx
def decoderawtransaction(self, raw):
tx = Transaction.deserialize(raw)
return {'inputs':tx.inputs, 'outputs':tx.outputs}
def sendrawtransaction(self, raw):
tx = Transaction.deserialize(raw)
return self.network.synchronous_get([('blockchain.transaction.broadcast', [str(tx)])])[0]
def createmultisig(self, num, pubkeys):
assert isinstance(pubkeys, list)
redeem_script = Transaction.multisig_script(pubkeys, num)
address = hash_160_to_bc_address(hash_160(redeem_script.decode('hex')), 5)
return {'address':address, 'redeemScript':redeem_script}
def freeze(self,addr):
return self.wallet.freeze(addr)
def unfreeze(self,addr):
return self.wallet.unfreeze(addr)
def getprivatekeys(self, addr):
return self.wallet.get_private_key(addr, self.password)
def ismine(self, addr):
return self.wallet.is_mine(addr)
def dumpprivkeys(self, addresses = None):
if addresses is None:
addresses = self.wallet.addresses(True)
return [self.wallet.get_private_key(address, self.password) for address in addresses]
def validateaddress(self, addr):
isvalid = is_valid(addr)
out = { 'isvalid':isvalid }
if isvalid:
out['address'] = addr
return out
def getpubkeys(self, addr):
out = { 'address':addr }
out['pubkeys'] = self.wallet.get_public_keys(addr)
return out
def getbalance(self, account= None):
if account is None:
c, u = self.wallet.get_balance()
else:
c, u = self.wallet.get_account_balance(account)
out = { "confirmed": str(Decimal(c)/100000000) }
if u: out["unconfirmed"] = str(Decimal(u)/100000000)
return out
def getaddressbalance(self, addr):
out = self.network.synchronous_get([ ('blockchain.address.get_balance',[addr]) ])[0]
out["confirmed"] = str(Decimal(out["confirmed"])/100000000)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/100000000)
return out
def getproof(self, addr):
p = self.network.synchronous_get([ ('blockchain.address.get_proof',[addr]) ])[0]
out = []
for i,s in p:
out.append(i)
return out
def getservers(self):
while not self.network.is_up_to_date():
time.sleep(0.1)
return self.network.get_servers()
def getversion(self):
        import electrum_nmc # Needs to stay here to prevent circular imports
return electrum_nmc.ELECTRUM_VERSION
def getmpk(self):
return self.wallet.get_master_public_keys()
def getseed(self):
s = self.wallet.get_mnemonic(self.password)
return s.encode('utf8')
def importprivkey(self, sec):
try:
addr = self.wallet.import_key(sec,self.password)
out = "Keypair imported: ", addr
except Exception as e:
out = "Error: Keypair import failed: " + str(e)
return out
def sweep(self, privkey, to_address, fee = 0.0001):
fee = int(Decimal(fee)*100000000)
return Transaction.sweep([privkey], self.network, to_address, fee)
def signmessage(self, address, message):
return self.wallet.sign_message(address, message, self.password)
def verifymessage(self, address, signature, message):
return bitcoin.verify_message(address, signature, message)
def _mktx(self, outputs, fee = None, change_addr = None, domain = None):
for to_address, amount in outputs:
if not is_valid(to_address):
raise Exception("Invalid Bitcoin address", to_address)
if change_addr:
if not is_valid(change_addr):
raise Exception("Invalid Bitcoin address", change_addr)
if domain is not None:
for addr in domain:
if not is_valid(addr):
raise Exception("invalid Bitcoin address", addr)
if not self.wallet.is_mine(addr):
raise Exception("address not in wallet", addr)
for k, v in self.wallet.labels.items():
if change_addr and v == change_addr:
change_addr = k
final_outputs = []
for to_address, amount in outputs:
for k, v in self.wallet.labels.items():
if v == to_address:
to_address = k
print_msg("alias", to_address)
break
amount = int(100000000*amount)
final_outputs.append(('address', to_address, amount))
if fee: fee = int(100000000*fee)
return self.wallet.mktx(final_outputs, self.password, fee , change_addr, domain)
def mktx(self, to_address, amount, fee = None, change_addr = None, domain = None):
tx = self._mktx([(to_address, amount)], fee, change_addr, domain)
return tx
def mksendmanytx(self, outputs, fee = None, change_addr = None, domain = None):
tx = self._mktx(outputs, fee, change_addr, domain)
return tx
def payto(self, to_address, amount, fee = None, change_addr = None, domain = None):
tx = self._mktx([(to_address, amount)], fee, change_addr, domain)
r, h = self.wallet.sendtx( tx )
return h
def paytomany(self, outputs, fee = None, change_addr = None, domain = None):
tx = self._mktx(outputs, fee, change_addr, domain)
r, h = self.wallet.sendtx( tx )
return h
def history(self):
balance = 0
out = []
for item in self.wallet.get_tx_history():
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
try:
time_str = datetime.datetime.fromtimestamp( timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "----"
label, is_default_label = self.wallet.get_label(tx_hash)
out.append({'txid':tx_hash, 'date':"%16s"%time_str, 'label':label, 'value':format_satoshis(value), 'confirmations':conf})
return out
def setlabel(self, key, label):
self.wallet.set_label(key, label)
def contacts(self):
c = {}
for addr in self.wallet.addressbook:
c[addr] = self.wallet.labels.get(addr)
return c
def searchcontacts(self, query):
results = {}
for addr in self.wallet.addressbook:
if query.lower() in self.wallet.labels.get(addr).lower():
results[addr] = self.wallet.labels.get(addr)
return results
def listaddresses(self, show_all = False, show_label = False):
out = []
for addr in self.wallet.addresses(True):
if show_all or not self.wallet.is_change(addr):
                if show_label:
                    item = { 'address': addr }
                    label = self.wallet.labels.get(addr,'')
                    if label:
                        item['label'] = label
                else:
                    item = addr
out.append( item )
return out
def help(self, cmd=None):
if cmd not in known_commands:
print_msg("\nList of commands:", ', '.join(sorted(known_commands)))
else:
cmd = known_commands[cmd]
print_msg(cmd.description)
if cmd.syntax: print_msg("Syntax: " + cmd.syntax)
if cmd.options: print_msg("options:\n" + cmd.options)
return None
def getrawtransaction(self, tx_hash):
if self.wallet:
tx = self.wallet.transactions.get(tx_hash)
if tx:
return tx
raw = self.network.synchronous_get([ ('blockchain.transaction.get',[tx_hash]) ])[0]
if raw:
return Transaction.deserialize(raw)
else:
return "unknown transaction"
def encrypt(self, pubkey, message):
return bitcoin.encrypt_message(message, pubkey)
def decrypt(self, pubkey, message):
return self.wallet.decrypt_message(pubkey, message, self.password)
| mazaclub/electrum-nmc | lib/commands.py | Python | gpl-3.0 | 19,964 | 0.010068 |
import os
import hashlib
import flask
from sparkpost import SparkPost
from infosystem.common import exception
from infosystem.common.subsystem import manager
from infosystem.common.subsystem import operation
_HTML_EMAIL_TEMPLATE = """
<div style="width: 100%; text-align: center">
<h1>{app_name}</h1>
<h2>CONFIRMAR E CRIAR SENHA</h2>
</div>
<p>Você acaba de ser cadastrado no portal da
{app_name}.</p>
<p>Para ter acesso ao sistema você deve clicar no link abaixo
para confirmar esse email e criar uma senha.</p>
<div style="width: 100%; text-align: center">
<a href="{reset_url}">Clique aqui para CONFIRMAR o
email e CRIAR uma senha.</a>
</div>
"""
def send_email(token_id, user, domain):
try:
sparkpost = SparkPost()
default_app_name = "INFOSYSTEM"
default_email_use_sandbox = False
default_reset_url = 'http://objetorelacional.com.br/#/reset'
default_noreply_email = 'noreply@objetorelacional.com.br'
default_email_subject = 'INFOSYSTEM - CONFIRMAR email e CRIAR senha'
infosystem_app_name = os.environ.get(
'INFOSYSTEM_APP_NAME', default_app_name)
infosystem_reset_url = os.environ.get(
'INFOSYSTEM_RESET_URL', default_reset_url)
infosystem_noreply_email = os.environ.get(
'INFOSYSTEM_NOREPLY_EMAIL', default_noreply_email)
infosystem_email_subject = os.environ.get(
'INFOSYSTEM_EMAIL_SUBJECT', default_email_subject)
infosystem_email_use_sandbox = os.environ.get(
'INFOSYSTEM_EMAIL_USE_SANDBOX',
default_email_use_sandbox) == 'True'
url = infosystem_reset_url + '/' + token_id + '/' + domain.name
sparkpost.transmissions.send(
use_sandbox=infosystem_email_use_sandbox,
recipients=[user.email],
html=_HTML_EMAIL_TEMPLATE.format(
app_name=infosystem_app_name, reset_url=url),
from_email=infosystem_noreply_email,
subject=infosystem_email_subject
)
except Exception:
# TODO(fdoliveira): do something here!
pass
class Create(operation.Create):
def do(self, session, **kwargs):
self.entity = super().do(session, **kwargs)
self.token = self.manager.api.tokens.create(
session=session, user=self.entity)
self.domain = self.manager.api.domains.get(id=self.entity.domain_id)
if not self.domain:
raise exception.OperationBadRequest()
return self.entity
# def post(self):
# send_reset_password_email(self.token.id, self.entity, _RESET_URL)
# send_email(self.token.id, self.entity, self.domain)
class Update(operation.Update):
def do(self, session, **kwargs):
password = kwargs.get('password', None)
if password:
kwargs['password'] = hashlib.sha256(
password.encode('utf-8')).hexdigest()
self.entity = super().do(session, **kwargs)
return self.entity
class Restore(operation.Operation):
def pre(self, **kwargs):
email = kwargs.get('email', None)
domain_name = kwargs.get('domain_name', None)
infosystem_reset_url = os.environ.get(
'INFOSYSTEM_RESET_URL', 'http://objetorelacional.com.br/#/reset/')
self.reset_url = kwargs.get('reset_url', infosystem_reset_url)
if not (domain_name and email and self.reset_url):
raise exception.OperationBadRequest()
domains = self.manager.api.domains.list(name=domain_name)
if not domains:
raise exception.OperationBadRequest()
self.domain = domains[0]
users = self.manager.api.users.list(
email=email, domain_id=self.domain.id)
if not users:
raise exception.OperationBadRequest()
self.user = users[0]
return True
def do(self, session, **kwargs):
token = self.manager.api.tokens.create(user=self.user)
send_email(token.id, self.user, self.domain)
class Reset(operation.Operation):
def pre(self, **kwargs):
self.token = flask.request.headers.get('token')
self.password = kwargs.get('password')
if not (self.token and self.password):
raise exception.OperationBadRequest()
return True
def do(self, session, **kwargs):
token = self.manager.api.tokens.get(id=self.token)
self.manager.update(id=token.user_id, password=self.password)
def post(self):
self.manager.api.tokens.delete(id=self.token)
class Routes(operation.Operation):
def do(self, session, user_id, **kwargs):
grants = self.manager.api.grants.list(user_id=user_id)
grants_ids = [g.role_id for g in grants]
roles = self.manager.api.roles.list()
user_roles_id = [r.id for r in roles if r.id in grants_ids]
        # FIXME(fdoliveira) Try to send user_roles_id as parameter on query
policies = self.manager.api.policies.list()
policies_capabilitys_id = [
p.capability_id for p in policies if p.role_id in user_roles_id]
user = self.manager.api.users.list(id=user_id)[0]
capabilities = self.manager.api.capabilities.list(
domain_id=user.domain_id)
policy_capabilities = [
c for c in capabilities if c.id in policies_capabilitys_id]
        # NOTE(samueldmq): if there is no policy for a capability,
# then it's open! add it too!
restricted_capabilities = [p.capability_id for p in policies]
open_capabilities = [
c for c in capabilities if c.id not in restricted_capabilities]
user_routes = [self.manager.api.routes.get(id=c.route_id) for c in (
policy_capabilities + open_capabilities)]
bypass_routes = self.manager.api.routes.list(bypass=True)
return list(set(user_routes).union(set(bypass_routes)))
class Manager(manager.Manager):
def __init__(self, driver):
super(Manager, self).__init__(driver)
self.create = Create(self)
self.update = Update(self)
self.restore = Restore(self)
self.reset = Reset(self)
self.routes = Routes(self)
| samueldmq/infosystem | infosystem/subsystem/user/manager.py | Python | apache-2.0 | 6,277 | 0 |
from . import conn
class Contest(object):
def __init__(self, contestId, name, length, description, problems):
self._contestId = contestId
self._name = name
self._length = length
self._description = description
self._problems = problems
@classmethod
def load(cls, contestId):
with conn.cursor() as cur:
cur.execute('SELECT name, length, description, problems FROM contests WHERE contest_id=%s;', (contestId,))
result = cur.fetchone()
if result is None:
return None
name, length, description, problems = result
return cls(contestId, name, length, description, problems)
@classmethod
def create(cls, name, length, description, problems):
with conn.cursor() as cur:
cur.execute('''
INSERT INTO contests (name, length, description, problems)
VALUES (%(name)s, %(length)s, %(description)s, %(problems)s)
RETURNING contest_id;
''', {
'name': name,
'length': length,
'description': description,
'problems': problems,
})
contestId, = cur.fetchone()
return cls.load(contestId)
def contestId(self):
return self._contestId
def name(self):
return self._name
def length(self):
return self._length
def description(self):
return self._description
def problems(self):
return self._problems
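# Illustrative usage (hypothetical values; assumes the package-level ``conn``
# is already connected to a database with a ``contests`` table):
#
#   contest = Contest.create('Practice Round', 7200, 'Warm-up set', problems)
#   same = Contest.load(contest.contestId())
#   assert same is not None and same.name() == 'Practice Round'
#
# The exact shape of ``problems`` depends on the column type of
# ``contests.problems`` and is not defined in this module.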
| aioc/aminiaio | aminiaio/db/contest.py | Python | mit | 1,280 | 0.035156 |
#! /usr/bin/env python
def Test():
text ='hi from'
k = text + "call "
print k
return k
def euro():
print "high"
| peterheim1/robbie_ros | robbie_ai/nodes/aiml/know.py | Python | bsd-3-clause | 147 | 0.027211 |
from .exceptions import MalformedRequestException
class Request(object):
def __init__(self, version, metadata, arguments):
self._version = version
self._metadata = metadata
self._arguments = arguments
@property
def version(self):
return self._version
@property
def arguments(self):
return self._arguments
@property
def metadata(self):
return self._metadata
@staticmethod
def loads(s, serializer):
try:
l = serializer.loads(s)
except(ValueError, TypeError):
raise MalformedRequestException(serializer.__name__, s)
try:
version, metadata, args = l[0:3]
except ValueError:
raise MalformedRequestException(serializer.__name__, s)
else:
return Request(version, metadata, args)
def dumps(self, serializer):
return serializer.dumps([self.version, self.metadata, self.arguments])
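# Illustrative round trip (not part of the original module): any module or
# object exposing ``dumps``/``loads`` works as the serializer, e.g. the
# standard-library json module. Values below are hypothetical.
#
#   import json
#   request = Request(1, {'reply_to': 'q1'}, ['arg1', 'arg2'])
#   wire = request.dumps(json)    # '[1, {"reply_to": "q1"}, ["arg1", "arg2"]]'
#   parsed = Request.loads(wire, json)
#   assert parsed.version == 1 and parsed.arguments == ['arg1', 'arg2']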
| cbigler/jackrabbit | jackrabbit/request.py | Python | mit | 977 | 0.002047 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridNearestNeighbor.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridNearestNeighbor(GdalAlgorithm):
INPUT = 'INPUT'
Z_FIELD = 'Z_FIELD'
RADIUS_1 = 'RADIUS_1'
RADIUS_2 = 'RADIUS_2'
ANGLE = 'ANGLE'
NODATA = 'NODATA'
OPTIONS = 'OPTIONS'
EXTRA = 'EXTRA'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Point layer'),
[QgsProcessing.TypeVectorPoint]))
z_field_param = QgsProcessingParameterField(self.Z_FIELD,
self.tr('Z value from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True)
z_field_param.setFlags(z_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(z_field_param)
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_1,
self.tr('The first radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_2,
self.tr('The second radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.ANGLE,
self.tr('Angle of search ellipse rotation in degrees (counter clockwise)'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
maxValue=360.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('NODATA marker to fill empty points'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated (Nearest neighbor)')))
def name(self):
return 'gridnearestneighbor'
def displayName(self):
return self.tr('Grid (Nearest neighbor)')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))
def commandName(self):
return 'gdal_grid'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
arguments = ['-l']
arguments.append(layerName)
fieldName = self.parameterAsString(parameters, self.Z_FIELD, context)
if fieldName:
arguments.append('-zfield')
arguments.append(fieldName)
params = 'nearest'
params += ':radius1={}'.format(self.parameterAsDouble(parameters, self.RADIUS_1, context))
params += ':radius2={}'.format(self.parameterAsDouble(parameters, self.RADIUS_2, context))
params += ':angle={}'.format(self.parameterAsDouble(parameters, self.ANGLE, context))
params += ':nodata={}'.format(self.parameterAsDouble(parameters, self.NODATA, context))
arguments.append('-a')
arguments.append(params)
arguments.append('-ot')
arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
arguments.append(ogrLayer)
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
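# Illustrative command (hypothetical layer, field and file names): for a point
# layer "points" with Z taken from field ELEV and both search radii set to
# 100 map units, getConsoleCommands assembles a gdal_grid call roughly like:
#   gdal_grid -l points -zfield ELEV
#             -a nearest:radius1=100.0:radius2=100.0:angle=0.0:nodata=0.0
#             -ot Float32 -of GTiff /path/to/points.shp /path/to/out.tif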
| SrNetoChan/QGIS | python/plugins/processing/algs/gdal/GridNearestNeighbor.py | Python | gpl-2.0 | 8,683 | 0.003685 |
# -*- coding: utf-8 -*-
"""Parses for MacOS Wifi log (wifi.log) files."""
from __future__ import unicode_literals
import re
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacWifiLogEventData(events.EventData):
"""Mac Wifi log event data.
Attributes:
    action (str): known WiFi action, for example connected to an AP,
configured, etc. If the action is not known, the value is
the message of the log (text variable).
agent (str): name and identifier of process that generated the log message.
function (str): name of function that generated the log message.
text (str): log message
"""
DATA_TYPE = 'mac:wifilog:line'
def __init__(self):
"""Initializes event data."""
super(MacWifiLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.agent = None
self.function = None
self.text = None
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parses MacOS Wifi log (wifi.log) files."""
NAME = 'macwifi'
DATA_FORMAT = 'MacOS Wifi log (wifi.log) file'
_ENCODING = 'utf-8'
THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
THREE_LETTERS = text_parser.PyparsingConstants.THREE_LETTERS
# Regular expressions for known actions.
_CONNECTED_RE = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
_WIFI_PARAMETERS_RE = re.compile(
r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
_KNOWN_FUNCTIONS = [
'airportdProcessDLILEvent',
'_doAutoJoin',
'_processSystemPSKAssoc']
_AGENT = (
pyparsing.Literal('<') +
pyparsing.Combine(
pyparsing.Literal('airportd') + pyparsing.CharsNotIn('>'),
joinString='', adjacent=True).setResultsName('agent') +
pyparsing.Literal('>'))
_DATE_TIME = pyparsing.Group(
THREE_LETTERS.setResultsName('day_of_week') +
THREE_LETTERS.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
THREE_DIGITS.setResultsName('milliseconds'))
# Log line with a known function name.
_MAC_WIFI_KNOWN_FUNCTION_LINE = (
_DATE_TIME.setResultsName('date_time') + _AGENT +
pyparsing.oneOf(_KNOWN_FUNCTIONS).setResultsName('function') +
pyparsing.Literal(':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
# Log line with an unknown function name.
_MAC_WIFI_LINE = (
_DATE_TIME.setResultsName('date_time') + pyparsing.NotAny(
_AGENT +
pyparsing.oneOf(_KNOWN_FUNCTIONS) +
pyparsing.Literal(':')) +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
_MAC_WIFI_HEADER = (
_DATE_TIME.setResultsName('date_time') +
pyparsing.Literal('***Starting Up***').setResultsName('text'))
_DATE_TIME_TURNED_OVER_HEADER = pyparsing.Group(
text_parser.PyparsingConstants.MONTH.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS)
_MAC_WIFI_TURNED_OVER_HEADER = (
_DATE_TIME_TURNED_OVER_HEADER.setResultsName('date_time') +
pyparsing.Combine(
pyparsing.Word(pyparsing.printables) +
pyparsing.Word(pyparsing.printables) +
pyparsing.Literal('logfile turned over') +
pyparsing.LineEnd(),
joinString=' ', adjacent=False).setResultsName('text'))
# Define the available log line structures.
LINE_STRUCTURES = [
('header', _MAC_WIFI_HEADER),
('turned_over_header', _MAC_WIFI_TURNED_OVER_HEADER),
('known_function_logline', _MAC_WIFI_KNOWN_FUNCTION_LINE),
('logline', _MAC_WIFI_LINE)]
_SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
def __init__(self):
"""Initializes a parser."""
super(MacWifiLogParser, self).__init__()
self._last_month = 0
self._year_use = 0
def _GetAction(self, action, text):
"""Parse the well known actions for easy reading.
Args:
action (str): the function or action called by the agent.
text (str): mac Wifi log text.
Returns:
str: a formatted string representing the known (or common) action.
If the action is not known the original log text is returned.
"""
# TODO: replace "x in y" checks by startswith if possible.
if 'airportdProcessDLILEvent' in action:
interface = text.split()[0]
return 'Interface {0:s} turn up.'.format(interface)
if 'doAutoJoin' in action:
match = self._CONNECTED_RE.match(text)
if match:
ssid = match.group(1)[1:-1]
else:
ssid = 'Unknown'
return 'Wifi connected to SSID {0:s}'.format(ssid)
if 'processSystemPSKAssoc' in action:
wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)
if wifi_parameters:
ssid = wifi_parameters.group(1)
bssid = wifi_parameters.group(2)
security = wifi_parameters.group(3)
if not ssid:
ssid = 'Unknown'
if not bssid:
bssid = 'Unknown'
if not security:
security = 'Unknown'
return (
'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
'Security: {2:s}.').format(bssid, ssid, security)
return text
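  # Illustrative behavior (hypothetical log text): for a known function such
  # as airportdProcessDLILEvent the raw message is condensed, e.g.
  #   _GetAction('airportdProcessDLILEvent', 'en0 attached (up)')
  #   -> 'Interface en0 turn up.'
  # Unknown actions fall through and the original text is returned unchanged.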
def _GetTimeElementsTuple(self, key, structure):
"""Retrieves a time elements tuple from the structure.
Args:
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
milliseconds (int): milliseconds.
"""
time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
# TODO: what if time_elements_tuple is None.
if key == 'turned_over_header':
month, day, hours, minutes, seconds = time_elements_tuple
milliseconds = 0
else:
_, month, day, hours, minutes, seconds, milliseconds = time_elements_tuple
# Note that dfdatetime_time_elements.TimeElements will raise ValueError
# for an invalid month.
month = timelib.MONTH_DICT.get(month.lower(), 0)
if month != 0 and month < self._last_month:
# Gap detected between years.
self._year_use += 1
return self._year_use, month, day, hours, minutes, seconds, milliseconds
def _ParseLogLine(self, parser_mediator, key, structure):
"""Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
self._last_month = time_elements_tuple[1]
function = self._GetValueFromStructure(structure, 'function')
text = self._GetValueFromStructure(structure, 'text')
if text:
text = text.strip()
event_data = MacWifiLogEventData()
event_data.agent = self._GetValueFromStructure(structure, 'agent')
event_data.function = function
event_data.text = text
if key == 'known_function_logline':
event_data.action = self._GetAction(
event_data.function, event_data.text)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
self._ParseLogLine(parser_mediator, key, structure)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac Wifi log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
key = 'header'
try:
structure = self._MAC_WIFI_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
key = 'turned_over_header'
try:
structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
logger.debug('Not a Mac Wifi log file')
return False
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
time_elements_tuple))
return False
self._last_month = time_elements_tuple[1]
return True
manager.ParsersManager.RegisterParser(MacWifiLogParser)
| rgayon/plaso | plaso/parsers/mac_wifi.py | Python | apache-2.0 | 10,515 | 0.006182 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
class Migration(DataMigration):
depends_on = (
("guardian", "0005_auto__chg_field_groupobjectpermission_object_pk__chg_field_userobjectp"),
)
def forwards(self, orm):
pass
# remove old permission label if migrated with old model metadata
try:
ct = ContentType.objects.get(model='xform', app_label='odk_logger')
Permission.objects.get(content_type=ct, codename='can_view').delete()
# add new permission label
perm, created = Permission.objects.get_or_create(content_type=ct, codename='view_xform', name='Can view associated data')
except (ContentType.DoesNotExist, Permission.DoesNotExist):
pass
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['odk_logger']
| makinacorpus/formhub | odk_logger/migrations/0012_add_permission_view_xform.py | Python | bsd-2-clause | 7,685 | 0.007547 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
from frappe.permissions import (get_valid_perms, update_permission_property)
from frappe import _
class UserPermission(Document):
def on_update(self):
frappe.cache().delete_value('user_permissions')
def on_trash(self): # pylint: disable=no-self-use
frappe.cache().delete_value('user_permissions')
def get_user_permissions(user=None):
	'''Return the user's User Permissions as a dict mapping doctype to the list of allowed values'''
if not user:
user = frappe.session.user
out = frappe.cache().hget("user_permissions", user)
if out is None:
out = {}
try:
for perm in frappe.get_all('User Permission',
fields=['allow', 'for_value'], filters=dict(user=user)):
meta = frappe.get_meta(perm.allow)
				if perm.allow not in out:
out[perm.allow] = []
out[perm.allow].append(perm.for_value)
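				# for tree doctypes, a permission on a node also covers all of its descendants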
if meta.is_nested_set():
out[perm.allow].extend(frappe.db.get_descendants(perm.allow, perm.for_value))
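			# cache the computed map; it is invalidated by on_update/on_trash above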
frappe.cache().hset("user_permissions", user, out)
except frappe.SQLError as e:
			if e.args[0] == 1146:
				# table doesn't exist yet (MySQL error 1146), e.g. when called from a patch
				pass
	return out
| manassolanki/frappe | frappe/core/doctype/user_permission/user_permission.py | Python | mit | 1,295 | 0.026255 |
from cherry import *
robot = Cherry.setup()
Cherry.serve()
Cherry.connect()
| Cherry-project/primitiveWS | startup.py | Python | gpl-3.0 | 76 | 0.026316 |
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import os, random
from jenkinsflow import jobload
from .framework import api_select
here = os.path.abspath(os.path.dirname(__file__))
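# Context used to render api.job_xml_template when the tests create or update jobs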
_context = dict(
exec_time=1,
params=(),
script=None,
securitytoken='abc',
print_env=False,
create_job=None,
num_builds_to_keep=4,
final_result_use_cli=False,
set_build_descriptions=()
)
def _random_job_name(api, short_name=None):
# If short_name is not specified, use a random name to make sure the job doesn't exist
short_name = short_name or str(random.random()).replace('.', '')
return api.job_name_prefix + short_name, short_name
def _assert_job(api, job_name, cleanup=False):
job = api.get_job(job_name)
assert job is not None
assert job.name == job_name
assert job.public_uri is not None and job_name in job.public_uri
if cleanup:
api.delete_job(job_name)
return None
return job
def test_job_load_new_no_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=False, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_new_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_existing_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=False)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load__existing_update(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=False)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=False, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_non_existing_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
| lhupfeldt/jenkinsflow | test/job_load_test.py | Python | bsd-3-clause | 3,177 | 0.002833 |
from rest_framework.filters import (
FilterSet
)
from trialscompendium.trials.models import Treatment
class TreatmentListFilter(FilterSet):
"""
Filter query list from treatment database table
"""
class Meta:
model = Treatment
fields = {'id': ['exact', 'in'],
'no_replicate': ['exact', 'in', 'gte', 'lte'],
'nitrogen_treatment': ['iexact', 'in', 'icontains'],
'phosphate_treatment': ['iexact', 'in', 'icontains'],
'tillage_practice': ['iexact', 'in', 'icontains'],
'cropping_system': ['iexact', 'in', 'icontains'],
'crops_grown': ['iexact', 'in', 'icontains'],
'farm_yard_manure': ['iexact', 'in', 'icontains'],
'farm_residue': ['iexact', 'in', 'icontains'],
}
order_by = ['tillage_practice', 'cropping_system', 'crops_grown']
| nkoech/trialscompendium | trialscompendium/trials/api/treatment/filters.py | Python | mit | 934 | 0 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Tue Dec 27 19:28:14 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.wxgui import forms
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class top_block(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Top Block")
##################################################
# Variables
##################################################
self.var = var = 11
self.samp_rate = samp_rate = 1e6
self.freq = freq = 1e3
##################################################
# Blocks
##################################################
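        # frequency control: text box and slider sharing one vertical sizer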
_freq_sizer = wx.BoxSizer(wx.VERTICAL)
self._freq_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
label='freq',
converter=forms.float_converter(),
proportion=0,
)
self._freq_slider = forms.slider(
parent=self.GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
minimum=0,
maximum=16e3,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_freq_sizer)
self.wxgui_scopesink2_0 = scopesink2.scope_sink_c(
self.GetWin(),
title='Scope Plot',
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0,
ac_couple=False,
xy_mode=False,
num_inputs=1,
trig_mode=wxgui.TRIG_MODE_AUTO,
y_axis_label='Counts',
)
self.Add(self.wxgui_scopesink2_0.win)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, freq, 1, 0)
##################################################
# Connections
##################################################
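        # signal chain: analog cosine source -> throttle -> scope sink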
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.wxgui_scopesink2_0, 0))
def get_var(self):
return self.var
def set_var(self, var):
self.var = var
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wxgui_scopesink2_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self._freq_slider.set_value(self.freq)
self._freq_text_box.set_value(self.freq)
self.analog_sig_source_x_0.set_frequency(self.freq)
def main(top_block_cls=top_block, options=None):
tb = top_block_cls()
tb.Start(True)
tb.Wait()
if __name__ == '__main__':
main()
| james-tate/gnuradio_projects | ettus_lab/lab1/top_block.py | Python | gpl-3.0 | 3,795 | 0.016074 |